applied-ai-018 commited on
Commit
91ccd8d
·
verified ·
1 Parent(s): c90f994

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. llmeval-env/lib/python3.10/site-packages/torchgen/__pycache__/__init__.cpython-310.pyc +0 -0
  2. llmeval-env/lib/python3.10/site-packages/torchgen/__pycache__/code_template.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/torchgen/__pycache__/context.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/torchgen/__pycache__/gen.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/torchgen/__pycache__/gen_backend_stubs.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/torchgen/__pycache__/gen_executorch.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/torchgen/__pycache__/gen_vmap_plumbing.cpython-310.pyc +0 -0
  8. llmeval-env/lib/python3.10/site-packages/torchgen/__pycache__/model.cpython-310.pyc +0 -0
  9. llmeval-env/lib/python3.10/site-packages/torchgen/__pycache__/native_function_generation.cpython-310.pyc +0 -0
  10. llmeval-env/lib/python3.10/site-packages/torchgen/__pycache__/yaml_utils.cpython-310.pyc +0 -0
  11. llmeval-env/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/CompositeViewCopyKernels.cpp +73 -0
  12. llmeval-env/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/DispatchKeyFunction.h +23 -0
  13. llmeval-env/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/DispatchKeyFunctions_inl.h +22 -0
  14. llmeval-env/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/DispatchKeyNativeFunctions.h +19 -0
  15. llmeval-env/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/Functions.h +143 -0
  16. llmeval-env/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/LazyIr.h +19 -0
  17. llmeval-env/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/LazyNonNativeIr.h +11 -0
  18. llmeval-env/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/NativeFunction.h +17 -0
  19. llmeval-env/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/NativeMetaFunction.h +23 -0
  20. llmeval-env/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/NativeMetaFunctions.h +19 -0
  21. llmeval-env/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/Operators.cpp +19 -0
  22. llmeval-env/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/RedispatchFunctions.cpp +15 -0
  23. llmeval-env/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/RedispatchFunctions.h +32 -0
  24. llmeval-env/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/RegisterDispatchDefinitions.ini +24 -0
  25. llmeval-env/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/RegisterDispatchKey.cpp +54 -0
  26. llmeval-env/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/RegisterFunctionalization.cpp +110 -0
  27. llmeval-env/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/RegisterSchema.cpp +13 -0
  28. llmeval-env/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/RegistrationDeclarations.h +4 -0
  29. llmeval-env/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/TensorBody.h +753 -0
  30. llmeval-env/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/TensorMethods.cpp +61 -0
  31. llmeval-env/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/UfuncCPU.cpp +19 -0
  32. llmeval-env/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/UfuncCPUKernel.cpp +14 -0
  33. llmeval-env/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/UnboxingFunctions.h +32 -0
  34. llmeval-env/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/enum_tag.h +10 -0
  35. llmeval-env/lib/python3.10/site-packages/torchgen/packaged/autograd/BUILD.bazel +4 -0
  36. llmeval-env/lib/python3.10/site-packages/torchgen/packaged/autograd/README.md +3 -0
  37. llmeval-env/lib/python3.10/site-packages/torchgen/packaged/autograd/__init__.py +0 -0
  38. llmeval-env/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_annotated_fn_args.cpython-310.pyc +0 -0
  39. llmeval-env/lib/python3.10/site-packages/torchgen/packaged/autograd/build.bzl +14 -0
  40. llmeval-env/lib/python3.10/site-packages/torchgen/packaged/autograd/context.py +31 -0
  41. llmeval-env/lib/python3.10/site-packages/torchgen/packaged/autograd/deprecated.yaml +134 -0
  42. llmeval-env/lib/python3.10/site-packages/torchgen/packaged/autograd/derivatives.yaml +0 -0
  43. llmeval-env/lib/python3.10/site-packages/torchgen/packaged/autograd/gen_annotated_fn_args.py +129 -0
  44. llmeval-env/lib/python3.10/site-packages/torchgen/packaged/autograd/gen_autograd.py +146 -0
  45. llmeval-env/lib/python3.10/site-packages/torchgen/packaged/autograd/gen_autograd_functions.py +912 -0
  46. llmeval-env/lib/python3.10/site-packages/torchgen/packaged/autograd/gen_inplace_or_view_type.py +675 -0
  47. llmeval-env/lib/python3.10/site-packages/torchgen/packaged/autograd/gen_python_functions.py +1396 -0
  48. llmeval-env/lib/python3.10/site-packages/torchgen/packaged/autograd/gen_trace_type.py +535 -0
  49. llmeval-env/lib/python3.10/site-packages/torchgen/packaged/autograd/gen_variable_factories.py +115 -0
  50. llmeval-env/lib/python3.10/site-packages/torchgen/packaged/autograd/gen_variable_type.py +2162 -0
llmeval-env/lib/python3.10/site-packages/torchgen/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (537 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/torchgen/__pycache__/code_template.cpython-310.pyc ADDED
Binary file (3.06 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torchgen/__pycache__/context.cpython-310.pyc ADDED
Binary file (3.89 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torchgen/__pycache__/gen.cpython-310.pyc ADDED
Binary file (66 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torchgen/__pycache__/gen_backend_stubs.cpython-310.pyc ADDED
Binary file (15.2 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torchgen/__pycache__/gen_executorch.cpython-310.pyc ADDED
Binary file (27.9 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torchgen/__pycache__/gen_vmap_plumbing.cpython-310.pyc ADDED
Binary file (8.77 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torchgen/__pycache__/model.cpython-310.pyc ADDED
Binary file (65.1 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torchgen/__pycache__/native_function_generation.cpython-310.pyc ADDED
Binary file (12.6 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torchgen/__pycache__/yaml_utils.cpython-310.pyc ADDED
Binary file (1.04 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/CompositeViewCopyKernels.cpp ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
2
+ // ${generated_comment}
3
+
4
+ #include <ATen/InferSize.h>
5
+ #include <ATen/Tensor.h>
6
+ #include <ATen/native/Resize.h>
7
+
8
+ #ifndef AT_PER_OPERATOR_HEADERS
9
+ #include <ATen/Operators.h>
10
+ #else
11
+ #include <ATen/ops/clone.h>
12
+ $ops_headers
13
+ #endif
14
+
15
+ namespace at {
16
+ namespace native {
17
+
18
+ // This file contains a number of kernels for aten functions that are fully code-generated.
19
+ // TODO: rename this file to something more generic.
20
+
21
+ namespace {
22
+ at::Tensor clone_arg(const at::Tensor& t) {
23
+ return t.clone();
24
+ }
25
+
26
+ std::vector<at::Tensor> clone_arg(const at::TensorList& t_list) {
27
+ std::vector<at::Tensor> out(t_list.size());
28
+ for (const auto& i : c10::irange(t_list.size())) {
29
+ out[i] = t_list[i].clone();
30
+ }
31
+ return out;
32
+ }
33
+
34
+ // duped with gen_resize_out_helper from structured kernels
35
+ void copy_arg(const at::Tensor& dst, const at::Tensor& src) {
36
+ TORCH_CHECK(src.dtype() == dst.dtype(),
37
+ "Expected out tensor to have dtype ", src.dtype(), ", but got ", dst.dtype(), " instead");
38
+ TORCH_CHECK(src.device() == dst.device(),
39
+ "Expected out tensor to have device ", src.device(), ", but got ", dst.device(), " instead");
40
+ dst.copy_(src);
41
+ }
42
+
43
+ void copy_arg(const at::TensorList& dst, const at::TensorList& src) {
44
+ TORCH_INTERNAL_ASSERT(dst.size() == src.size());
45
+ for (const auto& i : c10::irange(dst.size())) {
46
+ copy_arg(dst[i], src[i]);
47
+ }
48
+ }
49
+
50
+ // TODO: this doesn't handle restriding empty tensors correctly; see
51
+ // gen_resize_out_helper for the correct algorithm
52
+
53
+ void resize_out_helper(const at::Tensor& dst, const at::Tensor& src) {
54
+ at::native::resize_output(dst, src.sizes());
55
+ }
56
+
57
+ void resize_out_helper(const at::TensorList& dst, const at::TensorList& src) {
58
+ TORCH_INTERNAL_ASSERT(dst.size() == src.size());
59
+ for (const auto& i : c10::irange(dst.size())) {
60
+ at::native::resize_output(dst[i], src[i].sizes());
61
+ }
62
+ }
63
+ }
64
+
65
+
66
+ ${CompositeViewCopyKernel_Definitions}
67
+
68
+ ${GeneratedCompositeFunctional_Definitions}
69
+
70
+ ${GeneratedCompositeOut_Definitions}
71
+
72
+ } // namespace native
73
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/DispatchKeyFunction.h ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // ${generated_comment}
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace ${dispatch_namespace} {
19
+
20
+ ${dispatch_namespaced_declarations}
21
+
22
+ } // namespace ${dispatch_namespace}
23
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/DispatchKeyFunctions_inl.h ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // ${generated_comment}
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ #if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
12
+ #error This change adds a dependency on all pytorch operators, meaning the \
13
+ file will need to be re-compiled every time an operator is changed or added. \
14
+ Consider including a specific operator from \
15
+ <ATen/ops/{my_operator}_${dispatch_namespace}_dispatch.h>. \
16
+ See NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
17
+ #endif
18
+
19
+ ${DispatchKeyFunctions_inl_includes}
20
+
21
+
22
+ ${dispatch_namespaced_declarations}
llmeval-env/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/DispatchKeyNativeFunctions.h ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // an external backend might generate file within its code tree
4
+ // and check all the source files within the tree with clang-format.
5
+ // so, disable it since the backend might have a different config.
6
+ // clang-format off
7
+
8
+ // ${generated_comment}
9
+
10
+ #include <ATen/Tensor.h>
11
+
12
+ ${namespace_prologue}
13
+
14
+ struct ${class_name} {
15
+
16
+ ${dispatch_declarations}
17
+
18
+ };
19
+ ${namespace_epilogue}
llmeval-env/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/Functions.h ADDED
@@ -0,0 +1,143 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // ${generated_comment}
4
+
5
+ #ifdef TORCH_ASSERT_NO_OPERATORS
6
+ #error This change adds a dependency on native_functions.yaml, \
7
+ meaning the file will need to be re-compiled every time an operator \
8
+ is changed or added. Consider if your change would be better placed in \
9
+ another file, or if a more specific header might achieve the same goal. \
10
+ See NOTE: [Tensor vs. TensorBase]
11
+ #endif
12
+
13
+ #if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
14
+ #error This change adds a dependency on all pytorch operators, meaning the \
15
+ file will need to be re-compiled every time an operator is changed or added. \
16
+ Consider including a specific operator from <ATen/ops/{my_operator}.h> and \
17
+ see NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
18
+ #endif
19
+
20
+ // NOTE: [TORCH_ASSERT_ONLY_METHOD_OPERATORS]
21
+ //
22
+ // In ATen, certain generated headers files include the definitions of
23
+ // every single operator in PyTorch. Unfortunately this means every
24
+ // time an operator signature is updated or changed in
25
+ // native_functions.yaml, you (and every other PyTorch developer) need
26
+ // to recompile every source file that includes any of these headers.
27
+ //
28
+ // To break up these header dependencies, and improve incremental
29
+ // build times for all PyTorch developers. These headers are split
30
+ // into per-operator headers in the `ATen/ops` folder. This limits
31
+ // incremental builds to only changes to methods of `Tensor`, or files
32
+ // that use the specific operator being changed. With `at::sum` as an
33
+ // example, you should include
34
+ //
35
+ // <ATen/ops/sum.h> // instead of ATen/Functions.h
36
+ // <ATen/ops/sum_native.h> // instead of ATen/NativeFunctions.h
37
+ // <ATen/ops/sum_ops.h> // instead of ATen/Operators.h
38
+ // <ATen/ops/sum_cpu_dispatch.h> // instead of ATen/CPUFunctions.h
39
+ //
40
+ // However, even if you're careful to use this in your own code.
41
+ // `Functions.h` might be included indirectly through another header
42
+ // without you realising. To avoid this, you can add
43
+ //
44
+ // #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
45
+ //
46
+ // to the top of your source file. This way any time the non-specific
47
+ // headers are included, the compiler will error out.
48
+ //
49
+ // Also, be aware that `ops` are not available in all build
50
+ // configurations (namely fb-internal) so you must guard these
51
+ // includes with `#ifdef AT_PER_OPERATOR_HEADERS`. e.g.
52
+ //
53
+ // #ifndef AT_PER_OPERATOR_HEADERS
54
+ // #include <ATen/Functions.h>
55
+ // #else
56
+ // #include <ATen/ops/sum.h>
57
+ // #endif
58
+
59
+ #include <ATen/Context.h>
60
+ #include <ATen/DeviceGuard.h>
61
+ #include <ATen/TensorUtils.h>
62
+ #include <ATen/TracerMode.h>
63
+ #include <ATen/core/Generator.h>
64
+ #include <ATen/core/Reduction.h>
65
+ #include <c10/core/SymInt.h>
66
+ #include <ATen/core/Tensor.h>
67
+ #include <c10/core/Scalar.h>
68
+ #include <c10/core/Storage.h>
69
+ #include <c10/core/TensorOptions.h>
70
+ #include <c10/util/Deprecated.h>
71
+ #include <c10/util/Optional.h>
72
+ #include <c10/util/OptionalArrayRef.h>
73
+
74
+ #include <ATen/ops/from_blob.h>
75
+ #include <ATen/ops/tensor.h>
76
+
77
+ ${Functions_includes}
78
+
79
+ namespace at {
80
+
81
+ ${Functions_declarations}
82
+
83
+ // Special C++ only overloads for std()-like functions (See gh-40287)
84
+ // These are needed because int -> bool conversion takes precedence over int -> IntArrayRef
85
+ // So, for example std(0) would select the std(unbiased=False) overload
86
+ TORCH_API inline Tensor var(const Tensor& self, int dim) {
87
+ return at::var(self, IntArrayRef{dim});
88
+ }
89
+ TORCH_API inline std::tuple<Tensor, Tensor> var_mean(const Tensor& self, int dim) {
90
+ return at::var_mean(self, IntArrayRef{dim});
91
+ }
92
+ TORCH_API inline Tensor std(const Tensor& self, int dim) {
93
+ return at::std(self, IntArrayRef{dim});
94
+ }
95
+ TORCH_API inline std::tuple<Tensor, Tensor> std_mean(const Tensor& self, int dim) {
96
+ return at::std_mean(self, IntArrayRef{dim});
97
+ }
98
+
99
+ inline int64_t numel(const Tensor& tensor) {
100
+ return tensor.numel();
101
+ }
102
+
103
+ inline int64_t size(const Tensor& tensor, int64_t dim) {
104
+ return tensor.size(dim);
105
+ }
106
+
107
+ inline int64_t stride(const Tensor& tensor, int64_t dim) {
108
+ return tensor.stride(dim);
109
+ }
110
+
111
+ inline bool is_complex(const Tensor& tensor) {
112
+ return tensor.is_complex();
113
+ }
114
+
115
+ inline bool is_floating_point(const Tensor& tensor) {
116
+ return tensor.is_floating_point();
117
+ }
118
+
119
+ inline bool is_signed(const Tensor& tensor) {
120
+ return tensor.is_signed();
121
+ }
122
+
123
+ inline bool is_inference(const Tensor& tensor) {
124
+ return tensor.is_inference();
125
+ }
126
+
127
+ inline bool _is_zerotensor(const Tensor& tensor) {
128
+ return tensor._is_zerotensor();
129
+ }
130
+
131
+ inline bool is_conj(const Tensor& tensor) {
132
+ return tensor.is_conj();
133
+ }
134
+
135
+ inline Tensor conj(const Tensor& tensor) {
136
+ return tensor.conj();
137
+ }
138
+
139
+ inline bool is_neg(const Tensor& tensor) {
140
+ return tensor.is_neg();
141
+ }
142
+
143
+ }
llmeval-env/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/LazyIr.h ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // This file contains autogenerated LazyTensor IR nodes
4
+ ${lazy_ir_sysinc}
5
+ ${lazy_ir_inc}
6
+
7
+ ${namespace_prologue}
8
+ using at::operator<<;
9
+
10
+ // kNullValue is used to contribute a static hash value any time
11
+ // a node has an Optional<Value> input that is nullopt. It is important
12
+ // to differentiate between HASH(nullopt, something) and HASH(something, nullopt),
13
+ // and using kNullValue in the hash function in the order of arguments
14
+ // serves this purpose.
15
+ static const torch::lazy::Value kNullValue = torch::lazy::Value();
16
+
17
+ ${ir_declarations}
18
+
19
+ ${namespace_epilogue}
llmeval-env/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/LazyNonNativeIr.h ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ ${lazy_non_native_ir_inc}
4
+
5
+ // This file contains autogenerated LazyTensor Non Native IR nodes
6
+
7
+ ${namespace_prologue}
8
+
9
+ ${non_native_ir_nodes}
10
+
11
+ ${namespace_epilogue}
llmeval-env/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/NativeFunction.h ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // ${generated_comment}
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+ ${extra_includes}
16
+
17
+ ${native_function_declarations}
llmeval-env/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/NativeMetaFunction.h ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // ${generated_comment}
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/TensorIterator.h>
13
+ #include <ATen/TensorMeta.h>
14
+ #include <tuple>
15
+ #include <vector>
16
+
17
+ namespace at {
18
+ namespace meta {
19
+
20
+ ${meta_function_declarations}
21
+
22
+ } // namespace native
23
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/NativeMetaFunctions.h ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // ${generated_comment}
4
+
5
+ #include <ATen/core/Tensor.h>
6
+ #include <ATen/core/IListRef.h>
7
+ #include <ATen/TensorMeta.h>
8
+ #include <ATen/TensorIterator.h>
9
+
10
+ ${NativeMetaFunctions_includes}
11
+
12
+ namespace at {
13
+
14
+ namespace meta {
15
+
16
+ ${NativeMetaFunctions_declarations}
17
+
18
+ } // namespace meta
19
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/Operators.cpp ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #include <ATen/Tensor.h>
2
+ #include <ATen/core/dispatch/Dispatcher.h>
3
+
4
+ // ${generated_comment}
5
+ // NOTE See [Sharded File] comment in VariableType
6
+
7
+ #ifndef AT_PER_OPERATOR_HEADERS
8
+ #include <ATen/Operators.h>
9
+ #else
10
+ ${operator_headers}
11
+ #endif
12
+
13
+ ${static_dispatch_extra_headers}
14
+
15
+ namespace at { namespace _ops {
16
+
17
+ ${definitions}
18
+
19
+ }} // namespace at::_ops
llmeval-env/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/RedispatchFunctions.cpp ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // ${generated_comment}
2
+
3
+ #include <ATen/RedispatchFunctions.h>
4
+ #include <ATen/Functions.h>
5
+
6
+ #include <ATen/core/dispatch/Dispatcher.h>
7
+ #include <ATen/core/op_registration/adaption.h>
8
+
9
+ namespace at {
10
+
11
+ namespace redispatch {
12
+ ${function_redispatch_definitions}
13
+ } // namespace redispatch
14
+
15
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/RedispatchFunctions.h ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // ${generated_comment}
4
+
5
+ #ifdef TORCH_ASSERT_ONLY_METHOD_OPERATORS
6
+ #error This change adds a dependency on all pytorch operators, meaning the \
7
+ file will need to be re-compiled every time an operator is changed or added. \
8
+ Consider using the at::_ops::{name}::redispatch() interface by including \
9
+ the specific operator from <ATen/ops/{my_operator}_ops.h>
10
+ #endif
11
+
12
+ #include <c10/core/Scalar.h>
13
+ #include <ATen/Tensor.h>
14
+ #include <c10/core/Storage.h>
15
+ #include <ATen/core/Generator.h>
16
+ #include <c10/util/Deprecated.h>
17
+ #include <ATen/DeviceGuard.h>
18
+ #include <c10/core/TensorOptions.h>
19
+ #include <ATen/core/Reduction.h>
20
+ #include <c10/util/Optional.h>
21
+ #include <ATen/TensorUtils.h>
22
+ #include <ATen/Context.h>
23
+ #include <ATen/TracerMode.h>
24
+ #include <ATen/Operators.h>
25
+
26
+ namespace at {
27
+
28
+ namespace redispatch {
29
+ ${function_redispatch_definitions}
30
+ } // namespace redispatch
31
+
32
+ }
llmeval-env/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/RegisterDispatchDefinitions.ini ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ${ns_prologue}
2
+
3
+ // NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
4
+ // ambiguity with conflicting identifiers that may have been defined in
5
+ // at namespace already.
6
+ namespace {
7
+
8
+ ${dispatch_helpers}
9
+
10
+ ${dispatch_anonymous_definitions}
11
+
12
+ ${static_init_dispatch_registrations}
13
+
14
+ } // anonymous namespace
15
+
16
+ ${deferred_dispatch_registrations}
17
+
18
+ namespace ${dispatch_namespace} {
19
+
20
+ ${dispatch_namespaced_definitions}
21
+
22
+ } // namespace ${dispatch_namespace}
23
+
24
+ ${ns_epilogue}
llmeval-env/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/RegisterDispatchKey.cpp ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // required for old g++ to compile PRId64 macros, see
2
+ // https://github.com/pytorch/pytorch/issues/3571
3
+ // for context
4
+ #ifndef __STDC_FORMAT_MACROS
5
+ #define __STDC_FORMAT_MACROS
6
+ #endif
7
+
8
+ // an external backend might generate file within its code tree
9
+ // and check all the source files within the tree with clang-format.
10
+ // so, disable it since the backend might have a different config.
11
+ // clang-format off
12
+
13
+ // NOTE: This condition is true for all PyTorch internal libraries, it
14
+ // just excludes external projects such as torch_xla which
15
+ // re-use some of the PyTorch codegen machinery.
16
+ #if defined(CAFFE2_BUILD_MAIN_LIB) || \
17
+ defined(TORCH_CUDA_BUILD_MAIN_LIB) || \
18
+ defined(TORCH_HIP_BUILD_MAIN_LIB) || \
19
+ defined(TORCH_CUDA_CU_BUILD_MAIN_LIB) || \
20
+ defined(TORCH_CUDA_CPP_BUILD_MAIN_LIB)
21
+ #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
22
+ #endif
23
+
24
+ // ${generated_comment}
25
+
26
+ #include <c10/core/TensorImpl.h>
27
+ #include <c10/core/Allocator.h>
28
+ #include <ATen/DeviceGuard.h>
29
+ #include <ATen/NamedTensorUtils.h>
30
+ #include <ATen/Utils.h>
31
+ #include <ATen/WrapDimUtils.h>
32
+ #include <ATen/Dispatch.h>
33
+ #include <c10/util/ExclusivelyOwned.h>
34
+ #include <c10/util/Half.h>
35
+ #include <c10/core/UndefinedTensorImpl.h>
36
+ #include <c10/util/Optional.h>
37
+ #include <ATen/Tensor.h>
38
+ #include <ATen/native/Resize.h>
39
+
40
+ #include <cstddef>
41
+ #include <functional>
42
+ #include <memory>
43
+ #include <utility>
44
+
45
+ #include <ATen/Config.h>
46
+ #include <ATen/core/op_registration/adaption.h>
47
+ #include <torch/library.h>
48
+ $extra_cuda_headers
49
+ $external_backend_headers
50
+ $dispatch_headers
51
+ $ops_headers
52
+
53
+ // See template file RegisterDispatchDefinitions.ini
54
+ $dispatch_definitions
llmeval-env/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/RegisterFunctionalization.cpp ADDED
@@ -0,0 +1,110 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
2
+ // ${generated_comment}
3
+
4
+ #include <ATen/core/LegacyTypeDispatch.h>
5
+ #include <ATen/EmptyTensor.h>
6
+ #include <ATen/FunctionalTensorWrapper.h>
7
+ #include <ATen/FunctionalInverses.h>
8
+ #include <ATen/MemoryOverlap.h>
9
+ #include <torch/library.h>
10
+
11
+ #ifndef AT_PER_OPERATOR_HEADERS
12
+ #include <ATen/Operators.h>
13
+ #include <ATen/NativeFunctions.h>
14
+ #else
15
+ // needed for the meta tensor calls to get stride info in functionalization
16
+ #include <ATen/ops/empty_strided_native.h>
17
+ // needed for special handling of copy_().
18
+ // See Note [functionalizating copy_() and not preserving strides]
19
+ #include <ATen/ops/to_ops.h>
20
+ #include <ATen/ops/expand_copy_ops.h>
21
+
22
+ $ops_headers
23
+ #endif
24
+
25
+ namespace at {
26
+ namespace functionalization {
27
+
28
+ // This keyset is used by functionalization when it calls into meta kernels
29
+ // to accurately propagate stride metadata.
30
+ // Exclude any modes: the purpose of calling into meta kernels is only as an implementation
31
+ // detail to perform shape inference, and we don't want any modal keys to run.
32
+ // Specifically, we want to prevent functionalization and Python modes from running.
33
+ constexpr auto exclude_keys_for_meta_dispatch =
34
+ c10::functorch_transforms_ks |
35
+ c10::DispatchKeySet({
36
+ c10::DispatchKey::FuncTorchDynamicLayerBackMode,
37
+ c10::DispatchKey::FuncTorchDynamicLayerFrontMode,
38
+ c10::DispatchKey::Python,
39
+ c10::DispatchKey::PreDispatch,
40
+
41
+ });
42
+
43
+ // Helper around at::has_internal_overlap.
44
+ // The ATen util is used in hot-path eager mode: it's always fast,
45
+ // but might return TOO_HARD sometimes.
46
+ // During functionalization, we're ok taking a bit longer
47
+ // to detect memory overlap.
48
+ inline bool has_internal_overlap_helper(const at::Tensor t) {
49
+ auto has_overlap = at::has_internal_overlap(t);
50
+ if (has_overlap == at::MemOverlap::Yes) return true;
51
+ if (has_overlap == at::MemOverlap::No) return false;
52
+ return false;
53
+ }
54
+
55
+
56
+ inline Tensor to_meta(const Tensor& t) {
57
+ if (!t.defined()) return t;
58
+ return at::native::empty_strided_meta_symint(t.sym_sizes(), t.sym_strides(),
59
+ /*dtype=*/c10::make_optional(t.scalar_type()), /*layout=*/c10::make_optional(t.layout()),
60
+ /*device=*/c10::make_optional(c10::Device(kMeta)), /*pin_memory=*/c10::nullopt);
61
+ }
62
+
63
+ inline c10::optional<Tensor> to_meta(const c10::optional<Tensor>& t) {
64
+ if (t.has_value()) {
65
+ return c10::make_optional<Tensor>(to_meta(*t));
66
+ }
67
+ return c10::nullopt;
68
+ }
69
+
70
+ inline std::vector<Tensor> to_meta(at::ITensorListRef t_list) {
71
+ std::vector<Tensor> outputs;
72
+ outputs.reserve(t_list.size());
73
+ for (const auto& tensor : t_list) {
74
+ outputs.push_back(to_meta(tensor));
75
+ }
76
+ return outputs;
77
+ }
78
+
79
+ inline c10::List<Tensor> to_meta(const c10::List<Tensor>& t_list) {
80
+ c10::List<Tensor> outputs;
81
+ outputs.reserve(t_list.size());
82
+ for (const auto i : c10::irange(t_list.size())) {
83
+ outputs.push_back(to_meta(t_list[i]));
84
+ }
85
+ return outputs;
86
+ }
87
+
88
+ inline c10::List<c10::optional<Tensor>> to_meta(const c10::List<c10::optional<Tensor>>& t_list) {
89
+ c10::List<c10::optional<Tensor>> outputs;
90
+ outputs.reserve(t_list.size());
91
+ for (const auto i : c10::irange(t_list.size())) {
92
+ outputs.push_back(to_meta(t_list[i]));
93
+ }
94
+ return outputs;
95
+ }
96
+
97
+
98
+ ${func_definitions}
99
+
100
+ } // namespace functionalization
101
+
102
+ namespace {
103
+
104
+ TORCH_LIBRARY_IMPL(aten, Functionalize, m) {
105
+ ${func_registrations};
106
+ }
107
+
108
+ } // namespace
109
+
110
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/RegisterSchema.cpp ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // ${generated_comment}
2
+ #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
3
+ #include <torch/library.h>
4
+
5
+ namespace at {
6
+ TORCH_LIBRARY(aten, m) {
7
+ ${aten_schema_registrations};
8
+ // Distributed Ops
9
+ // Implementations located in torch/csrc/jit/runtime/register_distributed_ops.cpp
10
+ m.def("get_gradients(int context_id) -> Dict(Tensor, Tensor)");
11
+ }
12
+ ${schema_registrations}
13
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/RegistrationDeclarations.h ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ // This file contains all native_functions that can be registered to
2
+ // and the schema string that they should be registered with
3
+
4
+ ${registration_declarations}
llmeval-env/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/TensorBody.h ADDED
@@ -0,0 +1,753 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #ifdef TORCH_ASSERT_NO_OPERATORS
4
+ #error This change adds a dependency on native_functions.yaml, \
5
+ meaning the file will need to be re-compiled every time an operator \
6
+ is changed or added. Consider if your change would be better placed in \
7
+ another file, or if a more specific header might achieve the same goal. \
8
+ See NOTE: [Tensor vs. TensorBase]
9
+ #endif
10
+
11
+ #include <c10/core/Device.h>
12
+ #include <c10/core/Layout.h>
13
+ #include <c10/core/MemoryFormat.h>
14
+ #include <c10/core/QScheme.h>
15
+ #include <c10/core/Stream.h>
16
+ #include <c10/core/Scalar.h>
17
+ #include <c10/core/ScalarType.h>
18
+ #include <c10/core/ScalarTypeToTypeMeta.h>
19
+ #include <c10/core/Storage.h>
20
+ #include <c10/core/TensorImpl.h>
21
+ #include <c10/core/UndefinedTensorImpl.h>
22
+ #include <c10/core/WrapDimMinimal.h>
23
+ #include <c10/util/Exception.h>
24
+ #include <c10/util/ExclusivelyOwned.h>
25
+ #include <c10/util/Deprecated.h>
26
+ #include <c10/util/MaybeOwned.h>
27
+ #include <c10/util/Optional.h>
28
+ #include <c10/util/OptionalArrayRef.h>
29
+ #include <c10/util/intrusive_ptr.h>
30
+ #include <c10/macros/Export.h>
31
+ #include <ATen/core/CheckMemoryFormat.h>
32
+ #include <ATen/core/DeprecatedTypePropertiesRegistry.h>
33
+ #include <ATen/core/DeprecatedTypeProperties.h>
34
+ #include <ATen/core/NamedTensor.h>
35
+ #include <ATen/core/QuantizerBase.h>
36
+ #include <c10/core/SymInt.h>
37
+ #include <ATen/core/TensorAccessor.h>
38
+ #include <ATen/core/TensorBase.h>
39
+
40
+
41
+ #include <ATen/MethodOperators.h>
42
+
43
+ namespace c10{
44
+ template<class T> class List;
45
+ template<class T> class IListRef;
46
+ }
47
+ namespace at {
48
+ struct Generator;
49
+ struct Type;
50
+ class DeprecatedTypeProperties;
51
+ class Tensor;
52
+ } // namespace at
53
+ namespace at {
54
+ namespace indexing {
55
+ struct TensorIndex;
56
+ } // namespace indexing
57
+ } // namespace at
58
+
59
+ namespace torch { namespace autograd {
60
+
61
+ struct Node;
62
+
63
+ }} // namespace torch::autograd
64
+
65
+ namespace at {
66
+
67
+ class OptionalTensorRef;
68
+ class TensorRef;
69
+ class Tensor;
70
+ using TensorList = ArrayRef<Tensor>;
71
+ using ITensorList = c10::IListRef<Tensor>;
72
+
73
+ using Stream = c10::Stream;
74
+
75
+ // Tensor is a "generic" object holding a pointer to the underlying TensorImpl object, which
76
+ // has an embedded reference count. In this way, Tensor is similar to boost::intrusive_ptr.
77
+ //
78
+ // For example:
79
+ //
80
+ // void func(Tensor a) {
81
+ // Tensor b = a;
82
+ // ...
83
+ // }
84
+ //
85
+ // In this example, when we say Tensor b = a, we are creating a new object that points to the
86
+ // same underlying TensorImpl, and bumps its reference count. When b goes out of scope, the
87
+ // destructor decrements the reference count by calling release() on the TensorImpl it points to.
88
+ // The existing constructors, operator overloads, etc. take care to implement the correct semantics.
89
+ //
90
+ // Note that Tensor can also be NULL, i.e. it is not associated with any underlying TensorImpl, and
91
+ // special care must be taken to handle this.
92
+ class TORCH_API Tensor: public TensorBase {
93
+ protected:
94
+ // Create a Tensor with a +0 reference count. Special care must be
95
+ // taken to avoid decrementing this reference count at destruction
96
+ // time. Intended to support MaybeOwnedTraits<Tensor>.
97
+ explicit Tensor(unsafe_borrow_t, const TensorBase& rhs): TensorBase(unsafe_borrow_t{}, rhs) {}
98
+ friend MaybeOwnedTraits<Tensor>;
99
+ friend OptionalTensorRef;
100
+ friend TensorRef;
101
+
102
+ public:
103
+ Tensor() = default;
104
+ // This constructor should not be used by end users and is an implementation
105
+ // detail invoked by autogenerated code.
106
+ explicit Tensor(
107
+ c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl> tensor_impl)
108
+ : TensorBase(std::move(tensor_impl)) {}
109
+ Tensor(const Tensor &tensor) = default;
110
+ Tensor(Tensor &&tensor) = default;
111
+
112
+ // Implicitly move-constructible from TensorBase, but must be explicit to increase refcount
113
+ explicit Tensor(const TensorBase &base): TensorBase(base) {}
114
+ /*implicit*/ Tensor(TensorBase &&base): TensorBase(std::move(base)) {}
115
+
116
+ // Creates a new wrapper from TensorImpl. Intentionally a free method because
117
+ // it should be used with care. Checks necessary invariants
118
+ static Tensor wrap_tensor_impl(
119
+ c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl> tensor_impl) {
120
+ return TensorBase::wrap_tensor_impl(std::move(tensor_impl));
121
+ }
122
+
123
+ Tensor contiguous(MemoryFormat memory_format=MemoryFormat::Contiguous) const {
124
+ return TensorBase::contiguous(memory_format);
125
+ }
126
+
127
+ Tensor conj() const {
128
+ if (!this->is_complex()) {
129
+ return *this;
130
+ }
131
+
132
+ switch (this->layout()) {
133
+ case at::kSparse:
134
+ case at::kSparseCsr:
135
+ case at::kSparseCsc:
136
+ case at::kSparseBsr:
137
+ case at::kSparseBsc:
138
+ return this->conj_physical();
139
+ default:
140
+ return this->_conj();
141
+ }
142
+ }
143
+
144
+ // Aliased by Dimname overloads, so need explicit using
145
+ using TensorBase::size;
146
+ using TensorBase::sym_size;
147
+ using TensorBase::stride;
148
+
149
+ /// Should be used if *this can reasonably be expected to be contiguous and
150
+ /// performance is important.
151
+ /// Compared to contiguous, it saves a reference count
152
+ /// increment/decrement if *this is already contiguous, at the cost
153
+ /// in all cases of an extra pointer of stack usage, an extra branch
154
+ /// to access, and an extra branch at destruction time.
155
+ c10::MaybeOwned<Tensor> expect_contiguous(MemoryFormat memory_format=MemoryFormat::Contiguous) const &;
156
+
157
+ // Use .contiguous() instead. Trying to borrow from a prvalue Tensor
158
+ // will only lead to trouble and dangling references.
159
+ c10::MaybeOwned<Tensor> expect_contiguous(MemoryFormat memory_format=MemoryFormat::Contiguous) && = delete;
160
+
161
+ // The following overloads are very intruiging. Consider the following
162
+ // program:
163
+ //
164
+ // x[1] = 3;
165
+ //
166
+ // We would expect that the first entry of x is written to 3. But how can we
167
+ // actually achieve this? x[1] evaluates to a tensor...
168
+ //
169
+ // The answer is, using a ref-qualifier. x[1] is an rvalue, which cannot be
170
+ // (profitably) assigned to in the traditional sense, so we overload
171
+ // assignment to mean, "Actually, copy 3 into the tensor data." This is done
172
+ // with an rvalue-reference ref-qualified overload (the methods with && at the
173
+ // end of their type.)
174
+ //
175
+ // There's one more fly in the ointment: We also want
176
+ //
177
+ // Tensor x = y;
178
+ //
179
+ // to work, and we want it NOT to copy. So we need a traditional operator=
180
+ // overload. But we MUST specify a mutable lvalue ref-qualifier, to
181
+ // disambiguate the traditional overload from the rvalue-reference
182
+ // ref-qualified overload. Otherwise, it will be ambiguous, because
183
+ // a non ref-qualified method is eligible for all situations.
184
+
185
+ // Unfortunately, we have to write these constructors out manually
186
+ // to work around an MSVC bug:
187
+ // error C2580: 'at::Tensor &at::Tensor::operator =(const at::Tensor &) &':
188
+ // multiple versions of a defaulted special member functions are not allowed
189
+ // Tensor& operator=(const Tensor&) & = default;
190
+ // Tensor& operator=(Tensor&&) & = default;
191
+
192
+ // Also MSVC will wrongly issue the following warning with the aforementioned fix
193
+ // warning C4522: 'at::Tensor': multiple assignment operators specified
194
+ // Let's just skip the warning.
195
+ //
196
+ // TODO: temporarily disabled
197
+
198
+ Tensor& operator=(const TensorBase& x) & {
199
+ impl_ = x.getIntrusivePtr();
200
+ return *this;
201
+ }
202
+ Tensor& operator=(TensorBase&& x) & noexcept {
203
+ impl_ = x.unsafeReleaseIntrusivePtr();
204
+ return *this;
205
+ }
206
+
207
+ Tensor& operator=(const Tensor &x) & {
208
+ return operator=(static_cast<const TensorBase&>(x));
209
+ }
210
+ Tensor& operator=(Tensor &&x) & noexcept {
211
+ return operator=(static_cast<TensorBase&&>(x));
212
+ }
213
+
214
+ Tensor& operator=(const Scalar &v) && {
215
+ return fill_(v);
216
+ }
217
+ Tensor& operator=(const Tensor &rhs) && {
218
+ return copy_(rhs);
219
+ }
220
+ Tensor& operator=(Tensor&& rhs) && {
221
+ return copy_(rhs);
222
+ }
223
+
224
+ C10_DEPRECATED_MESSAGE("Tensor.type() is deprecated. Instead use Tensor.options(), which in many cases (e.g. in a constructor) is a drop-in replacement. If you were using data from type(), that is now available from Tensor itself, so instead of tensor.type().scalar_type(), use tensor.scalar_type() instead and instead of tensor.type().backend() use tensor.device().")
225
+ DeprecatedTypeProperties & type() const {
226
+ return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
227
+ dispatchKeyToBackend(legacyExtractDispatchKey(key_set())),
228
+ scalar_type());
229
+ }
230
+
231
+ Tensor toType(ScalarType t) const {
232
+ return to(options().dtype(t), /*non_blocking*/ false, /*copy*/ false);
233
+ }
234
+
235
+ // TODO: Deprecate me
236
+ Tensor toBackend(Backend b) const {
237
+ return to(options().device(backendToDeviceType(b)).layout(layout_from_backend(b)), /*non_blocking*/ false, /*copy*/ false);
238
+ }
239
+
240
+ C10_DEPRECATED_MESSAGE("Tensor.is_variable() is deprecated; everything is a variable now. (If you want to assert that variable has been appropriately handled already, use at::impl::variable_excluded_from_dispatch())")
241
+ bool is_variable() const noexcept {
242
+ return !at::impl::variable_excluded_from_dispatch();
243
+ }
244
+
245
+ template<typename T>
246
+ C10_DEPRECATED_MESSAGE("Tensor.data<T>() is deprecated. Please use Tensor.data_ptr<T>() instead.")
247
+ T * data() const {
248
+ return data_ptr<T>();
249
+ }
250
+
251
+ template <typename T>
252
+ T item() const;
253
+
254
+ template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
255
+ C10_DEPRECATED_MESSAGE("packed_accessor is deprecated, use packed_accessor32 or packed_accessor64 instead")
256
+ GenericPackedTensorAccessor<T,N,PtrTraits,index_t> packed_accessor() const & {
257
+ return generic_packed_accessor<T,N,PtrTraits,index_t>();
258
+ }
259
+ template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
260
+ C10_DEPRECATED_MESSAGE("packed_accessor is deprecated, use packed_accessor32 or packed_accessor64 instead")
261
+ GenericPackedTensorAccessor<T,N,PtrTraits,index_t> packed_accessor() && = delete;
262
+
263
+ Tensor operator~() const {
264
+ return bitwise_not();
265
+ }
266
+ Tensor operator-() const {
267
+ return neg();
268
+ }
269
+ Tensor& operator+=(const Tensor & other) {
270
+ return add_(other);
271
+ }
272
+ Tensor& operator+=(const Scalar & other) {
273
+ return add_(other);
274
+ }
275
+ Tensor& operator-=(const Tensor & other) {
276
+ return sub_(other);
277
+ }
278
+ Tensor& operator-=(const Scalar & other) {
279
+ return sub_(other);
280
+ }
281
+ Tensor& operator*=(const Tensor & other) {
282
+ return mul_(other);
283
+ }
284
+ Tensor& operator*=(const Scalar & other) {
285
+ return mul_(other);
286
+ }
287
+ Tensor& operator/=(const Tensor & other) {
288
+ return div_(other);
289
+ }
290
+ Tensor& operator/=(const Scalar & other) {
291
+ return div_(other);
292
+ }
293
+ Tensor& operator&=(const Tensor & other) {
294
+ return bitwise_and_(other);
295
+ }
296
+ Tensor& operator|=(const Tensor & other) {
297
+ return bitwise_or_(other);
298
+ }
299
+ Tensor& operator^=(const Tensor & other) {
300
+ return bitwise_xor_(other);
301
+ }
302
+ Tensor operator[](const Scalar & index) const {
303
+ if (!index.isIntegral(false)) {
304
+ TORCH_CHECK_INDEX(false, "Can only index tensors with integral scalars");
305
+ }
306
+ return this->operator[](index.toLong());
307
+ }
308
+ Tensor operator[](const Tensor & index) const {
309
+ // These properties are checked in the Scalar constructor, but we already
310
+ // check them here to provide more useful diagnostics for the user.
311
+ if (!index.defined()) {
312
+ TORCH_CHECK_INDEX(false, "Can only index with tensors that are defined");
313
+ }
314
+ if (index.dim() != 0) {
315
+ TORCH_CHECK_INDEX(false,
316
+ "Can only index with tensors that are scalars (zero-dim)");
317
+ }
318
+ // The Scalar(Tensor) constructor is explicit, so we need to call it.
319
+ return this->operator[](index.item());
320
+ }
321
+ Tensor operator[](int64_t index) const {
322
+ return select(0, index);
323
+ }
324
+
325
+ Tensor index(ArrayRef<at::indexing::TensorIndex> indices) const;
326
+ Tensor index(std::initializer_list<at::indexing::TensorIndex> indices) const;
327
+
328
+ Tensor & index_put_(ArrayRef<at::indexing::TensorIndex> indices, Tensor const & rhs);
329
+ Tensor & index_put_(ArrayRef<at::indexing::TensorIndex> indices, const Scalar& v);
330
+ Tensor & index_put_(std::initializer_list<at::indexing::TensorIndex> indices, Tensor const & rhs);
331
+ Tensor & index_put_(std::initializer_list<at::indexing::TensorIndex> indices, const Scalar& v);
332
+
333
+ Tensor cpu() const {
334
+ return to(options().device(c10::DeviceType::CPU), /*non_blocking*/ false, /*copy*/ false);
335
+ }
336
+
337
+ // TODO: The Python version also accepts arguments
338
+ Tensor cuda() const {
339
+ return to(options().device(c10::DeviceType::CUDA), /*non_blocking*/ false, /*copy*/ false);
340
+ }
341
+
342
+ Tensor hip() const {
343
+ return to(options().device(c10::DeviceType::HIP), /*non_blocking*/ false, /*copy*/ false);
344
+ }
345
+
346
+ Tensor ve() const {
347
+ return to(options().device(c10::DeviceType::VE), /*non_blocking*/ false, /*copy*/ false);
348
+ }
349
+
350
+ Tensor vulkan() const {
351
+ return to(options().device(c10::DeviceType::Vulkan), /*non_blocking*/ false, /*copy*/ false);
352
+ }
353
+
354
+ Tensor metal() const {
355
+ return to(options().device(c10::DeviceType::Metal), /*non_blocking*/ false, /*copy*/ false);
356
+ }
357
+
358
+ Tensor meta() const {
359
+ return to(options().device(c10::DeviceType::Meta), /*non_blocking*/ false, /*copy*/ false);
360
+ }
361
+
362
+ // ~~~~~ Autograd API ~~~~~
363
+
364
+ /// \fn bool is_leaf() const;
365
+ ///
366
+ /// All Tensors that have `requires_grad()` which is ``false`` will be leaf Tensors by convention.
367
+ ///
368
+ /// For Tensors that have `requires_grad()` which is ``true``, they will be leaf Tensors if they were
369
+ /// created by the user. This means that they are not the result of an operation and so
370
+ /// `grad_fn()` is `nullptr`.
371
+ ///
372
+ /// Only leaf Tensors will have their `grad()` populated during a call to `backward()`.
373
+ /// To get `grad()` populated for non-leaf Tensors, you can use `retain_grad()`.
374
+ ///
375
+ /// Example:
376
+ /// @code
377
+ /// auto a = torch::rand(10, torch::requires_grad());
378
+ /// std::cout << a.is_leaf() << std::endl; // prints `true`
379
+ ///
380
+ /// auto b = torch::rand(10, torch::requires_grad()).to(torch::kCUDA);
381
+ /// std::cout << b.is_leaf() << std::endl; // prints `false`
382
+ /// // b was created by the operation that cast a cpu Tensor into a cuda Tensor
383
+ ///
384
+ /// auto c = torch::rand(10, torch::requires_grad()) + 2;
385
+ /// std::cout << c.is_leaf() << std::endl; // prints `false`
386
+ /// // c was created by the addition operation
387
+ ///
388
+ /// auto d = torch::rand(10).cuda();
389
+ /// std::cout << d.is_leaf() << std::endl; // prints `true`
390
+ /// // d does not require gradients and so has no operation creating it (that is tracked by the autograd engine)
391
+ ///
392
+ /// auto e = torch::rand(10).cuda().requires_grad_();
393
+ /// std::cout << e.is_leaf() << std::endl; // prints `true`
394
+ /// // e requires gradients and has no operations creating it
395
+ ///
396
+ /// auto f = torch::rand(10, torch::device(torch::kCUDA).requires_grad(true));
397
+ /// std::cout << f.is_leaf() << std::endl; // prints `true`
398
+ /// // f requires grad, has no operation creating it
399
+ /// @endcode
400
+
401
+ /// \fn void backward(const Tensor & gradient={}, c10::optional<bool> retain_graph=c10::nullopt, bool create_graph=false, c10::optional<TensorList> inputs=c10::nullopt) const;
402
+ ///
403
+ /// Computes the gradient of current tensor with respect to graph leaves.
404
+ ///
405
+ /// The graph is differentiated using the chain rule. If the tensor is
406
+ /// non-scalar (i.e. its data has more than one element) and requires
407
+ /// gradient, the function additionally requires specifying ``gradient``.
408
+ /// It should be a tensor of matching type and location, that contains
409
+ /// the gradient of the differentiated function w.r.t. this Tensor.
410
+ ///
411
+ /// This function accumulates gradients in the leaves - you might need to
412
+ /// zero them before calling it.
413
+ ///
414
+ /// \param gradient Gradient w.r.t. the
415
+ /// tensor. If it is a tensor, it will be automatically converted
416
+ /// to a Tensor that does not require grad unless ``create_graph`` is True.
417
+ /// None values can be specified for scalar Tensors or ones that
418
+ /// don't require grad. If a None value would be acceptable then
419
+ /// this argument is optional.
420
+ /// \param retain_graph If ``false``, the graph used to compute
421
+ /// the grads will be freed. Note that in nearly all cases setting
422
+ /// this option to True is not needed and often can be worked around
423
+ /// in a much more efficient way. Defaults to the value of
424
+ /// ``create_graph``.
425
+ /// \param create_graph If ``true``, graph of the derivative will
426
+ /// be constructed, allowing to compute higher order derivative
427
+ /// products. Defaults to ``false``.
428
+ /// \param inputs Inputs w.r.t. which the gradient will be accumulated into
429
+ /// ``at::Tensor::grad``. All other Tensors will be ignored. If not
430
+ /// provided, the gradient is accumulated into all the leaf Tensors
431
+ /// that were used to compute the current tensor.
432
+ /// When inputs are provided and a given input is not a leaf,
433
+ /// the current implementation will call its grad_fn (even though it is not strictly needed to get this gradients).
434
+ /// It is an implementation detail on which the user should not rely.
435
+ /// See https://github.com/pytorch/pytorch/pull/60521#issuecomment-867061780 for more details.
436
+ void backward(const Tensor & gradient={}, c10::optional<bool> retain_graph=c10::nullopt, bool create_graph=false, c10::optional<TensorList> inputs=c10::nullopt) const {
437
+ // NB: Adding this wrapper to _backward here because we'd like our
438
+ // 'backwards' api to accept the 'inputs' argument optionally. Since code gen
439
+ // currently does not support optional of TensorList our approach is to replace
440
+ // backward in native_functions.yaml with _backward and call it here instead.
441
+ if (inputs.has_value()) {
442
+ TORCH_CHECK(inputs.value().size() > 0, "'inputs' argument to backward cannot be empty")
443
+ this->_backward(inputs.value(), gradient, retain_graph, create_graph);
444
+ } else {
445
+ this->_backward({}, gradient, retain_graph, create_graph);
446
+ }
447
+ }
448
+
449
+ /// \fn Tensor detach() const;
450
+ ///
451
+ /// Returns a new Tensor, detached from the current graph.
452
+ /// The result will never require gradient.
453
+
454
+ /// \fn Tensor & detach_() const;
455
+ ///
456
+ /// Detaches the Tensor from the graph that created it, making it a leaf.
457
+ /// Views cannot be detached in-place.
458
+
459
+ /// \fn void retain_grad() const;
460
+ ///
461
+ /// Enables this Tensor to have their :attr:`grad` populated during
462
+ /// :func:`backward`. This is a no-op for leaf tensors.
463
+
464
+ /// \fn bool retains_grad() const;
465
+ ///
466
+ /// Is ``true`` if this Tensor is non-leaf and its :attr:`grad` is enabled to be
467
+ /// populated during :func:`backward`, ``false`` otherwise.
468
+
469
+ const Tensor& set_requires_grad(bool requires_grad) const {
470
+ TensorBase::set_requires_grad(requires_grad);
471
+ return *this;
472
+ }
473
+
474
+ /// Return a mutable reference to the gradient. This is conventionally
475
+ /// used as `t.grad() = x` to set a gradient to a completely new tensor.
476
+ /// Note that this function work with a non-const Tensor and is not
477
+ /// thread safe.
478
+ Tensor& mutable_grad() const {
479
+ return impl_->mutable_grad();
480
+ }
481
+
482
+ /// This function returns an undefined tensor by default and returns a defined tensor
483
+ /// the first time a call to `backward()` computes gradients for this Tensor.
484
+ /// The attribute will then contain the gradients computed and future calls
485
+ /// to `backward()` will accumulate (add) gradients into it.
486
+ const Tensor& grad() const {
487
+ const Tensor& maybe_grad = impl_->grad();
488
+ if (!is_leaf() && !retains_grad() && !maybe_grad.defined()) {
489
+ TORCH_WARN(
490
+ "The .grad attribute of a Tensor that is not a leaf Tensor is being accessed. Its .grad "
491
+ "attribute won't be populated during autograd.backward(). If you indeed want the .grad "
492
+ "field to be populated for a non-leaf Tensor, use .retain_grad() on the non-leaf Tensor. "
493
+ "If you access the non-leaf Tensor by mistake, make sure you access the leaf Tensor "
494
+ "instead. See github.com/pytorch/pytorch/pull/30531 for more informations.");
495
+ }
496
+ return maybe_grad;
497
+ }
498
+
499
+ // The Forward AD API functions below are low level and are not to be used by end
500
+ // users who should use the API provided in torch/csrc/autograd.h
501
+
502
+ /// This function returns the forward gradient for this Tensor at the given level.
503
+ const Tensor& _fw_grad(uint64_t level) const {
504
+ return impl_->_fw_grad(level, *this);
505
+ }
506
+
507
+ /// This function can be used to set the value of the forward grad.
508
+ /// Note that the given new_grad might not be used directly if it has different
509
+ /// metadata (size/stride/storage offset) compared to this Tensor. In that case,
510
+ /// new_grad content will be copied into a new Tensor
511
+ void _set_fw_grad(const TensorBase& new_grad, uint64_t level, bool is_inplace_op) const {
512
+ impl_->_set_fw_grad(new_grad, *this, level, is_inplace_op);
513
+ }
514
+
515
+
516
+ // STOP. Thinking of adding a method here, which only makes use
517
+ // of other ATen methods? Define it in native_functions.yaml.
518
+
519
+ //example
520
+ //Tensor * add(Tensor & b);
521
+ ${tensor_method_declarations}
522
+
523
+ // Special C++ only overloads for std()-like functions (See gh-40287)
524
+ // These are needed because int -> bool conversion takes precedence over int -> IntArrayRef
525
+ // So, for example std(0) would select the std(unbiased=False) overload
526
+
527
+ Tensor var(int dim) const {
528
+ return var(IntArrayRef{dim});
529
+ }
530
+
531
+ Tensor std(int dim) const {
532
+ return std(IntArrayRef{dim});
533
+ }
534
+
535
+ // We changed .dtype() to return a TypeMeta in #12766. Ideally, we want the
536
+ // at::kDouble and its friends to be TypeMeta's, but that hasn't happened yet.
537
+ // Before that change, we make this method to maintain BC for C++ usage like
538
+ // `x.to(y.dtype)`.
539
+ // TODO: remove following two after at::kDouble and its friends are TypeMeta's.
540
+ inline Tensor to(caffe2::TypeMeta type_meta, bool non_blocking=false, bool copy=false) const {
541
+ return this->to(/*scalar_type=*/typeMetaToScalarType(type_meta), non_blocking, copy);
542
+ }
543
+ inline Tensor to(Device device, caffe2::TypeMeta type_meta, bool non_blocking=false, bool copy=false) const {
544
+ return this->to(device, /*scalar_type=*/typeMetaToScalarType(type_meta), non_blocking, copy);
545
+ }
546
+
547
+ template <typename F, typename... Args>
548
+ decltype(auto) m(F func, Args&&... params) const {
549
+ return func(*this, std::forward<Args>(params)...);
550
+ }
551
+
552
+ /// NOTE: This is similar to the legacy `.data()` function on `Variable`, and is intended
553
+ /// to be used from functions that need to access the `Variable`'s equivalent `Tensor`
554
+ /// (i.e. `Tensor` that shares the same storage and tensor metadata with the `Variable`).
555
+ ///
556
+ /// One notable difference with the legacy `.data()` function is that changes to the
557
+ /// returned `Tensor`'s tensor metadata (e.g. sizes / strides / storage / storage_offset)
558
+ /// will not update the original `Variable`, due to the fact that this function
559
+ /// shallow-copies the `Variable`'s underlying TensorImpl.
560
+ at::Tensor tensor_data() const {
561
+ return TensorBase::tensor_data();
562
+ }
563
+
564
+ /// NOTE: `var.variable_data()` in C++ has the same semantics as `tensor.data`
565
+ /// in Python, which create a new `Variable` that shares the same storage and
566
+ /// tensor metadata with the original `Variable`, but with a completely new
567
+ /// autograd history.
568
+ ///
569
+ /// NOTE: If we change the tensor metadata (e.g. sizes / strides /
570
+ /// storage / storage_offset) of a variable created from `var.variable_data()`, those
571
+ /// changes will not update the original variable `var`. In `.variable_data()`, we set
572
+ /// `allow_tensor_metadata_change_` to false to make such changes explicitly illegal,
573
+ /// in order to prevent users from changing metadata of `var.variable_data()`
574
+ /// and expecting the original variable `var` to also be updated.
575
+ at::Tensor variable_data() const {
576
+ return TensorBase::variable_data();
577
+ }
578
+
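A short sketch contrasting the two accessors documented above. Both return a `Tensor` sharing storage with the original, but only `variable_data()` mirrors Python's `tensor.data` (fresh autograd history, metadata changes disallowed). Names are illustrative:

```cpp
#include <torch/torch.h>

void data_accessor_demo() {
  auto x = torch::ones({2, 2}, torch::requires_grad());

  // Same storage, brand-new autograd history (like `x.data` in Python);
  // metadata changes on `v` are explicitly disallowed.
  at::Tensor v = x.variable_data();

  // Same storage and metadata, but the TensorImpl is shallow-copied, so
  // metadata changes on `t` do not propagate back to `x`.
  at::Tensor t = x.tensor_data();

  (void)v;
  (void)t;
}
```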
579
+ // Hooks
580
+ //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
581
+
582
+ template <typename T>
583
+ using hook_return_void_t = std::enable_if_t<std::is_void<typename c10::invoke_result_t<T&, Tensor>>::value, unsigned>;
584
+ template <typename T>
585
+ using hook_return_var_t = std::enable_if_t<std::is_same<typename c10::invoke_result_t<T&, Tensor>, Tensor>::value, unsigned>;
586
+
587
+ /// Registers a backward hook.
588
+ ///
589
+ /// The hook will be called every time a gradient with respect to the Tensor is computed.
590
+ /// The hook should have one of the following signatures:
591
+ /// ```
592
+ /// hook(Tensor grad) -> Tensor
593
+ /// ```
594
+ /// ```
595
+ /// hook(Tensor grad) -> void
596
+ /// ```
597
+ /// The hook should not modify its argument, but it can optionally return a new gradient
598
+ /// which will be used in place of `grad`.
599
+ ///
600
+ /// This function returns the index of the hook in the list, which can be used to remove the hook.
601
+ ///
602
+ /// Example:
603
+ /// @code
604
+ /// auto v = torch::tensor({0., 0., 0.}, torch::requires_grad());
605
+ /// auto h = v.register_hook([](torch::Tensor grad){ return grad * 2; }); // double the gradient
606
+ /// v.backward(torch::tensor({1., 2., 3.}));
607
+ /// // This prints:
608
+ /// // ```
609
+ /// // 2
610
+ /// // 4
611
+ /// // 6
612
+ /// // [ CPUFloatType{3} ]
613
+ /// // ```
614
+ /// std::cout << v.grad() << std::endl;
615
+ /// v.remove_hook(h); // removes the hook
616
+ /// @endcode
617
+ template <typename T>
618
+ hook_return_void_t<T> register_hook(T&& hook) const;
619
+ template <typename T>
620
+ hook_return_var_t<T> register_hook(T&& hook) const;
621
+
622
+ // Variable methods
623
+ //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
624
+
625
+ Tensor data() const {
626
+ return TensorBase::data();
627
+ }
628
+
629
+ void _backward(TensorList inputs, const c10::optional<Tensor>& gradient, c10::optional<bool> keep_graph, bool create_graph) const;
630
+
631
+ const Tensor& requires_grad_(bool _requires_grad=true) const {
632
+ TensorBase::requires_grad_(_requires_grad);
633
+ return *this;
634
+ }
635
+ };
636
+
637
+ namespace detail {
638
+ // Helper creator for the Tensor class which doesn't require the user to pass
639
+ // in an intrusive_ptr; instead it converts the arguments passed to the
640
+ // requested intrusive_ptr type.
641
+ template <typename T, typename... Args>
642
+ Tensor make_tensor(Args&&... args) {
643
+ return Tensor(c10::make_intrusive<T>(std::forward<Args>(args)...));
644
+ }
645
+
646
+ } // namespace detail
647
+
648
+ } // namespace at
649
+
650
+
651
+ namespace at {
652
+ ${tensor_method_definitions}
653
+ } // namespace at
654
+
655
+
656
+ namespace c10 {
657
+ template <>
658
+ struct MaybeOwnedTraits<at::Tensor> {
659
+ using owned_type = at::Tensor;
660
+ using borrow_type = at::Tensor;
661
+
662
+ static borrow_type createBorrow(const owned_type& from) {
663
+ // NOTE: this can be implemented without the special
664
+ // unsafe_borrow_t Tensor constructor as
665
+ //
666
+ // return borrow_type(c10::intrusive_ptr<at::TensorImpl, at::UndefinedTensorImpl>::reclaim(from.unsafeGetTensorImpl()));
667
+ //
668
+ // but that hurts inlining due to the nullptr check in the
669
+ // Tensor(c10::intrusive_ptr<...>) constructor. We already know
670
+ // that from.impl_ isn't null because from is a valid Tensor, so
671
+ // we needn't do the check again. (using __builtin_assume can
672
+ // avoid this, but wouldn't be portable to MSVC.)
673
+ return borrow_type(borrow_type::unsafe_borrow_t{}, from);
674
+ }
675
+
676
+ static void assignBorrow(borrow_type& lhs, const borrow_type& rhs) {
677
+ lhs.unsafeReleaseTensorImpl();
678
+ // See above note: this can be implemented with public API
679
+ // similarly to createBorrow(), but that would hurt inlining.
680
+ lhs = borrow_type(borrow_type::unsafe_borrow_t{}, rhs);
681
+ }
682
+
683
+ static void destroyBorrow(borrow_type& toDestroy) {
684
+ toDestroy.unsafeReleaseTensorImpl(); // "leak" it, but it was already +0.
685
+ }
686
+
687
+ static const owned_type& referenceFromBorrow(const borrow_type& borrow) {
688
+ return borrow;
689
+ }
690
+
691
+ static const owned_type* pointerFromBorrow(const borrow_type& borrow) {
692
+ return &borrow;
693
+ }
694
+
695
+ static bool debugBorrowIsValid(const borrow_type& /*borrow*/) {
696
+ return true;
697
+ }
698
+ };
699
+
700
+ template <>
701
+ struct ExclusivelyOwnedTraits<at::Tensor> {
702
+ using repr_type = at::Tensor;
703
+ using pointer_type = at::Tensor*;
704
+ using const_pointer_type = const at::Tensor*;
705
+
706
+ static repr_type nullRepr() {
707
+ return at::Tensor();
708
+ }
709
+
710
+ template <class... Args>
711
+ static repr_type createInPlace(Args&&... args) {
712
+ return at::Tensor(std::forward<Args>(args)...);
713
+ }
714
+
715
+ static repr_type moveToRepr(at::Tensor&& x) {
716
+ return std::move(x);
717
+ }
718
+
719
+ static void destroyOwned(at::Tensor& x) {
720
+ return ExclusivelyOwnedTraits<at::TensorBase>::destroyOwned(x);
721
+ }
722
+
723
+ static at::Tensor take(at::Tensor& x) {
724
+ return std::move(x);
725
+ }
726
+
727
+ static pointer_type getImpl(repr_type& x) {
728
+ return &x;
729
+ }
730
+
731
+ static const_pointer_type getImpl(const repr_type& x) {
732
+ return &x;
733
+ }
734
+ };
735
+ } // namespace c10
736
+
737
+ namespace at {
738
+
739
+ inline c10::MaybeOwned<Tensor> borrow_from_optional_tensor(
740
+ const c10::optional<Tensor>& opt) {
741
+ return opt.has_value()
742
+ ? c10::MaybeOwned<Tensor>::borrowed(*opt)
743
+ : c10::MaybeOwned<Tensor>::owned(std::in_place);
744
+ }
745
+
746
+ inline c10::MaybeOwned<Tensor> Tensor::expect_contiguous(MemoryFormat memory_format) const & {
747
+ if (is_contiguous(memory_format)) {
748
+ return c10::MaybeOwned<Tensor>::borrowed(*this);
749
+ } else {
750
+ return c10::MaybeOwned<Tensor>::owned(__dispatch_contiguous(memory_format));
751
+ }
752
+ }
753
+ } // namespace at
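A sketch of how the `MaybeOwned` machinery above is typically used inside a kernel: borrow when no work is needed, own a freshly materialized tensor otherwise. The function and parameter names are illustrative, not part of the generated header:

```cpp
#include <ATen/ATen.h>

// Sum `self` (optionally weighted) without paying for a refcount bump or a
// contiguity copy unless one is actually required.
at::Tensor weighted_sum_sketch(const at::Tensor& self,
                               const c10::optional<at::Tensor>& weight_opt) {
  // Borrowed if a weight was passed, an owned undefined Tensor otherwise.
  c10::MaybeOwned<at::Tensor> weight = at::borrow_from_optional_tensor(weight_opt);

  // Borrows `self` when it is already contiguous; otherwise owns a contiguous copy.
  c10::MaybeOwned<at::Tensor> self_contig =
      self.expect_contiguous(at::MemoryFormat::Contiguous);

  if (weight->defined()) {
    return (*self_contig * *weight).sum();
  }
  return self_contig->sum();
}
```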
llmeval-env/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/TensorMethods.cpp ADDED
@@ -0,0 +1,61 @@
1
+ #include <c10/core/Scalar.h>
2
+ #include <ATen/core/TensorBody.h>
3
+
4
+ #include <c10/util/string_view.h>
5
+
6
+ namespace at {
7
+
8
+ namespace {
9
+
10
+ // Verifies the requested type is the same as the Tensor's type.
11
+ void check_type(const TensorBase& tensor, ScalarType type, c10::string_view type_name) {
12
+ TORCH_CHECK(
13
+ tensor.scalar_type() == type
14
+ || (isQIntType(tensor.scalar_type())
15
+ && toUnderlying(tensor.scalar_type()) == type),
16
+ "expected scalar type ", type_name, " but found ", tensor.scalar_type());
17
+ }
18
+
19
+ } // namespace
20
+
21
+ #define DEFINE_CAST(T, name) \
22
+ template <> \
23
+ TORCH_API const T* TensorBase::const_data_ptr() const { \
24
+ check_type(*this, ScalarType::name, #name); \
25
+ return this->unsafeGetTensorImpl()->data_ptr_impl<T>(); \
26
+ } \
27
+ \
28
+ template <> \
29
+ TORCH_API const T* TensorBase::const_data_ptr<const T>() const { \
30
+ check_type(*this, ScalarType::name, #name); \
31
+ return this->unsafeGetTensorImpl()->data_ptr_impl<std::remove_const_t<T>>(); \
32
+ } \
33
+ \
34
+ template <> \
35
+ TORCH_API T* TensorBase::mutable_data_ptr() const { \
36
+ check_type(*this, ScalarType::name, #name); \
37
+ return this->unsafeGetTensorImpl()->mutable_data_ptr_impl<T>(); \
38
+ } \
39
+ \
40
+ template <> \
41
+ TORCH_API T* TensorBase::data_ptr() const { \
42
+ return mutable_data_ptr<T>(); \
43
+ } \
44
+
45
+ AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(DEFINE_CAST)
46
+ AT_FORALL_QINT_TYPES(DEFINE_CAST)
47
+ DEFINE_CAST(uint16_t, UInt16)
48
+ DEFINE_CAST(uint32_t, UInt32)
49
+ DEFINE_CAST(uint64_t, UInt64)
50
+ #undef DEFINE_CAST
51
+
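Each `DEFINE_CAST` expansion above guards the raw pointer behind `check_type`, so requesting the wrong element type throws instead of reinterpreting memory. A hedged usage sketch:

```cpp
#include <ATen/ATen.h>

void data_ptr_demo() {
  at::Tensor t = at::zeros({4}, at::kFloat);

  float* p = t.mutable_data_ptr<float>();       // OK: matches ScalarType::Float
  p[0] = 1.0f;

  const float* cp = t.const_data_ptr<float>();  // OK: read-only view of the same buffer
  (void)cp;

  // t.mutable_data_ptr<double>() would trip the TORCH_CHECK in check_type with
  // "expected scalar type Double but found Float".
}
```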
52
+ #define DEFINE_ITEM(T, name) \
53
+ template <> \
54
+ TORCH_API T Tensor::item() const { \
55
+ return item().to##name(); \
56
+ }
57
+
58
+ AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(DEFINE_ITEM)
59
+ #undef DEFINE_ITEM
60
+
61
+ } //namespace at
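Similarly, the `DEFINE_ITEM` specializations let a one-element tensor be read back as a plain C++ value: the generic `item()` returns a `Scalar`, and `item<T>()` converts it. A brief sketch:

```cpp
#include <ATen/ATen.h>

void item_demo() {
  at::Tensor t = at::arange(10, at::kFloat);
  at::Tensor total = t.sum();      // 0-dim tensor

  at::Scalar s = total.item();     // generic form: a Scalar
  float f = total.item<float>();   // typed specialization stamped out by DEFINE_ITEM
  (void)s;
  (void)f;
}
```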
llmeval-env/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/UfuncCPU.cpp ADDED
@@ -0,0 +1,19 @@
1
+ #define TORCH_ASSERT_NO_OPERATORS
2
+
3
+ #include <ATen/native/DispatchStub.h>
4
+ #include <ATen/TensorIterator.h>
5
+ #include <ATen/TensorMeta.h>
6
+
7
+ namespace at {
8
+
9
+ // NB: this is explicitly copied here (via codegen) rather than
10
+ // included via NativeFunctions.h to avoid recompiling this file when
11
+ // NativeFunctions.h changes
12
+ namespace meta {
13
+ ${meta_declaration}
14
+ }
15
+
16
+ namespace native {
17
+ ${native_declaration}
18
+ ${native_definitions}
19
+ }} // namespace at::native
llmeval-env/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/UfuncCPUKernel.cpp ADDED
@@ -0,0 +1,14 @@
1
+ #define TORCH_ASSERT_NO_OPERATORS
2
+
3
+ #include <ATen/native/ufunc/${name}.h>
4
+ #include <ATen/native/DispatchStub.h>
5
+ #include <ATen/TensorIterator.h>
6
+ #include <ATen/native/cpu/Loops.h>
7
+ #include <ATen/cpu/vec/vec.h>
8
+ #include <ATen/Dispatch.h>
9
+ #include <c10/core/Scalar.h>
10
+
11
+ namespace at {
12
+ namespace native {
13
+ ${native_definitions}
14
+ }} // namespace at::native
llmeval-env/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/UnboxingFunctions.h ADDED
@@ -0,0 +1,32 @@
1
+ // ${generated_comment}
2
+
3
+ // Generated by tools/jit/gen_unboxing.py. This file declares code-generated boxed C++ functions for operators,
4
+ // based on native_functions.yaml (or a similar yaml file with the same syntax). The definition of such a boxed
5
+ // function pops IValues off the stack and then converts them into the correct C++ types based on the given schema. This
6
+ // unboxing logic is an alternative to template-based metaprogramming unboxing.
7
+
8
+ #pragma once
9
+
10
+ #include <ATen/ATen.h>
11
+ namespace at {
12
+ namespace unboxing {
13
+ namespace {
14
+
15
+ template<typename T, size_t N>
16
+ std::array<T, N> as_array(const c10::List<c10::IValue>& list) {
17
+ std::array<T, N> res;
18
+ AT_ASSERT(list.size() == N);
19
+ std::vector<T> vec;
20
+ for (c10::IValue elem : list) {
21
+ vec.push_back(elem.to<T>());
22
+ }
23
+ std::copy(vec.begin(), vec.end(), res.begin());
24
+ return res;
25
+ }
26
+ } // namespace <anonymous>
27
+ using Stack = std::vector<c10::IValue>;
28
+ // Generated function declaration
29
+ ${declarations}
30
+
31
+ } // namespace unboxing
32
+ } // namespace at
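To make the header comment in UnboxingFunctions.h concrete, a hand-written (purely illustrative, not generated) boxed wrapper for an add-like op might look as follows: it pops IValues off the stack, converts them to typed arguments per the schema, calls the unboxed op, and pushes the result back.

```cpp
#include <ATen/ATen.h>
#include <ATen/core/ivalue.h>
#include <vector>

using Stack = std::vector<c10::IValue>;

// Illustrative boxed wrapper for a schema like
//   "add(Tensor self, Tensor other, Scalar alpha) -> Tensor".
void boxed_add(Stack& stack) {
  // Arguments were pushed in schema order, so pop them off in reverse.
  at::Scalar alpha = stack.back().toScalar(); stack.pop_back();
  at::Tensor other = stack.back().toTensor(); stack.pop_back();
  at::Tensor self  = stack.back().toTensor(); stack.pop_back();

  at::Tensor result = at::add(self, other, alpha);

  // Push the result back as an IValue for the caller to consume.
  stack.emplace_back(std::move(result));
}
```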
llmeval-env/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/enum_tag.h ADDED
@@ -0,0 +1,10 @@
1
+ #pragma once
2
+
3
+ // ${generated_comment}
4
+
5
+ namespace at {
6
+ // Enum of valid tags obtained from the entries in tags.yaml
7
+ enum class Tag {
8
+ ${enum_of_valid_tags}
9
+ };
10
+ }
llmeval-env/lib/python3.10/site-packages/torchgen/packaged/autograd/BUILD.bazel ADDED
@@ -0,0 +1,4 @@
1
+ load("//:tools/bazel.bzl", "rules")
2
+ load(":build.bzl", "define_targets")
3
+
4
+ define_targets(rules = rules)
llmeval-env/lib/python3.10/site-packages/torchgen/packaged/autograd/README.md ADDED
@@ -0,0 +1,3 @@
1
+ If you add a file to this directory, you **MUST** update
2
+ `torch/CMakeLists.txt` and add the file as a dependency to
3
+ the `add_custom_command` call.
llmeval-env/lib/python3.10/site-packages/torchgen/packaged/autograd/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_annotated_fn_args.cpython-310.pyc ADDED
Binary file (4.25 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torchgen/packaged/autograd/build.bzl ADDED
@@ -0,0 +1,14 @@
1
+ def define_targets(rules):
2
+ rules.py_library(
3
+ name = "autograd",
4
+ srcs = rules.glob(["*.py"]),
5
+ data = rules.glob([
6
+ "*.yaml",
7
+ "templates/*",
8
+ ]),
9
+ visibility = ["//:__subpackages__"],
10
+ deps = [
11
+ rules.requirement("PyYAML"),
12
+ "//torchgen",
13
+ ],
14
+ )
llmeval-env/lib/python3.10/site-packages/torchgen/packaged/autograd/context.py ADDED
@@ -0,0 +1,31 @@
1
+ import functools
2
+ from typing import Callable
3
+
4
+ from torchgen.api.autograd import NativeFunctionWithDifferentiabilityInfo as NFWDI
5
+ from torchgen.context import native_function_manager
6
+ from torchgen.utils import T
7
+
8
+
9
+ # Like tools.api.context.with_native_function, but for
10
+ # NativeFunctionWithDifferentiabilityInfo.
11
+ def with_native_function_with_differentiability_info(
12
+ func: Callable[[NFWDI], T]
13
+ ) -> Callable[[NFWDI], T]:
14
+ @functools.wraps(func)
15
+ def wrapper(f: NFWDI) -> T:
16
+ with native_function_manager(f.func):
17
+ return func(f)
18
+
19
+ return wrapper
20
+
21
+
22
+ # Like the above but with an additional dispatch key string argument
23
+ def with_native_function_with_differentiability_info_and_key(
24
+ func: Callable[[NFWDI, str], T]
25
+ ) -> Callable[[NFWDI, str], T]:
26
+ @functools.wraps(func)
27
+ def wrapper(f: NFWDI, key: str) -> T:
28
+ with native_function_manager(f.func):
29
+ return func(f, key)
30
+
31
+ return wrapper
llmeval-env/lib/python3.10/site-packages/torchgen/packaged/autograd/deprecated.yaml ADDED
@@ -0,0 +1,134 @@
1
+ # Deprecated function signatures. These are exposed in Python, but not included
2
+ # in the error message suggestions.
3
+
4
+ - name: add(Tensor self, Scalar alpha, Tensor other) -> Tensor
5
+ aten: add(self, other, alpha)
6
+
7
+ - name: add_(Tensor(a!) self, Scalar alpha, Tensor other) -> Tensor(a!)
8
+ aten: add_(self, other, alpha)
9
+
10
+ - name: add(Tensor self, Scalar alpha, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
11
+ aten: add_out(out, self, other, alpha)
12
+
13
+ - name: addbmm(Scalar beta, Tensor self, Scalar alpha, Tensor batch1, Tensor batch2) -> Tensor
14
+ aten: addbmm(self, batch1, batch2, beta, alpha)
15
+
16
+ - name: addbmm_(Scalar beta, Tensor(a!) self, Scalar alpha, Tensor batch1, Tensor batch2) -> Tensor(a!)
17
+ aten: addbmm_(self, batch1, batch2, beta, alpha)
18
+
19
+ - name: addbmm(Scalar beta, Tensor self, Scalar alpha, Tensor batch1, Tensor batch2, *, Tensor(a!) out) -> Tensor(a!)
20
+ aten: addbmm_out(out, self, batch1, batch2, beta, alpha)
21
+
22
+ - name: addbmm(Scalar beta, Tensor self, Tensor batch1, Tensor batch2) -> Tensor
23
+ aten: addbmm(self, batch1, batch2, beta, 1)
24
+
25
+ - name: addbmm_(Scalar beta, Tensor(a!) self, Tensor batch1, Tensor batch2) -> Tensor(a!)
26
+ aten: addbmm_(self, batch1, batch2, beta, 1)
27
+
28
+ - name: addbmm(Scalar beta, Tensor self, Tensor batch1, Tensor batch2, *, Tensor(a!) out) -> Tensor(a!)
29
+ aten: addbmm_out(out, self, batch1, batch2, beta, 1)
30
+
31
+ - name: addcdiv(Tensor self, Scalar value, Tensor tensor1, Tensor tensor2) -> Tensor
32
+ aten: addcdiv(self, tensor1, tensor2, value)
33
+
34
+ - name: addcdiv_(Tensor(a!) self, Scalar value, Tensor tensor1, Tensor tensor2) -> Tensor(a!)
35
+ aten: addcdiv_(self, tensor1, tensor2, value)
36
+
37
+ - name: addcdiv(Tensor self, Scalar value, Tensor tensor1, Tensor tensor2, *, Tensor(a!) out) -> Tensor(a!)
38
+ aten: addcdiv_out(out, self, tensor1, tensor2, value)
39
+
40
+ - name: addcmul(Tensor self, Scalar value, Tensor tensor1, Tensor tensor2) -> Tensor
41
+ aten: addcmul(self, tensor1, tensor2, value)
42
+
43
+ - name: addcmul_(Tensor(a!) self, Scalar value, Tensor tensor1, Tensor tensor2) -> Tensor(a!)
44
+ aten: addcmul_(self, tensor1, tensor2, value)
45
+
46
+ - name: addcmul(Tensor self, Scalar value, Tensor tensor1, Tensor tensor2, *, Tensor(a!) out) -> Tensor(a!)
47
+ aten: addcmul_out(out, self, tensor1, tensor2, value)
48
+
49
+ - name: addmm(Scalar beta, Tensor self, Scalar alpha, Tensor mat1, Tensor mat2) -> Tensor
50
+ aten: addmm(self, mat1, mat2, beta, alpha)
51
+
52
+ - name: addmm_(Scalar beta, Tensor(a!) self, Scalar alpha, Tensor mat1, Tensor mat2) -> Tensor(a!)
53
+ aten: addmm_(self, mat1, mat2, beta, alpha)
54
+
55
+ - name: addmm(Scalar beta, Tensor self, Scalar alpha, Tensor mat1, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
56
+ aten: addmm_out(out, self, mat1, mat2, beta, alpha)
57
+
58
+ - name: addmm(Scalar beta, Tensor self, Tensor mat1, Tensor mat2) -> Tensor
59
+ aten: addmm(self, mat1, mat2, beta, 1)
60
+
61
+ - name: addmm_(Scalar beta, Tensor(a!) self, Tensor mat1, Tensor mat2) -> Tensor(a!)
62
+ aten: addmm_(self, mat1, mat2, beta, 1)
63
+
64
+ - name: addmm(Scalar beta, Tensor self, Tensor mat1, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
65
+ aten: addmm_out(out, self, mat1, mat2, beta, 1)
66
+
67
+ - name: sspaddmm(Scalar beta, Tensor self, Scalar alpha, Tensor mat1, Tensor mat2) -> Tensor
68
+ aten: sspaddmm(self, mat1, mat2, beta, alpha)
69
+
70
+ - name: sspaddmm(Scalar beta, Tensor self, Tensor mat1, Tensor mat2) -> Tensor
71
+ aten: sspaddmm(self, mat1, mat2, beta, 1)
72
+
73
+ - name: addmv(Scalar beta, Tensor self, Scalar alpha, Tensor mat, Tensor vec) -> Tensor
74
+ aten: addmv(self, mat, vec, beta, alpha)
75
+
76
+ - name: addmv_(Scalar beta, Tensor(a!) self, Scalar alpha, Tensor mat, Tensor vec) -> Tensor(a!)
77
+ aten: addmv_(self, mat, vec, beta, alpha)
78
+
79
+ - name: addmv(Scalar beta, Tensor self, Scalar alpha, Tensor mat, Tensor vec, *, Tensor(a!) out) -> Tensor(a!)
80
+ aten: addmv_out(out, self, mat, vec, beta, alpha)
81
+
82
+ - name: addmv(Scalar beta, Tensor self, Tensor mat, Tensor vec) -> Tensor
83
+ aten: addmv(self, mat, vec, beta, 1)
84
+
85
+ - name: addmv_(Scalar beta, Tensor(a!) self, Tensor mat, Tensor vec) -> Tensor(a!)
86
+ aten: addmv_(self, mat, vec, beta, 1)
87
+
88
+ - name: addmv(Scalar beta, Tensor self, Tensor mat, Tensor vec, *, Tensor(a!) out) -> Tensor(a!)
89
+ aten: addmv_out(out, self, mat, vec, beta, 1)
90
+
91
+ - name: addr(Scalar beta, Tensor self, Scalar alpha, Tensor vec1, Tensor vec2) -> Tensor
92
+ aten: addr(self, vec1, vec2, beta, alpha)
93
+
94
+ - name: addr_(Scalar beta, Tensor(a!) self, Scalar alpha, Tensor vec1, Tensor vec2) -> Tensor(a!)
95
+ aten: addr_(self, vec1, vec2, beta, alpha)
96
+
97
+ - name: addr(Scalar beta, Tensor self, Scalar alpha, Tensor vec1, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)
98
+ aten: addr_out(out, self, vec1, vec2, beta, alpha)
99
+
100
+ - name: addr(Scalar beta, Tensor self, Tensor vec1, Tensor vec2) -> Tensor
101
+ aten: addr(self, vec1, vec2, beta, 1)
102
+
103
+ - name: addr_(Scalar beta, Tensor(a!) self, Tensor vec1, Tensor vec2) -> Tensor(a!)
104
+ aten: addr_(self, vec1, vec2, beta, 1)
105
+
106
+ - name: addr(Scalar beta, Tensor self, Tensor vec1, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)
107
+ aten: addr_out(out, self, vec1, vec2, beta, 1)
108
+
109
+ - name: baddbmm(Scalar beta, Tensor self, Scalar alpha, Tensor batch1, Tensor batch2) -> Tensor
110
+ aten: baddbmm(self, batch1, batch2, beta, alpha)
111
+
112
+ - name: baddbmm_(Scalar beta, Tensor(a!) self, Scalar alpha, Tensor batch1, Tensor batch2) -> Tensor(a!)
113
+ aten: baddbmm_(self, batch1, batch2, beta, alpha)
114
+
115
+ - name: baddbmm(Scalar beta, Tensor self, Scalar alpha, Tensor batch1, Tensor batch2, *, Tensor(a!) out) -> Tensor(a!)
116
+ aten: baddbmm_out(out, self, batch1, batch2, beta, alpha)
117
+
118
+ - name: baddbmm(Scalar beta, Tensor self, Tensor batch1, Tensor batch2) -> Tensor
119
+ aten: baddbmm(self, batch1, batch2, beta, 1)
120
+
121
+ - name: baddbmm_(Scalar beta, Tensor(a!) self, Tensor batch1, Tensor batch2) -> Tensor(a!)
122
+ aten: baddbmm_(self, batch1, batch2, beta, 1)
123
+
124
+ - name: baddbmm(Scalar beta, Tensor self, Tensor batch1, Tensor batch2, *, Tensor(a!) out) -> Tensor(a!)
125
+ aten: baddbmm_out(out, self, batch1, batch2, beta, 1)
126
+
127
+ - name: sub(Tensor self, Scalar alpha, Tensor other) -> Tensor
128
+ aten: sub(self, other, alpha)
129
+
130
+ - name: sub_(Tensor(a!) self, Scalar alpha, Tensor other) -> Tensor(a!)
131
+ aten: sub_(self, other, alpha)
132
+
133
+ - name: sub(Tensor self, Scalar alpha, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
134
+ aten: sub_out(out, self, other, alpha)
llmeval-env/lib/python3.10/site-packages/torchgen/packaged/autograd/derivatives.yaml ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/torchgen/packaged/autograd/gen_annotated_fn_args.py ADDED
@@ -0,0 +1,129 @@
1
+ """
2
+ For procedural tests needed for __torch_function__, we use this function
3
+ to export method names and signatures as needed by the tests in
4
+ test/test_overrides.py.
5
+
6
+ python -m tools.autograd.gen_annotated_fn_args \
7
+ aten/src/ATen/native/native_functions.yaml \
8
+ aten/src/ATen/native/tags.yaml \
9
+ $OUTPUT_DIR \
10
+ tools/autograd
11
+
12
+ Where $OUTPUT_DIR is where you would like the files to be
13
+ generated. In the full build system, OUTPUT_DIR is
14
+ torch/testing/_internal/generated
15
+ """
16
+
17
+ import argparse
18
+ import os
19
+ import textwrap
20
+ from collections import defaultdict
21
+
22
+ from typing import Any, Dict, List, Sequence
23
+
24
+ import torchgen.api.python as python
25
+ from torchgen.context import with_native_function
26
+
27
+ from torchgen.gen import parse_native_yaml
28
+ from torchgen.model import Argument, BaseOperatorName, NativeFunction
29
+ from torchgen.utils import FileManager
30
+
31
+ from .gen_python_functions import (
32
+ is_py_fft_function,
33
+ is_py_linalg_function,
34
+ is_py_nn_function,
35
+ is_py_special_function,
36
+ is_py_torch_function,
37
+ is_py_variable_method,
38
+ should_generate_py_binding,
39
+ )
40
+
41
+
42
+ def gen_annotated(
43
+ native_yaml_path: str, tags_yaml_path: str, out: str, autograd_dir: str
44
+ ) -> None:
45
+ native_functions = parse_native_yaml(
46
+ native_yaml_path, tags_yaml_path
47
+ ).native_functions
48
+ mappings = (
49
+ (is_py_torch_function, "torch._C._VariableFunctions"),
50
+ (is_py_nn_function, "torch._C._nn"),
51
+ (is_py_linalg_function, "torch._C._linalg"),
52
+ (is_py_special_function, "torch._C._special"),
53
+ (is_py_fft_function, "torch._C._fft"),
54
+ (is_py_variable_method, "torch.Tensor"),
55
+ )
56
+ annotated_args: List[str] = []
57
+ for pred, namespace in mappings:
58
+ groups: Dict[BaseOperatorName, List[NativeFunction]] = defaultdict(list)
59
+ for f in native_functions:
60
+ if not should_generate_py_binding(f) or not pred(f):
61
+ continue
62
+ groups[f.func.name.name].append(f)
63
+ for group in groups.values():
64
+ for f in group:
65
+ annotated_args.append(f"{namespace}.{gen_annotated_args(f)}")
66
+
67
+ template_path = os.path.join(autograd_dir, "templates")
68
+ fm = FileManager(install_dir=out, template_dir=template_path, dry_run=False)
69
+ fm.write_with_template(
70
+ "annotated_fn_args.py",
71
+ "annotated_fn_args.py.in",
72
+ lambda: {
73
+ "annotated_args": textwrap.indent("\n".join(annotated_args), " "),
74
+ },
75
+ )
76
+
77
+
78
+ @with_native_function
79
+ def gen_annotated_args(f: NativeFunction) -> str:
80
+ def _get_kwargs_func_exclusion_list() -> List[str]:
81
+ # functions that currently don't work with kwargs in test_overrides.py
82
+ return [
83
+ "diagonal",
84
+ "round_",
85
+ "round",
86
+ "scatter_",
87
+ ]
88
+
89
+ def _add_out_arg(
90
+ out_args: List[Dict[str, Any]], args: Sequence[Argument], *, is_kwarg_only: bool
91
+ ) -> None:
92
+ for arg in args:
93
+ if arg.default is not None:
94
+ continue
95
+ out_arg: Dict[str, Any] = {}
96
+ out_arg["is_kwarg_only"] = str(is_kwarg_only)
97
+ out_arg["name"] = arg.name
98
+ out_arg["simple_type"] = python.argument_type_str(
99
+ arg.type, simple_type=True
100
+ )
101
+ size_t = python.argument_type_size(arg.type)
102
+ if size_t:
103
+ out_arg["size"] = size_t
104
+ out_args.append(out_arg)
105
+
106
+ out_args: List[Dict[str, Any]] = []
107
+ _add_out_arg(out_args, f.func.arguments.flat_positional, is_kwarg_only=False)
108
+ if f"{f.func.name.name}" not in _get_kwargs_func_exclusion_list():
109
+ _add_out_arg(out_args, f.func.arguments.flat_kwarg_only, is_kwarg_only=True)
110
+
111
+ return f"{f.func.name.name}: {repr(out_args)},"
112
+
113
+
114
+ def main() -> None:
115
+ parser = argparse.ArgumentParser(description="Generate annotated_fn_args script")
116
+ parser.add_argument(
117
+ "native_functions", metavar="NATIVE", help="path to native_functions.yaml"
118
+ )
119
+ parser.add_argument("tags", metavar="TAGS", help="path to tags.yaml")
120
+ parser.add_argument("out", metavar="OUT", help="path to output directory")
121
+ parser.add_argument(
122
+ "autograd", metavar="AUTOGRAD", help="path to template directory"
123
+ )
124
+ args = parser.parse_args()
125
+ gen_annotated(args.native_functions, args.tags, args.out, args.autograd)
126
+
127
+
128
+ if __name__ == "__main__":
129
+ main()
llmeval-env/lib/python3.10/site-packages/torchgen/packaged/autograd/gen_autograd.py ADDED
@@ -0,0 +1,146 @@
1
+ """
2
+ To run this file by hand from the root of the PyTorch
3
+ repository, run:
4
+
5
+ python -m tools.autograd.gen_autograd \
6
+ aten/src/ATen/native/native_functions.yaml \
7
+ aten/src/ATen/native/tags.yaml \
8
+ $OUTPUT_DIR \
9
+ tools/autograd
10
+
11
+ Where $OUTPUT_DIR is where you would like the files to be
12
+ generated. In the full build system, OUTPUT_DIR is
13
+ torch/csrc/autograd/generated/
14
+ """
15
+
16
+ # gen_autograd.py generates C++ autograd functions and Python bindings.
17
+ #
18
+ # It delegates to the following scripts:
19
+ #
20
+ # gen_autograd_functions.py: generates subclasses of torch::autograd::Node
21
+ # gen_variable_type.py: generates VariableType.h which contains all tensor methods
22
+ # gen_python_functions.py: generates Python bindings to THPVariable
23
+ #
24
+
25
+ import argparse
26
+ import os
27
+ from typing import List
28
+
29
+ from torchgen.api import cpp
30
+ from torchgen.api.autograd import (
31
+ match_differentiability_info,
32
+ NativeFunctionWithDifferentiabilityInfo,
33
+ )
34
+ from torchgen.gen import parse_native_yaml
35
+ from torchgen.selective_build.selector import SelectiveBuilder
36
+
37
+ from . import gen_python_functions
38
+ from .gen_autograd_functions import (
39
+ gen_autograd_functions_lib,
40
+ gen_autograd_functions_python,
41
+ )
42
+ from .gen_inplace_or_view_type import gen_inplace_or_view_type
43
+ from .gen_trace_type import gen_trace_type
44
+ from .gen_variable_factories import gen_variable_factories
45
+ from .gen_variable_type import gen_variable_type
46
+ from .gen_view_funcs import gen_view_funcs
47
+ from .load_derivatives import load_derivatives
48
+
49
+
50
+ def gen_autograd(
51
+ native_functions_path: str,
52
+ tags_path: str,
53
+ out: str,
54
+ autograd_dir: str,
55
+ operator_selector: SelectiveBuilder,
56
+ disable_autograd: bool = False,
57
+ ) -> None:
58
+ # Parse and load derivatives.yaml
59
+ differentiability_infos, used_dispatch_keys = load_derivatives(
60
+ os.path.join(autograd_dir, "derivatives.yaml"), native_functions_path, tags_path
61
+ )
62
+
63
+ template_path = os.path.join(autograd_dir, "templates")
64
+
65
+ native_funcs = parse_native_yaml(native_functions_path, tags_path).native_functions
66
+ fns = sorted(
67
+ filter(
68
+ operator_selector.is_native_function_selected_for_training, native_funcs
69
+ ),
70
+ key=lambda f: cpp.name(f.func),
71
+ )
72
+ fns_with_diff_infos: List[
73
+ NativeFunctionWithDifferentiabilityInfo
74
+ ] = match_differentiability_info(fns, differentiability_infos)
75
+
76
+ # Generate VariableType.h/cpp
77
+ if not disable_autograd:
78
+ gen_variable_type(
79
+ out,
80
+ native_functions_path,
81
+ tags_path,
82
+ fns_with_diff_infos,
83
+ template_path,
84
+ used_dispatch_keys,
85
+ )
86
+
87
+ gen_inplace_or_view_type(
88
+ out, native_functions_path, tags_path, fns_with_diff_infos, template_path
89
+ )
90
+
91
+ # operator filter not applied as tracing sources are excluded in selective build
92
+ gen_trace_type(out, native_funcs, template_path)
93
+ # Generate Functions.h/cpp
94
+ gen_autograd_functions_lib(out, differentiability_infos, template_path)
95
+
96
+ # Generate variable_factories.h
97
+ gen_variable_factories(out, native_functions_path, tags_path, template_path)
98
+
99
+ # Generate ViewFuncs.h/cpp
100
+ gen_view_funcs(out, fns_with_diff_infos, template_path)
101
+
102
+
103
+ def gen_autograd_python(
104
+ native_functions_path: str,
105
+ tags_path: str,
106
+ out: str,
107
+ autograd_dir: str,
108
+ ) -> None:
109
+ differentiability_infos, _ = load_derivatives(
110
+ os.path.join(autograd_dir, "derivatives.yaml"), native_functions_path, tags_path
111
+ )
112
+
113
+ template_path = os.path.join(autograd_dir, "templates")
114
+
115
+ # Generate Functions.h/cpp
116
+ gen_autograd_functions_python(out, differentiability_infos, template_path)
117
+
118
+ # Generate Python bindings
119
+ deprecated_path = os.path.join(autograd_dir, "deprecated.yaml")
120
+ gen_python_functions.gen(
121
+ out, native_functions_path, tags_path, deprecated_path, template_path
122
+ )
123
+
124
+
125
+ def main() -> None:
126
+ parser = argparse.ArgumentParser(description="Generate autograd C++ files script")
127
+ parser.add_argument(
128
+ "native_functions", metavar="NATIVE", help="path to native_functions.yaml"
129
+ )
130
+ parser.add_argument("tags", metavar="NATIVE", help="path to tags.yaml")
131
+ parser.add_argument("out", metavar="OUT", help="path to output directory")
132
+ parser.add_argument(
133
+ "autograd", metavar="AUTOGRAD", help="path to autograd directory"
134
+ )
135
+ args = parser.parse_args()
136
+ gen_autograd(
137
+ args.native_functions,
138
+ args.tags,
139
+ args.out,
140
+ args.autograd,
141
+ SelectiveBuilder.get_nop_selector(),
142
+ )
143
+
144
+
145
+ if __name__ == "__main__":
146
+ main()
llmeval-env/lib/python3.10/site-packages/torchgen/packaged/autograd/gen_autograd_functions.py ADDED
@@ -0,0 +1,912 @@
1
+ # Generates C++ autograd functions for the derivatives of ATen operations
2
+ #
3
+ # This writes two files:
4
+ # Functions.h/cpp: subclasses of autograd::Node
5
+ # python_functions.h/cpp: Python bindings for the above classes
6
+ #
7
+ from typing import Dict, List, Sequence, Tuple
8
+
9
+ from torchgen.api.autograd import (
10
+ Derivative,
11
+ DifferentiabilityInfo,
12
+ SavedAttribute,
13
+ uses_retain_variables,
14
+ uses_single_grad,
15
+ )
16
+ from torchgen.api.types import (
17
+ ArrayRefCType,
18
+ BaseCppType,
19
+ BaseCType,
20
+ Binding,
21
+ boolT,
22
+ doubleT,
23
+ intArrayRefT,
24
+ iTensorListRefT,
25
+ ListCType,
26
+ longT,
27
+ MutRefCType,
28
+ OptionalCType,
29
+ optionalIntArrayRefT,
30
+ optionalSymIntArrayRefT,
31
+ scalarT,
32
+ stringT,
33
+ symIntArrayRefT,
34
+ SymIntT,
35
+ TENSOR_LIST_LIKE_CTYPES,
36
+ tensorListT,
37
+ tensorT,
38
+ VectorCType,
39
+ )
40
+ from torchgen.code_template import CodeTemplate
41
+ from torchgen.model import Argument, FunctionSchema
42
+ from torchgen.utils import FileManager
43
+
44
+ from .gen_inplace_or_view_type import VIEW_FUNCTIONS
45
+
46
+ FUNCTION_DECLARATION = CodeTemplate(
47
+ """\
48
+ #ifdef _WIN32
49
+ struct ${op} : public ${superclass} {
50
+ TORCH_API ${op}() = default;
51
+ #else
52
+ struct TORCH_API ${op} : public ${superclass} {
53
+ #endif
54
+ using ${superclass}::${superclass};
55
+ variable_list apply(variable_list&& grads) override;
56
+ std::string name() const override { return "${op}"; }
57
+ void release_variables() override {
58
+ ${thread_lock}
59
+ ${release_variables}
60
+ }
61
+ ${will_release_variables}
62
+ void compiled_args(CompiledNodeArgs& args) override;
63
+ variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
64
+ ${saved_variables}
65
+ ${saved_list_sizes}
66
+ };
67
+ """
68
+ )
69
+
70
+ WILL_RELEASE_VARIABLES = CodeTemplate(
71
+ """\
72
+ bool retain_variables = true;
73
+ void will_release_variables() override {
74
+ retain_variables = false;
75
+ }
76
+ """
77
+ )
78
+
79
+ FUNCTION_DEFINITION = CodeTemplate(
80
+ """\
81
+ variable_list ${op}::apply(variable_list&& grads) {
82
+ ${thread_lock}
83
+ ${asserts}
84
+ IndexRangeGenerator gen;
85
+ ${compute_index_ranges}
86
+ variable_list grad_inputs(gen.size());
87
+ ${body}
88
+ return grad_inputs;
89
+ }
90
+ void ${op}::compiled_args(CompiledNodeArgs& args) {
91
+ ${compiled_args}
92
+ }
93
+ variable_list ${op}::apply_with_saved(const variable_list& grads, SwapSavedVariables& saved) {
94
+ ${apply_with_saved_before}
95
+ variable_list result = apply(variable_list(grads));
96
+ ${apply_with_saved_after}
97
+ return result;
98
+ }
99
+ """
100
+ )
101
+
102
+ GRAD_INPUT_MASK = CodeTemplate(
103
+ """\
104
+ auto grad_input_mask = std::array<bool, ${n}>{
105
+ ${masks}
106
+ };\
107
+ """
108
+ )
109
+
110
+ DERIVATIVE_SINGLE = CodeTemplate(
111
+ """\
112
+ if (task_should_compute_output({ ${name}_ix })) {
113
+ auto grad_result = ${derivative};
114
+ copy_range(grad_inputs, ${name}_ix, grad_result);
115
+ }
116
+ """
117
+ )
118
+
119
+ # note(crcrpar): The `self` argument and other optional positional arguments
121
+ # of foreach functions are basically lists of n `Tensor`s, so we iterate over
122
+ # `grads` in order to apply the existing derivative definitions
123
+ # to each `Tensor` of `self` and the others.
123
+ DERIVATIVE_SINGLE_FOREACH = CodeTemplate(
124
+ """\
125
+ if (task_should_compute_output({ ${name}_ix })) {
126
+ std::vector<Tensor> grad_result;
127
+ grad_result.reserve(grads.size());
128
+ for (const auto & i : c10::irange(grads.size())) {
129
+ if (grads[i].defined()) {
130
+ grad_result.emplace_back(${derivative});
131
+ } else {
132
+ grad_result.emplace_back(Tensor());
133
+ }
134
+ }
135
+ copy_range(grad_inputs, ${name}_ix, grad_result);
136
+ }
137
+ """
138
+ )
139
+
140
+ DERIVATIVE_MULTI_COPY_RANGE = CodeTemplate(
141
+ """\
142
+ if (task_should_compute_output({ ${name}_ix })) {
143
+ copy_range(grad_inputs, ${name}_ix, std::get<${i}>(grad_result));
144
+ }
145
+ """
146
+ )
147
+
148
+ DERIVATIVE_MULTI = CodeTemplate(
149
+ """\
150
+ if (task_should_compute_output({ ${idx_ranges} })) {
151
+ ${grad_input_mask}
152
+ auto grad_result = ${derivative};
153
+ ${copy_ranges}
154
+ }
155
+ """
156
+ )
157
+
158
+ # Generates python bindings
159
+ #
160
+ # This generates the definitions for:
161
+ # (1) The PyTypeObject for each backward grad_fn subclassing Node
162
+ # (2) The entry for PyTypeObject's tp_getset slot (an array of PyGetSetDef structs)
163
+ # We generate one PyGetSetDef struct for each of grad_fn's saved inputs and outputs
164
+ # Each PyGetSetDef has a function ptr to a getter, also defined here (3).
165
+ # (3) Getters for each of grad_fn's saved inputs and outputs.
166
+ #
167
+ PY_FUNCTION_DEFINITION = CodeTemplate(
168
+ """\
169
+ static PyTypeObject ${op}Class;
170
+ addClass<${op}>(module, ${op}Class, "${op}", ${op}_properties);
171
+ """
172
+ )
173
+
174
+ PY_FUNCTION_PROPS_AND_GETTERS = CodeTemplate(
175
+ """\
176
+ ${all_getter_definitions}
177
+
178
+ static struct PyGetSetDef ${op}_properties[] = {
179
+ THP_FUNCTION_DEFAULT_PROPERTIES,
180
+ ${all_getsetdef_structs}
181
+ {nullptr} /* sentinel */
182
+ };
183
+
184
+ """
185
+ )
186
+
187
+ PY_GETSETDEF_STRUCT = CodeTemplate(
188
+ """\
189
+ {(char*)"_saved_${name}", (getter)THP${op}_${name}_getter, nullptr, nullptr, nullptr}"""
190
+ )
191
+
192
+ PY_RAW_GETSETDEF_STRUCT = CodeTemplate(
193
+ """\
194
+ {(char*)"_raw_saved_${name}", (getter)THP${op}_${name}_raw_getter, nullptr, nullptr, nullptr}"""
195
+ )
196
+
197
+ # Getter templates
198
+ GETTER_DEFINITION = CodeTemplate(
199
+ """\
200
+ PyObject* THP${op}_${name}_getter(THPCppFunction *self, void *_unused) {
201
+ HANDLE_TH_ERRORS
202
+ auto prop = static_cast<${op}*>(self->cdata.get())->${name};
203
+ ${body}
204
+ END_HANDLE_TH_ERRORS
205
+ }
206
+ """
207
+ )
208
+
209
+ GETTER_DEFINITION_SAVEDVAR = CodeTemplate(
210
+ """\
211
+ PyObject* THP${op}_${name}_getter(THPCppFunction *self, void *_unused) {
212
+ HANDLE_TH_ERRORS
213
+ const auto& prop = static_cast<${op}*>(self->cdata.get())->${name}_;
214
+ ${body}
215
+ END_HANDLE_TH_ERRORS
216
+ }
217
+ """
218
+ )
219
+
220
+ GETTER_DEFINITION_RAW_SAVEDVAR = CodeTemplate(
221
+ """\
222
+ PyObject* THP${op}_${name}_raw_getter(THPCppFunction *self, void *_unused) {
223
+ HANDLE_TH_ERRORS
224
+ const auto& prop = static_cast<${op}*>(self->cdata.get())->${name}_;
225
+ ${body}
226
+ END_HANDLE_TH_ERRORS
227
+ }
228
+ """
229
+ )
230
+
231
+ GETTER_DEFINITION_VEC_SAVEDVAR = CodeTemplate(
232
+ """\
233
+ PyObject* THP${op}_${name}_getter(THPCppFunction *self, void *_unused) {
234
+ HANDLE_TH_ERRORS
235
+ const auto *node = static_cast<${op}*>(self->cdata.get());
236
+ const auto& prop = node->${name}_;
237
+ if (node->${name}_released_) {
238
+ PyErr_SetString(PyExc_RuntimeError, ERR_BACKWARD_TWICE);
239
+ return nullptr;
240
+ }
241
+ ${body}
242
+ END_HANDLE_TH_ERRORS
243
+ }
244
+ """
245
+ )
246
+
247
+ GETTER_DEFINITION_RAW_VEC_SAVEDVAR = CodeTemplate(
248
+ """\
249
+ PyObject* THP${op}_${name}_raw_getter(THPCppFunction *self, void *_unused) {
250
+ HANDLE_TH_ERRORS
251
+ const auto *node = static_cast<${op}*>(self->cdata.get());
252
+ const auto& prop = node->${name}_;
253
+ if (node->${name}_released_) {
254
+ PyErr_SetString(PyExc_RuntimeError, ERR_BACKWARD_TWICE);
255
+ return nullptr;
256
+ }
257
+ ${body}
258
+ END_HANDLE_TH_ERRORS
259
+ }
260
+ """
261
+ )
262
+
263
+ GETTER_DEFINITION_OPT = CodeTemplate(
264
+ """\
265
+ PyObject* THP${op}_${name}_getter(THPCppFunction *self, void *_unused) {
266
+ HANDLE_TH_ERRORS
267
+ auto opt_prop = static_cast<${op}*>(self->cdata.get())->${name};
268
+ if (!opt_prop.has_value()) {
269
+ Py_RETURN_NONE;
270
+ }
271
+ auto prop = opt_prop.value();
272
+ ${body}
273
+ END_HANDLE_TH_ERRORS
274
+ }
275
+ """
276
+ )
277
+
278
+ GETTER_DEFINITION_OPT_ARRAYREF = CodeTemplate(
279
+ """\
280
+ PyObject* THP${op}_${name}_getter(THPCppFunction *self, void *_unused) {
281
+ HANDLE_TH_ERRORS
282
+ auto opt_prop = static_cast<${op}*>(self->cdata.get())->${name};
283
+ if (!opt_prop.list.has_value()) {
284
+ Py_RETURN_NONE;
285
+ }
286
+ auto prop = opt_prop.list.value();
287
+ ${body}
288
+ END_HANDLE_TH_ERRORS
289
+ }
290
+ """
291
+ )
292
+
293
+ # Getter body
294
+ GETTER_BODY_SAVEDVAR = """\
295
+ return THPVariable_Wrap(prop.unpack(self->cdata));
296
+ """
297
+
298
+ GETTER_BODY_RAW_SAVEDVAR = """\
299
+ pybind11::object obj = pybind11::cast(prop, pybind11::return_value_policy::reference);
300
+ return obj.release().ptr();
301
+ """
302
+
303
+ GETTER_BODY_VEC_SAVEDVAR = """\
304
+ PyObject* tup = PyTuple_New((Py_ssize_t) prop.size());
305
+ for (auto i: c10::irange(prop.size())) {
306
+ PyTuple_SetItem(tup, (Py_ssize_t) i, THPVariable_Wrap(prop[i].unpack(self->cdata)));
307
+ }
308
+ return tup;
309
+ """
310
+
311
+ GETTER_BODY_RAW_VEC_SAVEDVAR = """\
312
+ PyObject* tup = PyTuple_New((Py_ssize_t) prop.size());
313
+ for (auto i : c10::irange(prop.size())) {
314
+ pybind11::object obj = pybind11::cast(prop[i], pybind11::return_value_policy::reference);
315
+ PyTuple_SetItem(tup, (Py_ssize_t) i, obj.release().ptr());
316
+ }
317
+ return tup;
318
+ """
319
+
320
+ GETTER_BODY_ARRAYREF_LONG = """\
321
+ PyObject* tup = PyTuple_New((Py_ssize_t) prop.size());
322
+ for (auto i : c10::irange(prop.size())) {
323
+ PyTuple_SetItem(tup, (Py_ssize_t) i, PyLong_FromUnsignedLong((uint64_t) prop[i]));
324
+ }
325
+ return tup;
326
+ """
327
+
328
+ GETTER_BODY_ARRAYREF_SYMINT = """\
329
+ PyObject* tup = PyTuple_New((Py_ssize_t) prop.size());
330
+ for (auto i : c10::irange(prop.size())) {
331
+ auto si = prop[i];
332
+ if (auto m = si.maybe_as_int()) {
333
+ PyTuple_SetItem(tup, (Py_ssize_t) i, PyLong_FromUnsignedLong(*m));
334
+ } else {
335
+ auto py_symint = py::cast(si).release().ptr();
336
+ PyTuple_SetItem(tup, (Py_ssize_t) i, py_symint);
337
+ }
338
+ }
339
+ return tup;
340
+ """
341
+
342
+ GETTER_BODY_ARRAYREF_DOUBLE = """\
343
+ PyObject* tup = PyTuple_New((Py_ssize_t) prop.size());
344
+ for (auto i : c10::irange(prop.size())) {
345
+ PyTuple_SetItem(tup, (Py_ssize_t) i, PyFloat_FromDouble((double) prop[i]));
346
+ }
347
+ return tup;
348
+ """
349
+
350
+ GETTER_BODY_INT64_T = """\
351
+ return PyLong_FromUnsignedLong((int64_t) prop);
352
+ """
353
+
354
+ GETTER_BODY_SYMINT = """\
355
+ if (auto m = prop.maybe_as_int()) {
356
+ return PyLong_FromUnsignedLong(*m);
357
+ } else {
358
+ return py::cast(prop).release().ptr();
359
+ }
360
+ """
361
+
362
+ GETTER_BODY_DOUBLE = """\
363
+ return PyFloat_FromDouble((double) prop);
364
+ """
365
+
366
+ GETTER_BODY_BOOL = """\
367
+ if (prop) {
368
+ Py_RETURN_TRUE;
369
+ } else {
370
+ Py_RETURN_FALSE;
371
+ }
372
+ """
373
+
374
+ GETTER_BODY_STRING = """\
375
+ return PyUnicode_FromStringAndSize(prop.data(), prop.size());
376
+ """
377
+
378
+ GETTER_BODY_SCALAR = """\
379
+ if (prop.isComplex()) {
380
+ auto cprop = prop.to<c10::complex<double>>();
381
+ return PyComplex_FromDoubles(cprop.real(), cprop.imag());
382
+ } else if (prop.isFloatingPoint()) {
383
+ return PyFloat_FromDouble(prop.to<double>());
384
+ } else if (prop.isIntegral(/*includeBool=*/false)) {
385
+ return PyLong_FromLong(prop.to<int64_t>());
386
+ } else if (prop.isBoolean()) {
387
+ if (prop.to<bool>()) {
388
+ Py_RETURN_TRUE;
389
+ } else {
390
+ Py_RETURN_FALSE;
391
+ }
392
+ } else {
393
+ PyErr_SetString(PyExc_RuntimeError, "Unknown scalar type");
394
+ return nullptr;
395
+ }
396
+ """
397
+
398
+
399
+ GETTER_BODY_VEC_SCALAR = """\
400
+ PyObject* tup = PyTuple_New((Py_ssize_t) prop.size());
401
+ for (auto i: c10::irange(prop.size())) {
402
+ if (prop[i].isComplex()) {
403
+ auto cprop = prop[i].to<c10::complex<double>>();
404
+ PyTuple_SetItem(tup, (Py_ssize_t) i, PyComplex_FromDoubles(cprop.real(), cprop.imag()));
405
+ } else if (prop[i].isFloatingPoint()) {
406
+ auto double_prop = prop[i].to<double>();
407
+ PyTuple_SetItem(tup, (Py_ssize_t) i, PyFloat_FromDouble(double_prop));
408
+ } else if (prop[i].isIntegral(/*includeBool=*/false)) {
409
+ auto long_prop = prop[i].to<int64_t>();
410
+ PyTuple_SetItem(tup, (Py_ssize_t) i, PyLong_FromLong(long_prop));
411
+ } else if (prop[i].isBoolean()) {
412
+ if (prop[i].to<bool>()) {
413
+ PyTuple_SetItem(tup, (Py_ssize_t) i, Py_True);
414
+ } else {
415
+ PyTuple_SetItem(tup, (Py_ssize_t) i, Py_False);
416
+ }
417
+ } else {
418
+ PyErr_SetString(PyExc_RuntimeError, "Unknown scalar type");
419
+ return nullptr;
420
+ }
421
+ }
422
+ return tup;
423
+ """
424
+
425
+
426
+ MISC_GETTER_DEFS = {
427
+ OptionalCType(BaseCType(longT)): (GETTER_DEFINITION_OPT, GETTER_BODY_INT64_T),
428
+ OptionalCType(BaseCType(SymIntT)): (GETTER_DEFINITION_OPT, GETTER_BODY_SYMINT),
429
+ BaseCType(doubleT): (GETTER_DEFINITION, GETTER_BODY_DOUBLE),
430
+ OptionalCType(BaseCType(doubleT)): (GETTER_DEFINITION_OPT, GETTER_BODY_DOUBLE),
431
+ BaseCType(boolT): (GETTER_DEFINITION, GETTER_BODY_BOOL),
432
+ BaseCType(scalarT): (GETTER_DEFINITION, GETTER_BODY_SCALAR),
433
+ OptionalCType(BaseCType(scalarT)): (GETTER_DEFINITION_OPT, GETTER_BODY_SCALAR),
434
+ }
435
+
436
+ # These functions have backwards which cannot be traced, and so must have
437
+ # their backward functions traced opaquely.
438
+ # VIEW_FUNCTIONS are not traceable because they use as_strided, which
439
+ # has an untraceable backwards, see
440
+ # https://github.com/pytorch/pytorch/issues/4250
441
+ # TODO: This is probably not exhaustive, but it's a start
442
+ UNTRACEABLE_FUNCTIONS = VIEW_FUNCTIONS
443
+
444
+
445
+ def get_infos_with_derivatives_list(
446
+ differentiability_infos: Dict[FunctionSchema, Dict[str, DifferentiabilityInfo]]
447
+ ) -> List[DifferentiabilityInfo]:
448
+ diff_info_list = [
449
+ info
450
+ for diffinfo_dict in differentiability_infos.values()
451
+ for info in diffinfo_dict.values()
452
+ ]
453
+
454
+ return list(filter(lambda info: info.args_with_derivatives, diff_info_list))
455
+
456
+
457
+ def gen_autograd_functions_lib(
458
+ out: str,
459
+ differentiability_infos: Dict[FunctionSchema, Dict[str, DifferentiabilityInfo]],
460
+ template_path: str,
461
+ ) -> None:
462
+ """Functions.h and Functions.cpp body
463
+
464
+ These contain the auto-generated subclasses of torch::autograd::Node
465
+ for every differentiable torch function.
466
+ """
467
+
468
+ # Get a flat list of diffinfos; we do not need them grouped per FunctionSchema/DispatchKey here.
469
+ # Infos with different dispatch keys but the same name will still end up in the same shard.
470
+ infos = get_infos_with_derivatives_list(differentiability_infos)
471
+ declarations = [process_function(f, FUNCTION_DECLARATION) for f in infos]
472
+ definitions = [process_function(f, FUNCTION_DEFINITION) for f in infos]
473
+
474
+ file_basename = "Functions"
475
+ fm = FileManager(install_dir=out, template_dir=template_path, dry_run=False)
476
+ for suffix in [".h", ".cpp"]:
477
+ fname = file_basename + suffix
478
+ fm.write_with_template(
479
+ fname,
480
+ fname,
481
+ lambda: {
482
+ "generated_comment": "@"
483
+ + f"generated from {fm.template_dir_for_comments()}/"
484
+ + fname,
485
+ "autograd_function_declarations": declarations,
486
+ "autograd_function_definitions": definitions,
487
+ },
488
+ )
489
+
490
+
491
+ def gen_autograd_functions_python(
492
+ out: str,
493
+ differentiability_infos: Dict[FunctionSchema, Dict[str, DifferentiabilityInfo]],
494
+ template_path: str,
495
+ ) -> None:
496
+ fm = FileManager(install_dir=out, template_dir=template_path, dry_run=False)
497
+ num_shards = 5
498
+ fm.write(
499
+ "python_functions.h",
500
+ lambda: {
501
+ "generated_comment": "@"
502
+ + f"generated from {fm.template_dir_for_comments()}/python_functions.h",
503
+ "shard_forward_declare": [
504
+ f"void initialize_autogenerated_functions_{i}(PyObject* module);"
505
+ for i in range(num_shards)
506
+ ],
507
+ "shard_call": [
508
+ f"initialize_autogenerated_functions_{i}(module);"
509
+ for i in range(num_shards)
510
+ ],
511
+ },
512
+ )
513
+
514
+ # Get a flat list of diffinfos; we do not need them grouped per FunctionSchema/DispatchKey here.
515
+ # Infos with different dispatch keys but the same name will still end up in the same shard.
516
+ infos = get_infos_with_derivatives_list(differentiability_infos)
517
+ fm.write_sharded(
518
+ "python_functions.cpp",
519
+ infos,
520
+ key_fn=lambda info: info.name,
521
+ base_env={
522
+ "generated_comment": "@"
523
+ + f"generated from {fm.template_dir_for_comments()}/python_functions.cpp",
524
+ },
525
+ env_callable=lambda info: {
526
+ "py_function_initializers": [
527
+ process_function(info, PY_FUNCTION_DEFINITION)
528
+ ],
529
+ "py_function_props_and_getters": [
530
+ process_function(info, PY_FUNCTION_PROPS_AND_GETTERS)
531
+ ],
532
+ },
533
+ num_shards=num_shards,
534
+ sharded_keys={"py_function_initializers", "py_function_props_and_getters"},
535
+ )
536
+
537
+
538
+ def process_function(info: DifferentiabilityInfo, template: CodeTemplate) -> str:
539
+ saved_variables: List[str] = []
540
+ release_variables: List[str] = []
541
+ saved_list_sizes: List[str] = []
542
+ unpack: List[str] = []
543
+ asserts: List[str] = []
544
+ compute_index_ranges: List[str] = []
545
+ getter_definitions: List[str] = []
546
+ py_getsetdef_structs: List[str] = []
547
+ compiled_args: List[str] = []
548
+ apply_with_saved_before: List[str] = []
549
+ apply_with_saved_after: List[str] = []
550
+
551
+ for arg in info.args_with_derivatives:
552
+ if arg.type in TENSOR_LIST_LIKE_CTYPES:
553
+ size = f"{arg.name}_size_"
554
+ saved_list_sizes.append(f"size_t {arg.name}_size_;")
555
+ else:
556
+ size = "1"
557
+ compute_index_ranges.append(f"auto {arg.name}_ix = gen.range({size});")
558
+
559
+ def save_var(var: SavedAttribute, is_output: bool) -> None:
560
+ name = var.nctype.name
561
+ type = var.nctype.type
562
+ should_append_getsetdef = True
563
+ should_append_raw_getsetdef = False
564
+ visit_name = name
565
+
566
+ if (
567
+ type == BaseCType(tensorT)
568
+ or type == OptionalCType(BaseCType(tensorT))
569
+ or type == MutRefCType(OptionalCType(BaseCType(tensorT)))
570
+ or (type == BaseCType(scalarT) and is_output)
571
+ ):
572
+ saved_variables.append(f"SavedVariable {name}_;")
573
+ release_variables.append(f"{name}_.reset_data();")
574
+ ptr = "shared_from_this()" if is_output else ""
575
+ unpack.append(f"auto {name} = {name}_.unpack({ptr});")
576
+ getter_definitions.append(
577
+ GETTER_DEFINITION_SAVEDVAR.substitute(
578
+ op=info.op, name=name, body=GETTER_BODY_SAVEDVAR
579
+ )
580
+ )
581
+ getter_definitions.append(
582
+ GETTER_DEFINITION_RAW_SAVEDVAR.substitute(
583
+ op=info.op, name=name, body=GETTER_BODY_RAW_SAVEDVAR
584
+ )
585
+ )
586
+ should_append_raw_getsetdef = True
587
+ visit_name = f"{name}_"
588
+ elif (
589
+ type == BaseCType(tensorListT)
590
+ or type == BaseCType(iTensorListRefT)
591
+ or type == VectorCType(BaseCType(tensorT))
592
+ ):
593
+ # note(crcrpar): [nuanced return type of out-of-place foreach functions]
594
+ # When an out-of-place foreach function whose return signature is `Tensor[]`
595
+ # spells out its backward definitions in `derivatives.yaml`, and some of them depend on
596
+ # `result`, `result`'s type is interpreted and treated as `std::vector<Tensor>`.
597
+ # An out-of-place foreach whose backwards rely on their output doesn't suffer from this
598
+ # difference if the definitions are codegen'ed.
599
+ # This special case is needed for `_foreach_pow.List` and `_foreach_pow.ScalarAndTensor`
600
+ # as of https://github.com/pytorch/pytorch/pull/105504.
601
+ if type == VectorCType(BaseCType(tensorT)):
602
+ assert (
603
+ info.func.func.name.name.base.startswith("_foreach") and is_output
604
+ )
605
+ saved_variables.append(f"std::vector<SavedVariable> {name}_;")
606
+ saved_variables.append(f"bool {name}_released_ = false;")
607
+ # Just clear() is sufficient, we don't need to loop and clear each variable.
608
+ # Because the SavedVariable owns a tensor and a grad_fn, removing the SavedVariable makes them go away as well.
609
+ release_variables.append(f"{name}_.clear();")
610
+ release_variables.append(f"{name}_released_ = true;")
611
+ ptr = "shared_from_this()" if is_output else "nullptr"
612
+ unpack.append(f"auto {name} = unpack_list({name}_, {ptr});")
613
+ asserts.append(f"TORCH_CHECK(!{name}_released_, ERR_BACKWARD_TWICE);")
614
+ getter_definitions.append(
615
+ GETTER_DEFINITION_VEC_SAVEDVAR.substitute(
616
+ op=info.op, name=name, body=GETTER_BODY_VEC_SAVEDVAR
617
+ )
618
+ )
619
+ getter_definitions.append(
620
+ GETTER_DEFINITION_RAW_VEC_SAVEDVAR.substitute(
621
+ op=info.op, name=name, body=GETTER_BODY_RAW_VEC_SAVEDVAR
622
+ )
623
+ )
624
+ should_append_raw_getsetdef = True
625
+ visit_name = f"{name}_"
626
+ elif type == ListCType(OptionalCType(BaseCType(tensorT))):
627
+ saved_variables.append(f"std::vector<SavedVariable> {name}_;")
628
+ saved_variables.append(f"bool {name}_released_ = false;")
629
+ # Just clear() is sufficient, we don't need to loop and clear each variable.
630
+ # Because the SavedVariable owns a tensor and a grad_fn, removing the SavedVariable makes them go away as well.
631
+ release_variables.append(f"{name}_.clear();")
632
+ release_variables.append(f"{name}_released_ = true;")
633
+ unpack.append(f"auto {name} = unpack_opt_list({name}_);")
634
+ asserts.append(f"TORCH_CHECK(!{name}_released_, ERR_BACKWARD_TWICE);")
635
+ getter_definitions.append(
636
+ GETTER_DEFINITION_VEC_SAVEDVAR.substitute(
637
+ op=info.op, name=name, body=GETTER_BODY_VEC_SAVEDVAR
638
+ )
639
+ )
640
+ getter_definitions.append(
641
+ GETTER_DEFINITION_RAW_VEC_SAVEDVAR.substitute(
642
+ op=info.op, name=name, body=GETTER_BODY_RAW_VEC_SAVEDVAR
643
+ )
644
+ )
645
+ should_append_raw_getsetdef = True
646
+ visit_name = f"{name}_"
647
+ elif type == BaseCType(intArrayRefT):
648
+ saved_variables.append(f"std::vector<int64_t> {name};")
649
+ getter_definitions.append(
650
+ GETTER_DEFINITION.substitute(
651
+ op=info.op, name=name, body=GETTER_BODY_ARRAYREF_LONG
652
+ )
653
+ )
654
+ elif type == BaseCType(symIntArrayRefT):
655
+ saved_variables.append(f"std::vector<c10::SymInt> {name};")
656
+ getter_definitions.append(
657
+ GETTER_DEFINITION.substitute(
658
+ op=info.op, name=name, body=GETTER_BODY_ARRAYREF_SYMINT
659
+ )
660
+ )
661
+ elif type == BaseCType(optionalIntArrayRefT):
662
+ saved_variables.append(f"c10::OptionalArray<int64_t> {name};")
663
+ getter_definitions.append(
664
+ GETTER_DEFINITION_OPT_ARRAYREF.substitute(
665
+ op=info.op, name=name, body=GETTER_BODY_ARRAYREF_LONG
666
+ )
667
+ )
668
+ elif type == BaseCType(optionalSymIntArrayRefT):
669
+ saved_variables.append(f"c10::OptionalArray<c10::SymInt> {name};")
670
+ getter_definitions.append(
671
+ GETTER_DEFINITION_OPT_ARRAYREF.substitute(
672
+ op=info.op, name=name, body=GETTER_BODY_ARRAYREF_SYMINT
673
+ )
674
+ )
675
+ elif type == OptionalCType(BaseCType(intArrayRefT)):
676
+ saved_variables.append(f"c10::OptionalArray<int64_t> {name};")
677
+ getter_definitions.append(
678
+ GETTER_DEFINITION_OPT_ARRAYREF.substitute(
679
+ op=info.op, name=name, body=GETTER_BODY_ARRAYREF_LONG
680
+ )
681
+ )
682
+ elif type == OptionalCType(BaseCType(symIntArrayRefT)):
683
+ saved_variables.append(f"c10::OptionalArray<c10::SymInt> {name};")
684
+ getter_definitions.append(
685
+ GETTER_DEFINITION_OPT_ARRAYREF.substitute(
686
+ op=info.op, name=name, body=GETTER_BODY_ARRAYREF_SYMINT
687
+ )
688
+ )
689
+ elif type == OptionalCType(ArrayRefCType(BaseCType(doubleT))):
690
+ saved_variables.append(f"c10::OptionalArray<double> {name};")
691
+ getter_definitions.append(
692
+ GETTER_DEFINITION_OPT_ARRAYREF.substitute(
693
+ op=info.op, name=name, body=GETTER_BODY_ARRAYREF_DOUBLE
694
+ )
695
+ )
696
+ elif type == BaseCType(longT):
697
+ saved_variables.append(f"{type.cpp_type()} {name} = 0;")
698
+ getter_definitions.append(
699
+ GETTER_DEFINITION.substitute(
700
+ op=info.op, name=name, body=GETTER_BODY_INT64_T
701
+ )
702
+ )
703
+ elif type == BaseCType(SymIntT):
704
+ saved_variables.append(f"c10::SymInt {name};")
705
+ getter_definitions.append(
706
+ GETTER_DEFINITION.substitute(
707
+ op=info.op, name=name, body=GETTER_BODY_SYMINT
708
+ )
709
+ )
710
+ elif type == BaseCType(stringT):
711
+ saved_variables.append(f"std::string {name};")
712
+ getter_definitions.append(
713
+ GETTER_DEFINITION.substitute(
714
+ op=info.op, name=name, body=GETTER_BODY_STRING
715
+ )
716
+ )
717
+ elif type == OptionalCType(BaseCType(stringT)):
718
+ saved_variables.append(f"c10::optional<std::string> {name};")
719
+ getter_definitions.append(
720
+ GETTER_DEFINITION_OPT.substitute(
721
+ op=info.op, name=name, body=GETTER_BODY_STRING
722
+ )
723
+ )
724
+ elif type == ArrayRefCType(
725
+ elem=BaseCType(type=BaseCppType(ns="at", name="Scalar"))
726
+ ):
727
+ saved_variables.append(f"std::vector<at::Scalar> {name};")
728
+ saved_variables.append(f"bool {name}_released_ = false;")
729
+ # Just clear() is sufficient, we don't need to loop and clear each variable.
730
+ # Because the SavedVariable owns a tensor and a grad_fn, removing the SavedVariable makes them go away as well.
731
+ release_variables.append(f"{name}.clear();")
732
+ # release_variables.append(f"{name}_released_ = true;")
733
+ # unpack.append(f"auto {name} = unpack_list({name}_);")
734
+ # asserts.append(f"TORCH_CHECK(!{name}_released_, ERR_BACKWARD_TWICE);")
735
+ getter_definitions.append(
736
+ CodeTemplate(
737
+ """\
738
+ PyObject* THP${op}_${name}_getter(THPCppFunction *self, void *_unused) {
739
+ HANDLE_TH_ERRORS
740
+ const auto *node = static_cast<${op}*>(self->cdata.get());
741
+ const auto& prop = node->${name};
742
+ if (node->${name}_released_) {
743
+ PyErr_SetString(PyExc_RuntimeError, ERR_BACKWARD_TWICE);
744
+ return nullptr;
745
+ }
746
+ ${body}
747
+ END_HANDLE_TH_ERRORS
748
+ }
749
+ """
750
+ ).substitute(
751
+ op=info.op,
752
+ name=name,
753
+ body=GETTER_BODY_VEC_SCALAR,
754
+ )
755
+ )
756
+ else:
757
+ # Check for indicators that you're putting a non-owning reference
758
+ # into the saved variable field. If this is spuriously firing,
759
+ # edit this field. Otherwise, you probably need to add a case
760
+ # above.
761
+ assert (
762
+ "ref" not in type.cpp_type().lower()
763
+ and "view" not in type.cpp_type().lower()
764
+ and "*" not in type.cpp_type()
765
+ and "&" not in type.cpp_type()
766
+ ), f"{type.cpp_type()} looks like it contains a non-owning reference"
767
+ saved_variables.append(f"{type.cpp_type()} {name};")
768
+
769
+ if type in MISC_GETTER_DEFS:
770
+ getter_def, body = MISC_GETTER_DEFS[type]
771
+ getter_definitions.append(
772
+ getter_def.substitute(op=info.op, name=name, body=body)
773
+ )
774
+ else:
775
+ # Types we don't expose python bindings to yet:
776
+ # TypeAndSize, at::ScalarType, TensorOptions, TensorGeometry,
777
+ # std::vector<std::vector<int64_t>>, std::vector<at::ScalarType>
778
+ should_append_getsetdef = False
779
+
780
+ if should_append_getsetdef:
781
+ py_getsetdef_structs.append(
782
+ PY_GETSETDEF_STRUCT.substitute(op=info.op, name=name)
783
+ )
784
+ if should_append_raw_getsetdef:
785
+ py_getsetdef_structs.append(
786
+ PY_RAW_GETSETDEF_STRUCT.substitute(op=info.op, name=name)
787
+ )
788
+
789
+ compiled_args.append(f"args.collect({visit_name});")
790
+ apply_with_saved_before.append(f"saved.before({visit_name});")
791
+ apply_with_saved_after.append(f"saved.after({visit_name});")
792
+
793
+ for var in sorted(info.all_saved_inputs, key=lambda sa: str(sa.nctype.name)):
794
+ save_var(var, is_output=False)
795
+ for var in sorted(info.all_saved_outputs, key=lambda sa: str(sa.nctype.name)):
796
+ save_var(var, is_output=True)
797
+
798
+ # lock the mutex when we release variables and in Node::apply to protect thread safety
799
+ # see Note [Thread Safety on Autograd Node]
800
+ if len(release_variables) > 0:
801
+ thread_lock = "std::lock_guard<std::mutex> lock(mutex_);"
802
+ else:
803
+ thread_lock = ""
804
+
805
+ if uses_retain_variables(info):
806
+ will_release_variables = WILL_RELEASE_VARIABLES.substitute()
807
+ else:
808
+ will_release_variables = ""
809
+
810
+ body: List[str] = []
811
+
812
+ if uses_single_grad(info):
813
+ body.append("const auto& grad = grads[0];")
814
+ else:
815
+ # Generate aliases for gradients named for returned values.
816
+ body.extend(
817
+ f"const auto& {name} = grads[{info.available_named_gradients.index(name)}];"
818
+ for name in sorted(info.used_named_gradients)
819
+ )
820
+
821
+ def emit_derivative(
822
+ derivative: Derivative,
823
+ args_with_derivatives: Sequence[Binding],
824
+ ) -> Tuple[bool, str]:
825
+ formula = derivative.formula
826
+ var_names = derivative.var_names
827
+ if len(var_names) == 1:
828
+ checks_any_grad_defined = False
829
+ if "not_implemented" not in formula:
830
+ matching_args = [
831
+ arg for arg in args_with_derivatives if arg.name == var_names[0]
832
+ ]
833
+ if len(matching_args) == 1:
834
+ # We can add undefined grad support if the input variable is a Tensor
835
+ arg = matching_args[0]
836
+ if isinstance(arg.argument, Argument) and str(
837
+ arg.argument.type
838
+ ) in ("Tensor", "Tensor?"):
839
+ formula = "any_grad_defined ? (" + formula + ") : Tensor()"
840
+ checks_any_grad_defined = True
841
+ if info.name.startswith("_foreach_"):
842
+ derivative_template = DERIVATIVE_SINGLE_FOREACH
843
+ else:
844
+ derivative_template = DERIVATIVE_SINGLE
845
+ return (
846
+ checks_any_grad_defined,
847
+ derivative_template.substitute(name=var_names[0], derivative=formula),
848
+ )
849
+ else:
850
+ if "grad_input_mask" in formula:
851
+ masks = [
852
+ f"task_should_compute_output({{ {n}_ix }})," for n in var_names
853
+ ]
854
+ grad_input_mask = GRAD_INPUT_MASK.substitute(
855
+ masks=masks, n=len(var_names)
856
+ )
857
+ else:
858
+ grad_input_mask = ""
859
+ idx_ranges = ", ".join(f"{n}_ix" for n in var_names)
860
+ copy_ranges: List[str] = []
861
+ for i, n in enumerate(var_names):
862
+ copy_ranges.append(DERIVATIVE_MULTI_COPY_RANGE.substitute(name=n, i=i))
863
+ return False, DERIVATIVE_MULTI.substitute(
864
+ idx_ranges=idx_ranges,
865
+ copy_ranges=copy_ranges,
866
+ derivative=formula,
867
+ grad_input_mask=grad_input_mask,
868
+ )
869
+
870
+ body.extend(unpack)
871
+ need_any_grad_defined_var = False
872
+ for derivative in info.derivatives:
873
+ checks_any_grad_defined, derivative_text = emit_derivative(
874
+ derivative, info.args_with_derivatives
875
+ )
876
+ body.append(derivative_text)
877
+ need_any_grad_defined_var |= checks_any_grad_defined
878
+ # Since single-output derivative formulas need to check if grads are
879
+ # defined, only perform the check once, before all the formulas
880
+ if need_any_grad_defined_var:
881
+ body.insert(
882
+ -len(info.derivatives),
883
+ "bool any_grad_defined = any_variable_defined(grads);",
884
+ )
885
+
886
+ if info.name in UNTRACEABLE_FUNCTIONS:
887
+ superclass = "Node"
888
+ else:
889
+ superclass = "TraceableFunction"
890
+
891
+ all_getsetdef_structs = (
892
+ ",\n".join(py_getsetdef_structs) + "," if len(py_getsetdef_structs) != 0 else ""
893
+ )
894
+ all_getter_definitions = "\n".join(getter_definitions)
895
+
896
+ return template.substitute(
897
+ op=info.op,
898
+ compute_index_ranges=compute_index_ranges,
899
+ saved_variables=saved_variables,
900
+ release_variables=release_variables,
901
+ saved_list_sizes=saved_list_sizes,
902
+ asserts=asserts,
903
+ thread_lock=thread_lock,
904
+ will_release_variables=will_release_variables,
905
+ body=body,
906
+ superclass=superclass,
907
+ all_getter_definitions=all_getter_definitions,
908
+ all_getsetdef_structs=all_getsetdef_structs,
909
+ compiled_args=compiled_args,
910
+ apply_with_saved_before=apply_with_saved_before,
911
+ apply_with_saved_after=apply_with_saved_after,
912
+ )
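The block above finishes by substituting the collected saved-variable declarations, getters and release logic into the C++ Node class template. As a rough illustration of that substitution mechanism, the sketch below fills a getter-style template; the template text and the op/name/body values are invented for the example, and only the CodeTemplate API is taken from the code above.

from torchgen.code_template import CodeTemplate

# Hypothetical getter template in the style of the GETTER_DEFINITION_* templates above;
# ${op}, ${name} and ${body} are filled in per saved variable.
GETTER_SKETCH = CodeTemplate(
    """\
PyObject* THP${op}_${name}_getter(THPCppFunction *self, void *_unused) {
  HANDLE_TH_ERRORS
  ${body}
  END_HANDLE_TH_ERRORS
}
"""
)

# Example substitution with made-up values; list values are joined line by line,
# which is how the real codegen passes multi-line bodies.
print(GETTER_SKETCH.substitute(op="MulBackward0", name="other", body=["Py_RETURN_NONE;"]))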
llmeval-env/lib/python3.10/site-packages/torchgen/packaged/autograd/gen_inplace_or_view_type.py ADDED
@@ -0,0 +1,675 @@
1
+ # Generates ADInplaceOrViewType.h/cpp
2
+ #
3
+ # NOTE: If any changes are being made to the ADInplaceOrView codegen please also check
4
+ # if updates are needed in torch/csrc/autograd/autograd_not_implemented_fallback.cpp
5
+ # The fallback is expected to mimic this codegen, so we should keep the two in sync.
6
+
7
+ from typing import Dict, List, Optional, Tuple
8
+
9
+ from torchgen.api import cpp
10
+ from torchgen.api.autograd import (
11
+ dispatch_strategy,
12
+ gen_differentiable_outputs,
13
+ NativeFunctionWithDifferentiabilityInfo,
14
+ )
15
+ from torchgen.api.types import (
16
+ BaseCType,
17
+ Binding,
18
+ boolT,
19
+ ConstRefCType,
20
+ CType,
21
+ DispatcherSignature,
22
+ intArrayRefT,
23
+ longT,
24
+ OptionalCType,
25
+ symIntArrayRefT,
26
+ SymIntT,
27
+ # See Note [Nested Arg Types]
28
+ tensorT,
29
+ )
30
+ from torchgen.code_template import CodeTemplate
31
+ from torchgen.context import with_native_function
32
+ from torchgen.model import (
33
+ NativeFunction,
34
+ SchemaKind,
35
+ SelfArgument,
36
+ TensorOptionsArguments,
37
+ Type,
38
+ )
39
+ from torchgen.utils import FileManager
40
+
41
+ from .context import with_native_function_with_differentiability_info
42
+ from .gen_trace_type import (
43
+ get_return_value,
44
+ MANUAL_AUTOGRAD,
45
+ tie_return_values,
46
+ type_wrapper_name,
47
+ )
48
+
49
+ # See NOTE [ Autograd View Variables ] in variable.h for details.
50
+ # If you update list VIEW_FUNCTIONS or RETURNS_VIEWS_OF_INPUT,
51
+ # you **MUST** also update the public list of view ops accordingly in
52
+ # docs/source/tensor_view.rst. Note that not all ATen functions are exposed to the public,
53
+ # e.g. alias & sparse_coo_tensor_with_dims_and_tensors.
54
+ #
55
+ # A map: function name => name of the argument that all outputs are view of
56
+
57
+ VIEW_FUNCTIONS_WITH_METADATA_CHANGE = [
58
+ "view_as_complex",
59
+ "view_as_real",
60
+ "_conj",
61
+ "_neg_view",
62
+ "_nested_get_values",
63
+ "_nested_view_from_buffer",
64
+ "_nested_view_from_jagged",
65
+ ]
66
+
67
+ VIEW_FUNCTIONS = {
68
+ "numpy_T": "self",
69
+ "alias": "self",
70
+ "as_strided": "self",
71
+ "diagonal": "self",
72
+ "expand": "self",
73
+ "permute": "self",
74
+ "select": "self",
75
+ "slice": "self",
76
+ "slice_inverse": "self",
77
+ "split": "self",
78
+ "split_with_sizes": "self",
79
+ "squeeze": "self",
80
+ "t": "self",
81
+ "transpose": "self",
82
+ "unfold": "self",
83
+ "unsqueeze": "self",
84
+ "flatten": "self",
85
+ "view": "self",
86
+ "unbind": "self",
87
+ "_indices": "self",
88
+ "_values": "self",
89
+ "indices": "self",
90
+ "values": "self",
91
+ "crow_indices": "self",
92
+ "col_indices": "self",
93
+ "ccol_indices": "self",
94
+ "row_indices": "self",
95
+ # sparse_coo ctor output should really be views of both indices and values,
96
+ # but we only support making a view of a single variable, and indices is
97
+ # discrete anyways.
98
+ # FIXME: clone indices on construction.
99
+ "sparse_coo_tensor_with_dims_and_tensors": "values",
100
+ "_reshape_alias": "self",
101
+ "_test_autograd_multiple_dispatch_view": "self",
102
+ }
103
+
104
+ for key in VIEW_FUNCTIONS_WITH_METADATA_CHANGE:
105
+ VIEW_FUNCTIONS[key] = "self"
106
+
107
+ # note: some VIEW_FUNCTIONS are just compositions of the view functions above
108
+ # this list contains both the root view functions and any that are purely composed
109
+ # of viewing functions, and is used by the JIT to determine when an operator
110
+ # may return a view of its inputs; however, they may sometimes return a copy.
111
+ # (e.g. `contiguous`)
112
+ RETURNS_VIEWS_OF_INPUT = set(VIEW_FUNCTIONS.keys()).union(
113
+ {
114
+ "chunk",
115
+ "detach",
116
+ "contiguous",
117
+ "reshape",
118
+ "reshape_as",
119
+ "expand_as",
120
+ "view_as",
121
+ "real",
122
+ "imag",
123
+ "narrow",
124
+ "movedim",
125
+ "tensor_split",
126
+ "swapdims",
127
+ "swapaxes",
128
+ "mT",
129
+ "mH",
130
+ "adjoint",
131
+ "matrix_H",
132
+ }
133
+ )
134
+
135
+ # These are the functions we consider views for the purposes of validating
136
+ # StorageImpl and TensorImpl in gen_variable_type.
137
+ # `_unsafe_view` is not included in VIEW_FUNCTIONS above because it is not a
138
+ # view for the purposes of the ADInplaceOrView kernel, so we do not want to call as_view on it.
139
+ # See NOTE [Unsafe View] for more info.
140
+ ALL_VIEW_FUNCTIONS = {
141
+ **VIEW_FUNCTIONS,
142
+ "_unsafe_view": "self",
143
+ }
144
+
145
+ ARRAYREF_TO_VEC = CodeTemplate(
146
+ """\
147
+ auto ${vec} = ${arg}.vec();
148
+ """
149
+ )
150
+
151
+ OPTIONAL_TO_VAL = CodeTemplate(
152
+ """\
153
+ auto ${val} = ${arg}.value_or(${default});
154
+ """
155
+ )
156
+
157
+ CALL_DISPATCH = CodeTemplate(
158
+ """\
159
+ at::_ops::${unambiguous_name}::call(${unpacked_args})"""
160
+ )
161
+
162
+ REVERSE_VIEW_DISPATCH = CodeTemplate(
163
+ """\
164
+ ${reverse_name}(${unpacked_args})"""
165
+ )
166
+
167
+ MULTI_OUTPUT_VIEW_ITERATION = CodeTemplate(
168
+ """\
169
+ for (auto ${view_idx} : c10::irange(${var}.size())) {
170
+ ${body}
171
+ }
172
+ """
173
+ )
174
+
175
+ SETUP_REPLAY_VIEW_IF_NOT_SUPPORT_AS_STRIDED_OR_VIEW_WITH_METADATA_CHANGE = CodeTemplate(
176
+ """\
177
+ std::unique_ptr<torch::autograd::ViewFunc> func(nullptr);
178
+ std::function<at::Tensor(const at::Tensor&)> rev_func=nullptr;
179
+ if (${is_view_with_metadata_change} ||
180
+ !self.unsafeGetTensorImpl()->support_as_strided() ||
181
+ self.unsafeGetTensorImpl()->is_python_dispatch() ||
182
+ c10::AutogradState::get_tls_state().get_view_replay_enabled()) {
183
+ ${replay_view_func}
184
+ ${reverse_replay_view_func}
185
+ }
186
+ """
187
+ )
188
+
189
+ REPLAY_VIEW_FUNC = CodeTemplate(
190
+ """\
191
+ func = std::make_unique<${view_func_name}>(${view_func_args});
192
+ """
193
+ )
194
+
195
+ REVERSE_REPLAY_VIEW_LAMBDA_FUNC = CodeTemplate(
196
+ """\
197
+ rev_func = [=](const at::Tensor& ${input_view}) {
198
+ return ${reverse_replay_view_call};
199
+ };
200
+ """
201
+ )
202
+
203
+ METHOD_DEFINITION = CodeTemplate(
204
+ """\
205
+ ${return_type} ${type_wrapper_name}(${formals}) {
206
+ ${type_definition_body}
207
+ }
208
+ """
209
+ )
210
+
211
+ WRAPPER_REGISTRATION = CodeTemplate(
212
+ """\
213
+ m.impl("${unqual_operator_name_with_overload}",
214
+ TORCH_FN(${class_type}::${type_wrapper_name})
215
+ );
216
+ """
217
+ )
218
+
219
+ AUTOGRAD_NOT_IMPLEMENTED_REGISTRATION = CodeTemplate(
220
+ """\
221
+ m.impl("${unqual_operator_name_with_overload}", torch::autograd::autogradNotImplementedFallback());
222
+ """
223
+ )
224
+
225
+ INPLACE_REDISPATCH = CodeTemplate(
226
+ """\
227
+ {
228
+ at::AutoDispatchBelowADInplaceOrView guard;
229
+ at::_ops::${unambiguous_name}::redispatch(${unpacked_args});
230
+ }
231
+ """
232
+ )
233
+
234
+ ASSIGN_RETURN_VALUE = CodeTemplate(
235
+ """\
236
+ ${return_values} = ${rhs_value};
237
+ """
238
+ )
239
+
240
+ VIEW_REDISPATCH = CodeTemplate(
241
+ """\
242
+ ${assign_return_values} ([&]() {
243
+ at::AutoDispatchBelowADInplaceOrView guard;
244
+ return at::_ops::${unambiguous_name}::redispatch(${unpacked_args});
245
+ })();
246
+ """
247
+ )
248
+
249
+ TMP_VAR = "_tmp"
250
+
251
+
252
+ # FIXME: Ideally these functions should be methods on Type class, but we have a
253
+ # comment in codegen/model.py there saying these concepts are not well defined.
254
+ # Thus we put a version that is commonly used by autograd codegen here.
255
+ def is_tensor_type(t: Type) -> bool:
256
+ # TODO: Should handle optional here?
257
+ return t.is_tensor_like() and t.is_list_like() is None
258
+
259
+
260
+ def is_tensor_list_type(t: Type) -> bool:
261
+ # TODO: Should handle optional here?
262
+ return t.is_tensor_like() and t.is_list_like() is not None
263
+
264
+
265
+ UNPACK_TENSOR = CodeTemplate(
266
+ """\
267
+ auto${ref} ${arg_name}_ = unpack${suffix}(${arg_name}, "${arg_name}", ${arg_pos});"""
268
+ )
269
+
270
+
271
+ def unpacked_name(arg_name: str) -> str:
272
+ return arg_name + "_"
273
+
274
+
275
+ # e.g. select.int -> select_copy_int_inverse()
276
+ def inverse_view_name(f: NativeFunction) -> str:
277
+ copy_variant = f"{f.root_name}_copy"
278
+ overload = f"{f.func.name.overload_name}"
279
+ if overload != "":
280
+ overload = "_" + overload
281
+ return f"{copy_variant}{overload}_inverse"
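The example in the comment above (select.int -> select_copy_int_inverse()) can be reproduced with a simplified, string-only version of the same mangling; the helper name below is hypothetical and only mirrors the logic of inverse_view_name:

def inverse_view_name_for(root_name: str, overload_name: str) -> str:
    # Same steps as inverse_view_name above, but on plain strings
    # instead of a NativeFunction.
    copy_variant = f"{root_name}_copy"
    overload = f"_{overload_name}" if overload_name else ""
    return f"{copy_variant}{overload}_inverse"

assert inverse_view_name_for("select", "int") == "select_copy_int_inverse"
assert inverse_view_name_for("view", "") == "view_copy_inverse"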
282
+
283
+
284
+ def extract_bindings(f: NativeFunction) -> List[Binding]:
285
+ return [
286
+ r
287
+ for a in f.func.schema_order_arguments()
288
+ for r in cpp.argument(
289
+ a,
290
+ method=False,
291
+ symint=True,
292
+ cpp_no_default_args=set(),
293
+ faithful=False,
294
+ has_tensor_options=False,
295
+ )
296
+ ]
297
+
298
+
299
+ @with_native_function
300
+ def unpack_args(f: NativeFunction) -> Tuple[List[str], List[Binding]]:
301
+ body: List[str] = []
302
+ unpacked_bindings: List[Binding] = []
303
+
304
+ for i, binding in enumerate(extract_bindings(f)):
305
+ assert not isinstance(binding.argument, SelfArgument)
306
+ if isinstance(binding.argument, TensorOptionsArguments):
307
+ raise RuntimeError("VariableKernel shouldn't take TensorOptions")
308
+
309
+ is_nullable = binding.argument.type.is_nullable()
310
+ if not binding.argument.type.is_tensor_like() or is_nullable:
311
+ unpacked_bindings.append(binding)
312
+ continue
313
+
314
+ is_tensor_list = is_tensor_list_type(binding.argument.type)
315
+ ref = (not is_nullable) and not is_tensor_list
316
+ suffix = "_opt" if is_nullable and not is_tensor_list else ""
317
+ body.append(
318
+ UNPACK_TENSOR.substitute(
319
+ arg_name=binding.name,
320
+ arg_pos=i,
321
+ suffix=suffix,
322
+ ref="&" if ref else "",
323
+ )
324
+ )
325
+ unpacked_bindings.append(
326
+ Binding(
327
+ name=unpacked_name(binding.name),
328
+ nctype=binding.nctype,
329
+ argument=binding.argument,
330
+ default=binding.default,
331
+ )
332
+ )
333
+
334
+ return body, unpacked_bindings
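For a plain (non-optional, non-list) tensor argument, the UNPACK_TENSOR template above expands to a single unpack line. A small sketch of that expansion; the argument name and position are made up for illustration:

# ref is "&" and suffix is "" for a non-nullable, non-list tensor argument.
print(UNPACK_TENSOR.substitute(arg_name="other", arg_pos=1, suffix="", ref="&"))
# -> auto& other_ = unpack(other, "other", 1);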
335
+
336
+
337
+ def get_base_name(f: NativeFunction) -> str:
338
+ return f.func.name.name.base # TODO: should be str(f.func.name.name)?
339
+
340
+
341
+ def get_view_info(f: NativeFunction) -> Optional[str]:
342
+ base_name = get_base_name(f)
343
+ view_info = VIEW_FUNCTIONS.get(base_name, None)
344
+ if view_info is None and base_name in RETURNS_VIEWS_OF_INPUT:
345
+ view_info = "self"
346
+ return view_info
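A simplified, name-only sketch of the lookup order above: an explicit VIEW_FUNCTIONS entry wins, names that merely return views of their input fall back to "self", and everything else is treated as non-view. The helper name below is hypothetical; the example op names come from the tables defined earlier in this file.

def view_info_for_name(base_name):
    view_info = VIEW_FUNCTIONS.get(base_name, None)
    if view_info is None and base_name in RETURNS_VIEWS_OF_INPUT:
        view_info = "self"
    return view_info

assert view_info_for_name("sparse_coo_tensor_with_dims_and_tensors") == "values"
assert view_info_for_name("reshape") == "self"   # composite view, not in VIEW_FUNCTIONS
assert view_info_for_name("add") is None         # not a view op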
347
+
348
+
349
+ def emit_view_func(
350
+ f: NativeFunction, bindings: List[Binding], view_idx: Optional[str] = None
351
+ ) -> str:
352
+ """Generate an additional lambda function to recover views in backward when as_strided is not supported.
353
+ See Note [View + Inplace update for base tensor] and [View + Inplace update for view tensor] for more details.
354
+ """
355
+ # TODO: Clean this logic up if we get rid of reverse view funcs or reify them.
356
+ input_base = "input_base"
357
+ replay_view_func = ""
358
+ updated_args: List[str] = []
359
+ known_view_arg_simple_types: List[CType] = [
360
+ BaseCType(longT),
361
+ OptionalCType(BaseCType(longT)),
362
+ BaseCType(SymIntT),
363
+ OptionalCType(BaseCType(SymIntT)),
364
+ BaseCType(boolT),
365
+ BaseCType(intArrayRefT),
366
+ BaseCType(symIntArrayRefT),
367
+ ConstRefCType(BaseCType(tensorT)),
368
+ ConstRefCType(OptionalCType(BaseCType(tensorT))),
369
+ ]
370
+ for binding in bindings:
371
+ arg, arg_type = binding.name, binding.nctype.type
372
+ if arg == "self":
373
+ updated_args.append(input_base)
374
+ continue
375
+ if arg_type not in known_view_arg_simple_types:
376
+ known_types_str = ", ".join([str(t) for t in known_view_arg_simple_types])
377
+ raise TypeError(
378
+ f"You are adding an {arg_type} {arg} argument to op {cpp.name(f.func)} in addition to known types: "
379
+ f"{known_types_str}. Please update the list or materialize it so that it can be closed "
380
+ "over by value, also add a test in pytorch/xla/test/test_operations.py where this code "
381
+ "is exercised."
382
+ )
383
+ if arg_type == BaseCType(intArrayRefT) or arg_type == BaseCType(
384
+ symIntArrayRefT
385
+ ):
386
+ # It's not safe to close over IntArrayRef by value, since this is a
387
+ # reference type, so materialize a vector to close over by value
388
+ arg_vec = arg + "_vec"
389
+ replay_view_func += ARRAYREF_TO_VEC.substitute(arg=arg, vec=arg_vec)
390
+ updated_args.append(arg_vec)
391
+ elif arg_type == OptionalCType(BaseCType(longT)):
392
+ # Materialize int64_t? to int64_t
393
+ arg_value = arg + "_val"
394
+ replay_view_func += OPTIONAL_TO_VAL.substitute(
395
+ arg=arg, val=arg_value, default="0"
396
+ )
397
+ updated_args.append(arg_value)
398
+ elif arg_type == ConstRefCType(BaseCType(tensorT)) or arg_type == ConstRefCType(
399
+ OptionalCType(BaseCType(tensorT))
400
+ ):
401
+ # NB: Closing over a tensor. If a user modifies this tensor, this will be silently
402
+ # incorrect. The proper thing to do is to store the version counter and copy on write.
403
+ updated_args.append(arg)
404
+ else:
405
+ updated_args.append(arg)
406
+
407
+ from .gen_view_funcs import view_func_name
408
+
409
+ view_func_args = [b.name for b in bindings if b.name != "self"]
410
+ if view_idx is not None:
411
+ view_func_args.append(f"{view_idx}")
412
+ replay_view_func += REPLAY_VIEW_FUNC.substitute(
413
+ view_func_name=view_func_name(f, include_namespace=True),
414
+ view_func_args=view_func_args,
415
+ )
416
+
417
+ input_view = "input_view"
418
+ reverse_unpacked_args = [
419
+ "self",
420
+ f"{input_view}",
421
+ # inverse_return_mode=
422
+ "at::functionalization::InverseReturnMode::AlwaysView",
423
+ *(() if view_idx is None else (f"{view_idx}",)),
424
+ # skip input_base arg
425
+ *updated_args[1:],
426
+ ]
427
+
428
+ from torchgen.api.functionalization import reverse_name
429
+
430
+ reverse_replay_view_call = REVERSE_VIEW_DISPATCH.substitute(
431
+ reverse_name=reverse_name(f, include_namespace=True),
432
+ unpacked_args=reverse_unpacked_args,
433
+ )
434
+ reverse_replay_view_func = REVERSE_REPLAY_VIEW_LAMBDA_FUNC.substitute(
435
+ input_view=input_view, reverse_replay_view_call=reverse_replay_view_call
436
+ )
437
+
438
+ is_view_with_metadata_change = (
439
+ "true" if cpp.name(f.func) in VIEW_FUNCTIONS_WITH_METADATA_CHANGE else "false"
440
+ )
441
+
442
+ return SETUP_REPLAY_VIEW_IF_NOT_SUPPORT_AS_STRIDED_OR_VIEW_WITH_METADATA_CHANGE.substitute(
443
+ is_view_with_metadata_change=is_view_with_metadata_change,
444
+ replay_view_func=replay_view_func,
445
+ reverse_replay_view_func=reverse_replay_view_func,
446
+ )
447
+
448
+
449
+ def emit_view_body(
450
+ fn: NativeFunctionWithDifferentiabilityInfo, var: str
451
+ ) -> Tuple[str, str]:
452
+ # See NOTE [ Autograd View Variables ] in variable.h for details.
453
+ f = fn.func
454
+ base_name = get_base_name(f)
455
+ view_info = get_view_info(f)
456
+ call = ""
457
+ differentiable_outputs = gen_differentiable_outputs(fn)
458
+ differentiable_output_vars = {r.name for r in differentiable_outputs}
459
+ if not isinstance(view_info, str):
460
+ raise TypeError(
461
+ f"The view info should be a string for {base_name}, but it is: {view_info}"
462
+ )
463
+ if len(differentiable_output_vars) == 0:
464
+ # no output is differentiable (.indices() for SparseTensors for example)
465
+ rhs_value = (
466
+ f"as_view({view_info}, {var}, "
467
+ f"/* is_bw_differentiable */ false, /* is_fw_differentiable */ false)"
468
+ )
469
+ elif len(differentiable_output_vars) == 1:
470
+ # Single differentiable output (Tensor or Tensor[])
471
+ return_info = differentiable_outputs[0]
472
+ # We only support simple Tensor or a TensorList for functions that return views
473
+ if not is_tensor_type(return_info.type) and not is_tensor_list_type(
474
+ return_info.type
475
+ ):
476
+ raise RuntimeError(
477
+ f"Functions like {base_name} that return differentiable views can only return Tensor or Tensor[]"
478
+ )
479
+
480
+ # See Note [ View + Inplace detection]
481
+ def get_creation_meta_in_mode(original: str) -> str:
482
+ creation_meta_with_grad_mode = f"(at::GradMode::is_enabled() ? {original} : CreationMeta::NO_GRAD_MODE)"
483
+ return f"InferenceMode::is_enabled() ? CreationMeta::INFERENCE_MODE : {creation_meta_with_grad_mode}"
484
+
485
+ # Only allow rebasing of the history if we return a single Tensor
486
+ # If we are in a no grad block, raise a warning
487
+ # See NOTE [ View + Inplace detection ] for more details about this logic
488
+ if is_tensor_list_type(return_info.type):
489
+ creation_meta = get_creation_meta_in_mode("CreationMeta::MULTI_OUTPUT_NODE")
490
+ view_idx = "view_idx"
491
+ view_func = emit_view_func(
492
+ f, extract_bindings(f), view_idx=view_idx
493
+ ).strip()
494
+ as_view_call = (
495
+ f"as_view(/* base */ {view_info}, /* output */ {var}[{view_idx}], "
496
+ "/* is_bw_differentiable */ true, /* is_fw_differentiable */ true, "
497
+ "/* view_func */ std::move(func), /* rev_view_func */ rev_func, "
498
+ f"/* creation_meta */ {creation_meta});"
499
+ )
500
+ call += MULTI_OUTPUT_VIEW_ITERATION.substitute(
501
+ var=var, view_idx=view_idx, body=f"{view_func}\n{as_view_call}"
502
+ )
503
+ rhs_value = f"std::move({var})"
504
+ else:
505
+ call += emit_view_func(f, extract_bindings(f), view_idx=None)
506
+ creation_meta = get_creation_meta_in_mode("CreationMeta::DEFAULT")
507
+ rhs_value = (
508
+ f"as_view(/* base */ {view_info}, /* output */ {var}, /* is_bw_differentiable */ true, "
509
+ "/* is_fw_differentiable */ true, "
510
+ f"/* view_func */ std::move(func), /* rev_view_func */ rev_func, /* creation_meta */ {creation_meta})"
511
+ )
512
+ else:
513
+ # This could be supported but we don't need it at the moment, so keeping things simple.
514
+ "Functions that return multiple differentiable outputs "
515
+ "where at least one of them is a view are not supported."
516
+ "when at least one of them is view is not supported."
517
+ )
518
+ return call, rhs_value
519
+
520
+
521
+ def modifies_arguments(f: NativeFunction) -> bool:
522
+ return f.func.kind() in [SchemaKind.inplace, SchemaKind.out]
523
+
524
+
525
+ @with_native_function_with_differentiability_info
526
+ def emit_inplace_or_view_body(fn: NativeFunctionWithDifferentiabilityInfo) -> List[str]:
527
+ f = fn.func
528
+ inplace_view_body: List[str] = []
529
+
530
+ dispatcher_sig = DispatcherSignature.from_schema(f.func)
531
+ dispatcher_exprs = dispatcher_sig.exprs()
532
+
533
+ # code-generated ADInplaceOrView kernels plumb and recompute dispatch keys directly through the kernel for performance.
534
+ # See Note [Plumbing Keys Through The Dispatcher] for details.
535
+ dispatch_key_set = "ks & c10::after_ADInplaceOrView_keyset"
536
+ redispatch_args = ", ".join([dispatch_key_set] + [a.expr for a in dispatcher_exprs])
537
+
538
+ # Note that this calls the slow, dispatching variants of manual_cpp_binding ops.
539
+ # We could probably work harder to ensure that the fast variants are called instead, but the perf benefit would be minimal.
540
+ if modifies_arguments(f): # inplace op
541
+ inplace_view_body.append(
542
+ INPLACE_REDISPATCH.substitute(
543
+ unambiguous_name=f.func.name.unambiguous_name(),
544
+ unpacked_args=redispatch_args,
545
+ )
546
+ )
547
+ for r in cpp.return_names(f):
548
+ inplace_view_body.append(f"increment_version({r});")
549
+ else:
550
+ assert get_view_info(f) is not None
551
+ inplace_view_body.append(
552
+ VIEW_REDISPATCH.substitute(
553
+ assign_return_values="auto " + TMP_VAR + " = ",
554
+ unambiguous_name=f.func.name.unambiguous_name(),
555
+ unpacked_args=redispatch_args,
556
+ )
557
+ )
558
+ call, rhs_value = emit_view_body(fn, TMP_VAR)
559
+ inplace_view_body.append(call)
560
+ assert rhs_value is not None
561
+ inplace_view_body.append(
562
+ ASSIGN_RETURN_VALUE.substitute(
563
+ return_values=tie_return_values(f), rhs_value=rhs_value
564
+ )
565
+ )
566
+ if f.func.returns:
567
+ inplace_view_body.append(f"return {get_return_value(f)};")
568
+ return inplace_view_body
569
+
570
+
571
+ @with_native_function
572
+ def gen_formals(f: NativeFunction) -> str:
573
+ return ", ".join(
574
+ # code-generated autograd kernels plumb and recompute dispatch keys directly through the kernel for performance.
575
+ # See Note [Plumbing Keys Through The Dispatcher] for details.
576
+ ["c10::DispatchKeySet ks"]
577
+ + [
578
+ f'{cpp.argument_type(a, binds="__placeholder__", symint=True).cpp_type()} {a.name}'
579
+ for a in f.func.schema_order_arguments()
580
+ ]
581
+ )
582
+
583
+
584
+ @with_native_function_with_differentiability_info
585
+ def inplace_or_view_method_definition(
586
+ fn: NativeFunctionWithDifferentiabilityInfo,
587
+ ) -> Optional[str]:
588
+ f = fn.func
589
+ if get_view_info(f) is None and (
590
+ # For functions that modify their inputs but don't return them,
591
+ # we can't give them autograd support.
592
+ # See https://github.com/pytorch/pytorch/issues/53796
593
+ not modifies_arguments(f)
594
+ or len(f.func.returns) == 0
595
+ ):
596
+ return None
597
+ return METHOD_DEFINITION.substitute(
598
+ return_type=cpp.returns_type(f.func.returns, symint=True).cpp_type(),
599
+ type_wrapper_name=type_wrapper_name(f),
600
+ formals=gen_formals(f),
601
+ type_definition_body=emit_inplace_or_view_body(fn),
602
+ )
603
+
604
+
605
+ @with_native_function_with_differentiability_info
606
+ def inplace_or_view_method_registration(
607
+ fn: NativeFunctionWithDifferentiabilityInfo,
608
+ ) -> Optional[str]:
609
+ f = fn.func
610
+ if get_view_info(f) is None and (
611
+ not modifies_arguments(f) or len(f.func.returns) == 0
612
+ ):
613
+ return None
614
+ return WRAPPER_REGISTRATION.substitute(
615
+ unqual_operator_name_with_overload=f.func.name,
616
+ type_wrapper_name=type_wrapper_name(f),
617
+ class_type="ADInplaceOrView",
618
+ )
619
+
620
+
621
+ def use_derived(fn: NativeFunctionWithDifferentiabilityInfo) -> bool:
622
+ f = fn.func
623
+ name = cpp.name(f.func)
624
+ return name not in MANUAL_AUTOGRAD and dispatch_strategy(fn) == "use_derived"
625
+
626
+
627
+ def gen_inplace_or_view_type_env(
628
+ fn: NativeFunctionWithDifferentiabilityInfo,
629
+ ) -> Dict[str, List[str]]:
630
+ definition = inplace_or_view_method_definition(fn)
631
+ registration = inplace_or_view_method_registration(fn)
632
+
633
+ return {
634
+ "ops_headers": (
635
+ [f"#include <ATen/ops/{fn.func.root_name}_ops.h>"]
636
+ if definition is not None
637
+ else []
638
+ ),
639
+ "inplace_or_view_method_definitions": [definition]
640
+ if definition is not None
641
+ else [],
642
+ "inplace_or_view_wrapper_registrations": [registration]
643
+ if registration is not None
644
+ else [],
645
+ }
646
+
647
+
648
+ def gen_inplace_or_view_type(
649
+ out: str,
650
+ native_yaml_path: str,
651
+ tags_yaml_path: str,
652
+ fns_with_infos: List[NativeFunctionWithDifferentiabilityInfo],
653
+ template_path: str,
654
+ ) -> None:
655
+ # NOTE: see Note [Sharded File] at the top of the VariableType.cpp
656
+ # template regarding sharding of the generated files.
657
+ num_shards = 2
658
+
659
+ fm = FileManager(install_dir=out, template_dir=template_path, dry_run=False)
660
+ fm.write_sharded(
661
+ "ADInplaceOrViewType.cpp",
662
+ [fn for fn in fns_with_infos if use_derived(fn)],
663
+ key_fn=lambda fn: fn.func.root_name,
664
+ base_env={
665
+ "generated_comment": "@"
666
+ + f"generated from {fm.template_dir_for_comments()}/ADInplaceOrViewType.cpp",
667
+ },
668
+ env_callable=gen_inplace_or_view_type_env,
669
+ num_shards=2,
670
+ sharded_keys={
671
+ "ops_headers",
672
+ "inplace_or_view_method_definitions",
673
+ "inplace_or_view_wrapper_registrations",
674
+ },
675
+ )
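To see the shape of what this file emits, the two templates defined above can be filled with hypothetical values; the op, wrapper name and body below are invented, and only the templates and the CodeTemplate API come from the file itself.

print(METHOD_DEFINITION.substitute(
    return_type="at::Tensor &",
    type_wrapper_name="my_inplace_op",  # hypothetical wrapper name
    formals="c10::DispatchKeySet ks, at::Tensor & self",
    type_definition_body=["// redispatch below ADInplaceOrView, then bump the version counter"],
))
print(WRAPPER_REGISTRATION.substitute(
    unqual_operator_name_with_overload="my_op_",  # hypothetical operator name
    class_type="ADInplaceOrView",
    type_wrapper_name="my_inplace_op",
))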
llmeval-env/lib/python3.10/site-packages/torchgen/packaged/autograd/gen_python_functions.py ADDED
@@ -0,0 +1,1396 @@
1
+ # Generates Python bindings for ATen functions
2
+ #
3
+ # The bindings are generated as methods on python_variable or functions on the
4
+ # torch._C._nn, torch._C._fft, torch._C._linalg, torch._C._nested, torch._C._sparse
5
+ # or torch._C._special objects.
6
+ #
7
+
8
+ # Code tries to stick to the following rules:
9
+ #
10
+ # - templates should be colocated with the functions that use them.
11
+ # no templates are currently shared between functions, but if that
12
+ # happens, maybe put the template with the first one
13
+ #
14
+ # - don't use environment dictionaries when calling template.substitute().
15
+ # pass named arguments directly for everything, otherwise it's much too
16
+ # hard to track what's actually being used and by who
17
+ #
18
+ # - colocate any new hacks/adjustments with existing ones of the same kind.
19
+ # ideally in a data structure rather than code if possible. See e.g.
20
+ # SCHEMA_DEFAULT_CONVERSION_HACKS, etc.
21
+ #
22
+ # - similarly, conversions from one format to another should ideally happen
23
+ # all at once in a single place.
24
+ #
25
+ # - no nontrivial nested functions. couple-liners are ok but please no more.
26
+ # especially avoid functions that read/write outer variables defined far away.
27
+ #
28
+ # - raise RuntimeError instead of asserting, and put as much
29
+ # information as is available into the message. I.e. no need to
30
+ # plumb in new params whose only purpose is to fill out an error
31
+ # message, but use what's there
32
+ #
33
+
34
+ import itertools
35
+ import re
36
+ from collections import defaultdict
37
+
38
+ from typing import Callable, Dict, Iterable, List, Optional, Sequence, Set, Tuple
39
+
40
+ import yaml
41
+ from torchgen.api import cpp
42
+ from torchgen.api.python import (
43
+ arg_parser_output_exprs,
44
+ cpp_dispatch_exprs,
45
+ cpp_dispatch_target,
46
+ dispatch_lambda_args,
47
+ dispatch_lambda_exprs,
48
+ dispatch_lambda_return_str,
49
+ has_tensor_options,
50
+ PythonSignature,
51
+ PythonSignatureDeprecated,
52
+ PythonSignatureGroup,
53
+ PythonSignatureNativeFunctionPair,
54
+ signature,
55
+ signature_from_schema,
56
+ structseq_fieldnames,
57
+ )
58
+
59
+ from torchgen.code_template import CodeTemplate
60
+ from torchgen.context import with_native_function
61
+ from torchgen.gen import cpp_string, parse_native_yaml, parse_tags_yaml
62
+ from torchgen.model import (
63
+ Argument,
64
+ BaseOperatorName,
65
+ FunctionSchema,
66
+ NativeFunction,
67
+ SchemaKind,
68
+ Type,
69
+ Variant,
70
+ )
71
+ from torchgen.utils import FileManager, split_name_params
72
+ from torchgen.yaml_utils import YamlLoader
73
+
74
+ from .gen_inplace_or_view_type import is_tensor_list_type
75
+ from .gen_trace_type import should_trace
76
+
77
+ #
78
+ # declarations blocklist
79
+ # We skip codegen for these functions, for various reasons.
80
+ # Future PRs will categorize this list and eliminate or hoist
81
+ # them out of eager-only codegen.
82
+ # See https://github.com/pytorch/pytorch/issues/30788
83
+ #
84
+
85
+ # These functions require manual Python bindings or are not exposed to Python
86
+ _SKIP_PYTHON_BINDINGS = [
87
+ "alias",
88
+ "contiguous",
89
+ "is_cuda",
90
+ "is_sparse",
91
+ "is_sparse_csr",
92
+ "size",
93
+ "stride",
94
+ "sym_size",
95
+ "sym_stride",
96
+ "sym_storage_offset",
97
+ "sym_numel",
98
+ ".*_backward",
99
+ ".*_backward_(out|input|weight|bias)",
100
+ ".*_forward",
101
+ ".*_forward_out",
102
+ ".*_jvp",
103
+ "_unsafe_view",
104
+ "tensor",
105
+ "_?sparse_(coo|compressed|csr|csc|bsr|bsc)_tensor.*",
106
+ "_range.*",
107
+ "_sparse_add_out",
108
+ "_sparse_div.*",
109
+ "_sparse_mul.*",
110
+ "_sparse_sub.*",
111
+ "_sparse_dense_add_out",
112
+ "index",
113
+ "index_out",
114
+ "unique_dim_consecutive",
115
+ "_cumsum.*",
116
+ "_cumprod.*",
117
+ "_sum.*",
118
+ "_prod.*",
119
+ "_th_.*",
120
+ "_thnn_.*",
121
+ "range.*",
122
+ "_solve.*",
123
+ "_inverse.*",
124
+ "_cholesky.*",
125
+ "_triangular_solve.*",
126
+ "_qr.*",
127
+ "_svd.*",
128
+ "slice",
129
+ "item",
130
+ "_local_scalar_dense",
131
+ "to",
132
+ "_to_copy",
133
+ "_to_copy_out",
134
+ "_reshape_copy",
135
+ "_reshape_copy_out",
136
+ "copy_sparse_to_sparse_",
137
+ "copy_",
138
+ "numpy_T",
139
+ "matrix_H",
140
+ "mT",
141
+ "mH", # these need to be an attributes in Python, not functions
142
+ "nonzero(_(out|numpy))?",
143
+ "set_data",
144
+ ".*_overrideable", # overrideable functions for backend extension
145
+ "data",
146
+ "is_leaf",
147
+ "output_nr",
148
+ "_version",
149
+ "requires_grad_",
150
+ "retains_grad",
151
+ "set_",
152
+ "_fw_primal",
153
+ "fake_quantize_per_tensor_affine_cachemask",
154
+ "fake_quantize_per_channel_affine_cachemask",
155
+ "_new_zeros_with_same_feature_meta",
156
+ "_has_same_storage_numel", # used for forward AD internals
157
+ "_reshape_alias",
158
+ "replace_", # only used by the functionalization pass, doesn't need to be exposed to python
159
+ "copy", # only used by the functionalization pass
160
+ "fill.Tensor", # only used by the functionalization pass
161
+ "fill.Scalar", # only used by the functionalization pass
162
+ "lift.*",
163
+ "normal_functional", # only used by the functionalization pas
164
+ "nbytes",
165
+ "itemsize",
166
+ ]
167
+
168
+ SKIP_PYTHON_BINDINGS = [
169
+ re.compile(rf"^{pattern}$") for pattern in _SKIP_PYTHON_BINDINGS
170
+ ]
171
+
172
+ # These function signatures are not exposed to Python. Note that this signature
173
+ # list does not support regex.
174
+ SKIP_PYTHON_BINDINGS_SIGNATURES = [
175
+ "add.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor",
176
+ "add_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)",
177
+ "sub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor",
178
+ "sub_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)",
179
+ "mul.Scalar(Tensor self, Scalar other) -> Tensor",
180
+ "mul_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)",
181
+ "div.Scalar(Tensor self, Scalar other) -> Tensor",
182
+ "div_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)",
183
+ ]
184
+
185
+
186
+ @with_native_function
187
+ def should_generate_py_binding(f: NativeFunction) -> bool:
188
+ # NativeFunctions that are entirely code-generated should not get python bindings
189
+ # because these codegen implementations are often inefficient. A handful of
190
+ # view_copy style ops were exposed accidentally when they were handwritten and now
191
+ # that we are moving them to codegen for bc reasons we need to keep them exposed in
192
+ # python.
193
+ if "generated" in f.tags and "view_copy" not in f.tags:
194
+ return False
195
+
196
+ name = cpp.name(f.func)
197
+ for skip_regex in SKIP_PYTHON_BINDINGS:
198
+ if skip_regex.match(name):
199
+ return False
200
+
201
+ signature = str(f.func)
202
+ for pattern in SKIP_PYTHON_BINDINGS_SIGNATURES:
203
+ if pattern == signature:
204
+ return False
205
+ return True
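Each blocklist entry above is compiled as a fully anchored regex, so exact names and wildcard patterns are both supported. A small sketch of the check performed by the loop above; the helper name and the example op names are illustrative only:

def name_is_skipped(name: str) -> bool:
    return any(skip_regex.match(name) for skip_regex in SKIP_PYTHON_BINDINGS)

assert name_is_skipped("tensor")                  # exact entry "tensor"
assert not name_is_skipped("tensordot")           # anchoring prevents a prefix match
assert name_is_skipped("convolution_backward")    # matched by ".*_backward"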
206
+
207
+
208
+ def get_pycname(name: BaseOperatorName) -> str:
209
+ return f"THPVariable_{name}"
210
+
211
+
212
+ def is_noarg(overloads: Sequence[PythonSignatureNativeFunctionPair]) -> bool:
213
+ return len(overloads) == 1 and overloads[0].signature.arguments_count() == 0
214
+
215
+
216
+ def is_py_variable_method(f: NativeFunction) -> bool:
217
+ return f.python_module is None and Variant.method in f.variants
218
+
219
+
220
+ def is_py_torch_function(f: NativeFunction) -> bool:
221
+ return f.python_module is None and Variant.function in f.variants
222
+
223
+
224
+ def is_py_nn_function(f: NativeFunction) -> bool:
225
+ return f.python_module == "nn"
226
+
227
+
228
+ def is_py_fft_function(f: NativeFunction) -> bool:
229
+ return f.python_module == "fft"
230
+
231
+
232
+ def is_py_linalg_function(f: NativeFunction) -> bool:
233
+ return f.python_module == "linalg"
234
+
235
+
236
+ def is_py_nested_function(f: NativeFunction) -> bool:
237
+ return f.python_module == "nested"
238
+
239
+
240
+ def is_py_sparse_function(f: NativeFunction) -> bool:
241
+ return f.python_module == "sparse"
242
+
243
+
244
+ def is_py_special_function(f: NativeFunction) -> bool:
245
+ return f.python_module == "special"
246
+
247
+
248
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
249
+ #
250
+ # Main Function
251
+ #
252
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
253
+
254
+
255
+ def gen(
256
+ out: str,
257
+ native_yaml_path: str,
258
+ tags_yaml_path: str,
259
+ deprecated_yaml_path: str,
260
+ template_path: str,
261
+ *,
262
+ symint: bool = True,
263
+ ) -> None:
264
+ fm = FileManager(install_dir=out, template_dir=template_path, dry_run=False)
265
+ native_functions = parse_native_yaml(
266
+ native_yaml_path, tags_yaml_path
267
+ ).native_functions
268
+ native_functions = list(filter(should_generate_py_binding, native_functions))
269
+
270
+ methods = load_signatures(native_functions, deprecated_yaml_path, method=True)
271
+ create_python_bindings(
272
+ fm,
273
+ methods,
274
+ is_py_variable_method,
275
+ None,
276
+ "python_variable_methods.cpp",
277
+ method=True,
278
+ symint=symint,
279
+ )
280
+
281
+ # NOTE: num_shards here must be synced with gatherTorchFunctions in
282
+ # torch/csrc/autograd/python_torch_functions_manual.cpp
283
+ functions = load_signatures(native_functions, deprecated_yaml_path, method=False)
284
+ create_python_bindings_sharded(
285
+ fm,
286
+ functions,
287
+ is_py_torch_function,
288
+ "torch",
289
+ "python_torch_functions.cpp",
290
+ method=False,
291
+ num_shards=3,
292
+ symint=symint,
293
+ )
294
+
295
+ create_python_bindings(
296
+ fm,
297
+ functions,
298
+ is_py_nn_function,
299
+ "torch.nn",
300
+ "python_nn_functions.cpp",
301
+ method=False,
302
+ symint=symint,
303
+ )
304
+
305
+ create_python_bindings(
306
+ fm,
307
+ functions,
308
+ is_py_fft_function,
309
+ "torch.fft",
310
+ "python_fft_functions.cpp",
311
+ method=False,
312
+ symint=symint,
313
+ )
314
+
315
+ create_python_bindings(
316
+ fm,
317
+ functions,
318
+ is_py_linalg_function,
319
+ "torch.linalg",
320
+ "python_linalg_functions.cpp",
321
+ method=False,
322
+ symint=symint,
323
+ )
324
+
325
+ create_python_bindings(
326
+ fm,
327
+ functions,
328
+ is_py_nested_function,
329
+ "torch.nested",
330
+ "python_nested_functions.cpp",
331
+ method=False,
332
+ )
333
+
334
+ create_python_bindings(
335
+ fm,
336
+ functions,
337
+ is_py_sparse_function,
338
+ "torch.sparse",
339
+ "python_sparse_functions.cpp",
340
+ method=False,
341
+ symint=symint,
342
+ )
343
+
344
+ create_python_bindings(
345
+ fm,
346
+ functions,
347
+ is_py_special_function,
348
+ "torch.special",
349
+ "python_special_functions.cpp",
350
+ method=False,
351
+ symint=symint,
352
+ )
353
+
354
+ # Currently, we only use `functions` to generate `return_types` bindings.
355
+ # All methods which return a structseq have a function variant at this point.
356
+ # If a method-only operator that returns a structseq is added in the future,
357
+ # we will have to address that.
358
+ create_python_return_type_bindings(
359
+ fm, functions, lambda fn: True, "python_return_types.cpp"
360
+ )
361
+ create_python_return_type_bindings_header(
362
+ fm, functions, lambda fn: True, "python_return_types.h"
363
+ )
364
+
365
+ valid_tags = parse_tags_yaml(tags_yaml_path)
366
+
367
+ def gen_tags_enum() -> Dict[str, str]:
368
+ return {
369
+ "enum_of_valid_tags": (
370
+ "".join(
371
+ [f'\n.value("{tag}", at::Tag::{tag})' for tag in sorted(valid_tags)]
372
+ )
373
+ )
374
+ }
375
+
376
+ fm.write("python_enum_tag.cpp", gen_tags_enum)
377
+
378
+
379
+ def group_filter_overloads(
380
+ pairs: Sequence[PythonSignatureNativeFunctionPair],
381
+ pred: Callable[[NativeFunction], bool],
382
+ ) -> Dict[BaseOperatorName, List[PythonSignatureNativeFunctionPair]]:
383
+ grouped: Dict[
384
+ BaseOperatorName, List[PythonSignatureNativeFunctionPair]
385
+ ] = defaultdict(list)
386
+ for pair in pairs:
387
+ if pred(pair.function):
388
+ grouped[pair.function.func.name.name].append(pair)
389
+ return grouped
390
+
391
+
392
+ def create_python_bindings(
393
+ fm: FileManager,
394
+ pairs: Sequence[PythonSignatureNativeFunctionPair],
395
+ pred: Callable[[NativeFunction], bool],
396
+ module: Optional[str],
397
+ filename: str,
398
+ *,
399
+ method: bool,
400
+ symint: bool = True,
401
+ ) -> None:
402
+ """Generates Python bindings to ATen functions"""
403
+ py_methods: List[str] = []
404
+ ops_headers: List[str] = []
405
+ py_method_defs: List[str] = []
406
+ py_forwards: List[str] = []
407
+
408
+ grouped = group_filter_overloads(pairs, pred)
409
+
410
+ for name in sorted(grouped.keys(), key=str):
411
+ overloads = grouped[name]
412
+ py_methods.append(
413
+ method_impl(name, module, overloads, method=method, symint=symint)
414
+ )
415
+ py_method_defs.append(method_def(name, module, overloads, method=method))
416
+ py_forwards.extend(forward_decls(name, overloads, method=method))
417
+ ops_headers.append(f"#include <ATen/ops/{name.base}.h>")
418
+
419
+ fm.write_with_template(
420
+ filename,
421
+ filename,
422
+ lambda: {
423
+ "generated_comment": "@"
424
+ + f"generated from {fm.template_dir_for_comments()}/{filename}",
425
+ "ops_headers": ops_headers,
426
+ "py_forwards": py_forwards,
427
+ "py_methods": py_methods,
428
+ "py_method_defs": py_method_defs,
429
+ },
430
+ )
431
+
432
+
433
+ def create_python_return_type_bindings(
434
+ fm: FileManager,
435
+ pairs: Sequence[PythonSignatureNativeFunctionPair],
436
+ pred: Callable[[NativeFunction], bool],
437
+ filename: str,
438
+ ) -> None:
439
+ """
440
+ Generate functions to initialize and return named tuples for native functions
441
+ which return named tuples, and the registration invocations in `python_return_types.cpp`.
442
+ """
443
+ py_return_types_definition: List[str] = []
444
+ py_return_types_registrations: List[str] = []
445
+
446
+ grouped = group_filter_overloads(pairs, pred)
447
+
448
+ for name in sorted(grouped.keys(), key=str):
449
+ overloads = grouped[name]
450
+ definitions, registrations = generate_return_type_definition_and_registrations(
451
+ overloads
452
+ )
453
+ py_return_types_definition.append(
454
+ "" if not definitions else "\n".join(definitions)
455
+ )
456
+ py_return_types_registrations.append(
457
+ "" if not registrations else "\n".join(registrations)
458
+ )
459
+
460
+ fm.write_with_template(
461
+ filename,
462
+ filename,
463
+ lambda: {
464
+ "generated_comment": "@"
465
+ + f"generated from {fm.template_dir_for_comments()}/{filename}",
466
+ "py_return_types": py_return_types_definition,
467
+ "py_return_types_registrations": py_return_types_registrations,
468
+ },
469
+ )
470
+
471
+
472
+ def create_python_return_type_bindings_header(
473
+ fm: FileManager,
474
+ pairs: Sequence[PythonSignatureNativeFunctionPair],
475
+ pred: Callable[[NativeFunction], bool],
476
+ filename: str,
477
+ ) -> None:
478
+ """
479
+ Generate functions to initialize and return named tuples for native functions
480
+ which return named tuples, and the relevant entries for the map in `python_return_types.cpp`.
481
+ """
482
+ py_return_types_declarations: List[str] = []
483
+
484
+ grouped = group_filter_overloads(pairs, pred)
485
+
486
+ for name in sorted(grouped.keys(), key=str):
487
+ overloads = grouped[name]
488
+ declarations = generate_return_type_declarations(overloads)
489
+ py_return_types_declarations.append(
490
+ "" if not declarations else "\n".join(declarations)
491
+ )
492
+
493
+ fm.write_with_template(
494
+ filename,
495
+ filename,
496
+ lambda: {
497
+ "generated_comment": "@"
498
+ + f"generated from {fm.template_dir_for_comments()}/{filename}",
499
+ "py_return_types_declarations": py_return_types_declarations,
500
+ },
501
+ )
502
+
503
+
504
+ def create_python_bindings_sharded(
505
+ fm: FileManager,
506
+ pairs: Sequence[PythonSignatureNativeFunctionPair],
507
+ pred: Callable[[NativeFunction], bool],
508
+ module: Optional[str],
509
+ filename: str,
510
+ *,
511
+ method: bool,
512
+ num_shards: int,
513
+ symint: bool = True,
514
+ ) -> None:
515
+ """Generates Python bindings to ATen functions"""
516
+ grouped = group_filter_overloads(pairs, pred)
517
+
518
+ def key_func(
519
+ kv: Tuple[BaseOperatorName, List[PythonSignatureNativeFunctionPair]]
520
+ ) -> str:
521
+ return kv[0].base
522
+
523
+ def env_func(
524
+ kv: Tuple[BaseOperatorName, List[PythonSignatureNativeFunctionPair]]
525
+ ) -> Dict[str, List[str]]:
526
+ name, fn_pairs = kv
527
+ return {
528
+ "ops_headers": [f"#include <ATen/ops/{name.base}.h>"],
529
+ "py_forwards": list(forward_decls(name, fn_pairs, method=method)),
530
+ "py_methods": [
531
+ method_impl(name, module, fn_pairs, method=method, symint=symint)
532
+ ],
533
+ "py_method_defs": [method_def(name, module, fn_pairs, method=method)],
534
+ }
535
+
536
+ fm.write_sharded(
537
+ filename,
538
+ grouped.items(),
539
+ base_env={
540
+ "generated_comment": "@"
541
+ + f"generated from {fm.template_dir_for_comments()}/{filename}",
542
+ },
543
+ key_fn=key_func,
544
+ env_callable=env_func,
545
+ num_shards=num_shards,
546
+ sharded_keys={"ops_headers", "py_forwards", "py_methods", "py_method_defs"},
547
+ )
548
+
549
+
550
+ def load_signatures(
551
+ native_functions: List[NativeFunction],
552
+ deprecated_yaml_path: str,
553
+ *,
554
+ method: bool,
555
+ skip_deprecated: bool = False,
556
+ pyi: bool = False,
557
+ ) -> Sequence[PythonSignatureNativeFunctionPair]:
558
+ @with_native_function
559
+ def gen_signature_pairs(f: NativeFunction) -> PythonSignatureNativeFunctionPair:
560
+ return PythonSignatureNativeFunctionPair(
561
+ signature=signature(f, method=method, pyi=pyi),
562
+ function=f,
563
+ )
564
+
565
+ pairs = list(map(gen_signature_pairs, native_functions))
566
+ deprecated = load_deprecated_signatures(
567
+ pairs, deprecated_yaml_path, method=method, pyi=pyi
568
+ )
569
+ return pairs if skip_deprecated else pairs + deprecated
570
+
571
+
572
+ def load_deprecated_signatures(
573
+ pairs: Sequence[PythonSignatureNativeFunctionPair],
574
+ deprecated_yaml_path: str,
575
+ *,
576
+ method: bool,
577
+ pyi: bool,
578
+ ) -> List[PythonSignatureNativeFunctionPair]:
579
+ # The deprecated.yaml doesn't have complete type information, we need
580
+ # find and leverage the original ATen signature (to which it delegates
581
+ # the call) to generate the full python signature.
582
+ # We join the deprecated and the original signatures using type-only form.
583
+
584
+ # group the original ATen signatures by name
585
+ grouped: Dict[str, List[PythonSignatureNativeFunctionPair]] = defaultdict(list)
586
+ for pair in pairs:
587
+ grouped[pair.signature.name].append(pair)
588
+
589
+ # find matching original signatures for each deprecated signature
590
+ results: List[PythonSignatureNativeFunctionPair] = []
591
+
592
+ with open(deprecated_yaml_path) as f:
593
+ deprecated_defs = yaml.load(f, Loader=YamlLoader)
594
+
595
+ for deprecated in deprecated_defs:
596
+ schema = FunctionSchema.parse(deprecated["name"])
597
+ aten_name, call_args = split_name_params(deprecated["aten"])
598
+ is_out = aten_name.endswith("_out")
599
+ if is_out:
600
+ aten_name = aten_name.replace("_out", "")
601
+
602
+ # HACK: these are fixed constants used to pass the aten function.
603
+ # The type must be known ahead of time
604
+ known_constants = {
605
+ "1": Type.parse("Scalar"),
606
+ }
607
+ schema_args_by_name = {a.name: a for a in schema.arguments.flat_all}
608
+ for name in call_args:
609
+ assert (
610
+ name in schema_args_by_name or name in known_constants
611
+ ), f"deprecation definition: Unrecognized value {name}"
612
+
613
+ # Map deprecated signature arguments to their aten signature and test
614
+ # if the types and alias annotation match.
615
+ def is_schema_compatible(
616
+ aten_schema: FunctionSchema,
617
+ ) -> bool:
618
+ arguments: Iterable[Argument]
619
+ if is_out:
620
+ arguments = itertools.chain(
621
+ aten_schema.arguments.out, aten_schema.arguments.flat_non_out
622
+ )
623
+ else:
624
+ arguments = aten_schema.arguments.flat_all
625
+
626
+ for i, arg in enumerate(arguments):
627
+ if i < len(call_args):
628
+ arg_name = call_args[i]
629
+ if arg_name in known_constants:
630
+ schema_type = known_constants[arg_name]
631
+ schema_annotation = None
632
+ else:
633
+ schema_arg = schema_args_by_name[arg_name]
634
+ schema_type = schema_arg.type
635
+ schema_annotation = schema_arg.annotation
636
+
637
+ if schema_type != arg.type or schema_annotation != arg.annotation:
638
+ return False
639
+ else:
640
+ if arg.default is None:
641
+ return False
642
+
643
+ return len(schema.returns) == len(aten_schema.returns) and all(
644
+ a == b for a, b in zip(schema.returns, aten_schema.returns)
645
+ )
646
+
647
+ any_schema_found = False
648
+ for pair in grouped[aten_name]:
649
+ if not is_schema_compatible(pair.function.func):
650
+ continue
651
+ any_schema_found = True
652
+
653
+ python_sig = signature_from_schema(
654
+ schema,
655
+ category_override=pair.function.category_override,
656
+ method=method,
657
+ pyi=pyi,
658
+ )
659
+
660
+ results.append(
661
+ PythonSignatureNativeFunctionPair(
662
+ signature=PythonSignatureDeprecated(
663
+ name=python_sig.name,
664
+ input_args=python_sig.input_args,
665
+ input_kwargs=python_sig.input_kwargs,
666
+ output_args=python_sig.output_args,
667
+ tensor_options_args=python_sig.tensor_options_args,
668
+ method=python_sig.method,
669
+ deprecated_schema=schema,
670
+ deprecated_args_exprs=tuple(call_args),
671
+ returns=python_sig.returns,
672
+ ),
673
+ function=pair.function,
674
+ )
675
+ )
676
+ assert (
677
+ any_schema_found
678
+ ), f"No native function with name {aten_name} matched signature:\n {str(schema)}"
679
+
680
+ return results
681
+
682
+
683
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
684
+ #
685
+ # Named Tuple Codegen
686
+ #
687
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
688
+
689
+
690
+ @with_native_function
691
+ def gen_structseq_typename_key(f: NativeFunction) -> str:
692
+ name = cpp.name(f.func)
693
+ fieldnames = structseq_fieldnames(f.func.returns)
694
+ return "_".join([name] + fieldnames)
695
+
696
+
697
+ def emit_structseq_call(
698
+ overloads: Sequence[PythonSignatureNativeFunctionPair],
699
+ ) -> Tuple[List[str], Dict[str, str]]:
700
+ """
701
+ Generate a block of named tuple typedef initializations, and add typeref snippets
702
+ to the declarations that use them.
703
+ """
704
+ typenames: Dict[
705
+ str, str
706
+ ] = {} # map from unique name + field name lists to typedef name
707
+ typedefs: List[str] = [] # typedef declarations and init code
708
+
709
+ for overload in overloads:
710
+ fieldnames = structseq_fieldnames(overload.function.func.returns)
711
+ if not fieldnames:
712
+ continue
713
+
714
+ name = cpp.name(overload.function.func) # use @with_native_function?
715
+ tn_key = gen_structseq_typename_key(overload.function)
716
+ typename = typenames.get(tn_key)
717
+ if typename is None:
718
+ typename = f'NamedTuple{"" if not typedefs else len(typedefs)}'
719
+ typenames[tn_key] = typename
720
+ typedefs.append(
721
+ f"""\
722
+ static PyTypeObject* {typename} = generated::get_{name}_structseq();"""
723
+ )
724
+
725
+ return typedefs, typenames
726
+
727
+
728
+ def generate_return_type_definition_and_registrations(
729
+ overloads: Sequence[PythonSignatureNativeFunctionPair],
730
+ ) -> Tuple[List[str], List[str]]:
731
+ """
732
+ Generate the block of functions in `python_return_types.cpp` that initialize
733
+ and return the named tuple for a native function which returns a named tuple,
734
+ along with the registration invocations in the same file.
735
+ """
736
+ typenames: Dict[
737
+ str, str
738
+ ] = {} # map from unique name + field name lists to typedef name
739
+ definitions: List[str] = [] # function definition to register the typedef
740
+ registrations: List[str] = [] # register call for the typedef
741
+
742
+ for overload in overloads:
743
+ fieldnames = structseq_fieldnames(overload.function.func.returns)
744
+ if not fieldnames:
745
+ continue
746
+
747
+ fields = ", ".join(f'{{"{fn}", ""}}' for fn in fieldnames)
748
+
749
+ name = cpp.name(overload.function.func) # use @with_native_function?
750
+ tn_key = gen_structseq_typename_key(overload.function)
751
+ typename = typenames.get(tn_key)
752
+
753
+ if typename is None:
754
+ typename = f'{name}NamedTuple{"" if not definitions else len(definitions)}'
755
+ typenames[tn_key] = typename
756
+ definitions.append(
757
+ f"""\
758
+ PyTypeObject* get_{name}_structseq() {{
759
+ static PyStructSequence_Field NamedTuple_fields[] = {{ {fields}, {{nullptr}} }};
760
+ static PyTypeObject {typename};
761
+ static bool is_initialized = false;
762
+ static PyStructSequence_Desc desc = {{ "torch.return_types.{name}", nullptr, NamedTuple_fields, {len(fieldnames)} }};
763
+ if (!is_initialized) {{
764
+ PyStructSequence_InitType(&{typename}, &desc);
765
+ {typename}.tp_repr = (reprfunc)torch::utils::returned_structseq_repr;
766
+ is_initialized = true;
767
+ }}
768
+ return &{typename};
769
+ }}
770
+ """
771
+ )
772
+ registrations.append(
773
+ f'addReturnType(return_types_module, "{name}", generated::get_{name}_structseq());'
774
+ )
775
+
776
+ return definitions, registrations
777
+
778
+
779
+ def generate_return_type_declarations(
780
+ overloads: Sequence[PythonSignatureNativeFunctionPair],
781
+ ) -> List[str]:
782
+ """
783
+ Generate the block of function declarations in `python_return_types.h` that initialize
784
+ and return the named tuple for a native function.
785
+ """
786
+ typenames: Dict[
787
+ str, str
788
+ ] = {} # map from unique name + field name lists to typedef name
789
+ declarations: List[str] = [] # function declaration to register the typedef
790
+
791
+ for overload in overloads:
792
+ fieldnames = structseq_fieldnames(overload.function.func.returns)
793
+ if not fieldnames:
794
+ continue
795
+
796
+ name = cpp.name(overload.function.func) # use @with_native_function?
797
+ tn_key = gen_structseq_typename_key(overload.function)
798
+ typename = typenames.get(tn_key)
799
+
800
+ if typename is None:
801
+ typename = (
802
+ f'{name}NamedTuple{"" if not declarations else len(declarations)}'
803
+ )
804
+ typenames[tn_key] = typename
805
+ declarations.append(f"PyTypeObject* get_{name}_structseq();")
806
+
807
+ return declarations
808
+
809
+
810
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
811
+ #
812
+ # Method Impl Codegen
813
+ #
814
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
815
+
816
+ # python binding for all overloads of a particular function/method
817
+ PY_VARIABLE_METHOD_VARARGS = CodeTemplate(
818
+ r"""\
819
+ // ${name}
820
+ static PyObject * ${pycname}(PyObject* self_, PyObject* args, PyObject* kwargs)
821
+ {
822
+ ${method_header}
823
+ static PythonArgParser parser({
824
+ ${signatures}
825
+ }, /*traceable=*/${traceable});
826
+
827
+ ParsedArgs<${max_args}> parsed_args;
828
+ auto _r = parser.parse(${self_}, args, kwargs, parsed_args);
829
+ ${check_has_torch_function}
830
+ switch (_r.idx) {
831
+ ${dispatch}
832
+ }
833
+ ${method_footer}
834
+ }
835
+
836
+ """
837
+ )
838
+
839
+ # handler for a single parsed signature - may be a single overload or
840
+ # a pair of overloads whose signatures differ only in output params
841
+ # (plugged into PY_VARIABLE_METHOD_VARARGS as an item in ${dispatch})
842
+ PY_VARIABLE_CASE = CodeTemplate(
843
+ """\
844
+ case ${overload_index}: {
845
+ ${body}
846
+ }
847
+ """
848
+ )
849
+
850
+ # python binding for single-overload function/method
851
+ PY_VARIABLE_METHOD_VARARGS_SINGLETON = CodeTemplate(
852
+ """\
853
+ // ${name}
854
+ static PyObject * ${pycname}(PyObject* self_, PyObject* args, PyObject* kwargs)
855
+ {
856
+ ${method_header}
857
+ static PythonArgParser parser({
858
+ ${signatures}
859
+ }, /*traceable=*/${traceable});
860
+
861
+ ParsedArgs<${max_args}> parsed_args;
862
+ auto _r = parser.parse(${self_}, args, kwargs, parsed_args);
863
+ ${check_has_torch_function}
864
+ ${dispatch}
865
+ ${method_footer}
866
+ }
867
+
868
+ """
869
+ )
870
+
871
+ # python binding for a method with no args, shortcuts parsing
872
+ PY_VARIABLE_METHOD_NOARGS = CodeTemplate(
873
+ """\
874
+ // ${name}
875
+ static PyObject * ${pycname}(PyObject* self_, PyObject* args)
876
+ {
877
+ ${method_header}
878
+ ${check_has_torch_function}
879
+ ${dispatch}
880
+ ${method_footer}
881
+ }
882
+
883
+ """
884
+ )
885
+
886
+
887
+ def method_impl(
888
+ name: BaseOperatorName,
889
+ module: Optional[str],
890
+ overloads: Sequence[PythonSignatureNativeFunctionPair],
891
+ *,
892
+ method: bool,
893
+ symint: bool = True,
894
+ ) -> str:
895
+ """
896
+ Generate a python binding for all overloads of an op.
897
+ """
898
+ pycname = get_pycname(name)
899
+ noarg = is_noarg(overloads)
900
+ structseq_inits, structseq_typenames = emit_structseq_call(overloads)
901
+
902
+ method_header = ["HANDLE_TH_ERRORS"]
903
+ method_header += structseq_inits
904
+ method_header += (
905
+ ["const Tensor& self = THPVariable_Unpack(self_);"] if method else []
906
+ )
907
+
908
+ method_footer = ([] if noarg else ["Py_RETURN_NONE;"]) + ["END_HANDLE_TH_ERRORS"]
909
+
910
+ traceable = "true" if all(should_trace(o.function) for o in overloads) else "false"
911
+
912
+ grouped_overloads: Sequence[PythonSignatureGroup] = group_overloads(
913
+ overloads, symint=symint
914
+ )
915
+ is_singleton = len(grouped_overloads) == 1
916
+ signatures: List[str] = []
917
+ dispatch: List[str] = []
918
+ for overload_index, overload in enumerate(grouped_overloads):
919
+ signature = overload.signature.signature_str(symint=symint)
920
+ signatures.append(f"{cpp_string(str(signature))},")
921
+ dispatch_body = emit_dispatch_case(overload, structseq_typenames, symint=symint)
922
+ dispatch.append(
923
+ PY_VARIABLE_CASE.substitute(
924
+ overload_index=overload_index, body=dispatch_body
925
+ )
926
+ if not is_singleton
927
+ else dispatch_body
928
+ )
929
+
930
+ if noarg:
931
+ template = PY_VARIABLE_METHOD_NOARGS
932
+ elif is_singleton:
933
+ template = PY_VARIABLE_METHOD_VARARGS_SINGLETON
934
+ else:
935
+ template = PY_VARIABLE_METHOD_VARARGS
936
+
937
+ return template.substitute(
938
+ name=name,
939
+ pycname=pycname,
940
+ method_header=method_header,
941
+ max_args=max(o.signature.arguments_count() for o in overloads),
942
+ signatures=signatures,
943
+ traceable=traceable,
944
+ check_has_torch_function=gen_has_torch_function_check(
945
+ name=name,
946
+ module=module,
947
+ noarg=noarg,
948
+ method=method,
949
+ ),
950
+ dispatch=dispatch,
951
+ method_footer=method_footer,
952
+ self_="self_" if method else "nullptr",
953
+ )
954
+
955
+
956
+ def gen_has_torch_function_check(
957
+ name: BaseOperatorName, module: Optional[str], *, noarg: bool, method: bool
958
+ ) -> str:
959
+ if noarg:
960
+ if method:
961
+ return f"""\
962
+ if(check_has_torch_function(self_)) {{
963
+ return handle_torch_function(self_, "{name}");
964
+ }}
965
+ """
966
+ else:
967
+ return ""
968
+
969
+ self_ = "self_" if method else "nullptr"
970
+ namespace = (
971
+ {
972
+ "torch": "THPVariableFunctionsModule",
973
+ "torch.nn": "THPNNVariableFunctionsModule",
974
+ "torch.fft": "THPFFTVariableFunctionsModule",
975
+ "torch.linalg": "THPLinalgVariableFunctionsModule",
976
+ "torch.nested": "THPNestedVariableFunctionsModule",
977
+ "torch.sparse": "THPSparseVariableFunctionsModule",
978
+ "torch.special": "THPSpecialVariableFunctionsModule",
979
+ }[module]
980
+ if module
981
+ else "THPVariableClass"
982
+ )
983
+
984
+ return f"""\
985
+ if(_r.has_torch_function()) {{
986
+ return handle_torch_function(_r, {self_}, args, kwargs, {namespace}, "{module or "torch.Tensor"}");
987
+ }}
988
+ """
989
+
990
+
991
+ # handler for output/no-output overload pair
992
+ PY_VARIABLE_OUT = CodeTemplate(
993
+ """\
994
+ if (_r.isNone(${out_idx})) {
995
+ ${call_dispatch}
996
+ } else {
997
+ ${call_dispatch_out}
998
+ }
999
+ """
1000
+ )
1001
+
1002
+
1003
+ def emit_dispatch_case(
1004
+ overload: PythonSignatureGroup,
1005
+ structseq_typenames: Dict[str, str],
1006
+ *,
1007
+ symint: bool = True,
1008
+ ) -> str:
1009
+ """
1010
+ Emit dispatch code for a single parsed signature. This corresponds to either
1011
+ a single native function, or a pair that differ only in output params. In the
1012
+ latter case, a single python signature is used for both and dispatching
1013
+ switches on the presence/absence of passed output args.
1014
+ """
1015
+ if overload.outplace is not None:
1016
+ # dispatch output and no-output variants, branch on _r.isNone(<out_idx>)
1017
+ return PY_VARIABLE_OUT.substitute(
1018
+ out_idx=overload.signature.output_idx(),
1019
+ call_dispatch=emit_single_dispatch(
1020
+ overload.signature, overload.base, structseq_typenames, symint=symint
1021
+ ),
1022
+ call_dispatch_out=emit_single_dispatch(
1023
+ overload.signature,
1024
+ overload.outplace,
1025
+ structseq_typenames,
1026
+ symint=symint,
1027
+ ),
1028
+ )
1029
+ else:
1030
+ # no-output version only
1031
+ return emit_single_dispatch(
1032
+ overload.signature, overload.base, structseq_typenames, symint=symint
1033
+ )
1034
+
1035
+
1036
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
1037
+ #
1038
+ # Forward Declarations Codegen
1039
+ #
1040
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
1041
+
1042
+
1043
+ def forward_decls(
1044
+ name: BaseOperatorName,
1045
+ overloads: Sequence[PythonSignatureNativeFunctionPair],
1046
+ *,
1047
+ method: bool,
1048
+ ) -> Tuple[str, ...]:
1049
+ if method:
1050
+ return ()
1051
+
1052
+ pycname = get_pycname(name)
1053
+ if is_noarg(overloads):
1054
+ return (
1055
+ f"""\
1056
+ static PyObject * {pycname}(PyObject* self_, PyObject* args);
1057
+ """,
1058
+ )
1059
+ else:
1060
+ return (
1061
+ f"""\
1062
+ static PyObject * {pycname}(PyObject* self_, PyObject* args, PyObject* kwargs);
1063
+ """,
1064
+ )
1065
+
1066
+
1067
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
1068
+ #
1069
+ # Method Def (Binding Table Entry) Codegen
1070
+ #
1071
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
1072
+
1073
+
1074
+ def method_def(
1075
+ name: BaseOperatorName,
1076
+ module: Optional[str],
1077
+ overloads: Sequence[PythonSignatureNativeFunctionPair],
1078
+ *,
1079
+ method: bool,
1080
+ ) -> str:
1081
+ """
1082
+ Generate method def entry.
1083
+ """
1084
+ pycname = get_pycname(name)
1085
+
1086
+ if name.dunder_method:
1087
+ # PyMethodDef entry for binary op, throws not implemented error
1088
+ pycname = f"TypeError_to_NotImplemented_<{pycname}>"
1089
+
1090
+ if is_noarg(overloads):
1091
+ flags = "METH_NOARGS" if method else "METH_VARARGS | METH_KEYWORDS"
1092
+ else:
1093
+ pycname = f"castPyCFunctionWithKeywords({pycname})"
1094
+ flags = "METH_VARARGS | METH_KEYWORDS"
1095
+
1096
+ if module == "torch":
1097
+ flags += " | METH_STATIC"
1098
+
1099
+ return f'{{"{name}", {pycname}, {flags}, NULL}},'
1100
+
1101
+
1102
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
1103
+ #
1104
+ # Overload Sorting and Grouping
1105
+ #
1106
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
1107
+
1108
+
1109
+ def group_overloads(
1110
+ overloads: Sequence[PythonSignatureNativeFunctionPair], *, symint: bool = True
1111
+ ) -> Sequence[PythonSignatureGroup]:
1112
+ bases: Dict[str, PythonSignatureNativeFunctionPair] = {}
1113
+ outplaces: Dict[str, PythonSignatureNativeFunctionPair] = {}
1114
+
1115
+ # first group by signature ignoring out arguments
1116
+ for overload in overloads:
1117
+ sig = overload.signature.signature_str(skip_outputs=True, symint=symint)
1118
+ if overload.function.func.is_out_fn():
1119
+ if sig in outplaces:
1120
+ raise RuntimeError(
1121
+ f"Found duplicated function definition:\n- {overload.function.func}.\n"
1122
+ f"Existing definition:\n- {outplaces[sig].function.func}."
1123
+ )
1124
+ outplaces[sig] = overload
1125
+ else:
1126
+ if sig in bases:
1127
+ raise RuntimeError(
1128
+ f"Found duplicated function definition:\n- {overload.function.func}.\n"
1129
+ f"Existing definition:\n- {bases[sig].function.func}."
1130
+ )
1131
+ bases[sig] = overload
1132
+
1133
+ for sig, out in outplaces.items():
1134
+ if sig not in bases:
1135
+ candidates: List[str] = []
1136
+ for overload in overloads:
1137
+ if (
1138
+ str(overload.function.func.name.name)
1139
+ == str(out.function.func.name.name)
1140
+ and not overload.function.func.is_out_fn()
1141
+ and not overload.signature.deprecated
1142
+ ):
1143
+ candidates.append(
1144
+ overload.signature.signature_str(
1145
+ skip_outputs=True, symint=symint
1146
+ )
1147
+ )
1148
+ out_sig = out.signature.signature_str(symint=symint)
1149
+ raise RuntimeError(
1150
+ f"While identifying overloads, we found an out schema {out_sig} without a corresponding non-out variant. "
1151
+ f"We expected the non-out variant to have schema: \n- {sig}\nPlease check that you spelled the schema "
1152
+ "correctly in native_functions.yaml. We discovered the following candidate(s): \n"
1153
+ + "\n".join(f"- {candidate}" for candidate in candidates)
1154
+ )
1155
+
1156
+ grouped = [
1157
+ PythonSignatureGroup.from_pairs(
1158
+ functional=base,
1159
+ out=outplaces.get(sig),
1160
+ )
1161
+ for sig, base in bases.items()
1162
+ ]
1163
+ return sort_overloads(grouped, symint=symint)
1164
+
1165
+
1166
+ # This function declares a partial order on declarations, and sorts them according
1167
+ # to its linear extension. This is necessary, because there's some ambiguity in the
1168
+ # choice of overload, and we want a specific, predictable resolution order.
1169
+ #
1170
+ # See Note[Order of overloads matters]
1171
+ #
1172
+ # A few examples of ambiguous python signature pairs.
1173
+ #
1174
+ # All parameters have the same type, except one taking Tensor the other taking
1175
+ # Scalar. A numeric PyObject can be cast into a Tensor, and a zero-dim Tensor
1176
+ # object can be accepted as a Scalar type parameter (see python_arg_parser.cpp).
1177
+ # Therefore, the same input arguments might be accepted by either python signature.
1178
+ # We want to always parse the one taking Tensor first.
1179
+ #
1180
+ # bitwise_and(Tensor input, Tensor other, *, Tensor out=None)
1181
+ # bitwise_and(Tensor input, Scalar other, *, Tensor out=None)
1182
+ #
1183
+ # If they have a different number of parameters then they are not ambiguous - but
1184
+ # the difference on output param can be ignored as it's optional.
1185
+ #
1186
+ # multiply(Tensor input, Tensor other, *, Tensor out=None)
1187
+ # multiply(Tensor input, Scalar other)
1188
+ #
1189
+ # Both positional args and keyword-only args are considered together.
1190
+ #
1191
+ # subtract(Tensor other, *, Scalar alpha=1)
1192
+ # subtract(Scalar other, Scalar alpha=1)
1193
+ #
1194
+ # A few ambiguous cases which it does NOT handle yet.
1195
+ #
1196
+ # If there is any difference in other parameters besides the Tensor/Scalar
1197
+ # difference, then they are not considered ambiguous by this method anymore.
1198
+ # However, the difference could be too trivial to disambiguate.
1199
+ #
1200
+ # foo(Tensor input, Scalar other, Scalar bar)
1201
+ # foo(Tensor input, Tensor other, double bar)
1202
+ #
1203
+ # If they take a different number of parameters then they are not considered
1204
+ # ambiguous anymore, even if the difference is only on optional kwargs.
1205
+ #
1206
+ # foo(Scalar other, Scalar alpha=1)
1207
+ # foo(Tensor other, *, Scalar alpha=1, Scalar beta=1)
1208
+ #
1209
+
1210
+
1211
+ def sort_overloads(
1212
+ grouped_overloads: Sequence[PythonSignatureGroup], *, symint: bool = True
1213
+ ) -> Sequence[PythonSignatureGroup]:
1214
+ # NB: Smaller here means lower priority
1215
+
1216
+ def is_arg_smaller(t1: Type, t2: Type) -> bool:
1217
+ return (
1218
+ str(t1) == "Scalar"
1219
+ and str(t2) == "Tensor"
1220
+ or str(t1) == "Scalar?"
1221
+ and str(t2) == "Tensor?"
1222
+ or "Dimname" in str(t1)
1223
+ and "Dimname" not in str(t2)
1224
+ or
1225
+ # In the discussion https://github.com/pytorch/pytorch/issues/54555 it has been
1226
+ # discussed why it is important to prioritize int/int? over int[]
1227
+ str(t1) == "int[]"
1228
+ and (str(t2) == "int" or str(t2) == "int?")
1229
+ or
1230
+ # TensorList currently throws an error during argument parsing, that's why it needs to be
1231
+ # last in signature ordering. See discussion: https://github.com/pytorch/pytorch/issues/58087
1232
+ str(t1) == "Tensor[]"
1233
+ and str(t2).find("[]") != -1
1234
+ or
1235
+ # Prioritize IntArrayRef overload over SymIntArrayRef
1236
+ str(t1) == "SymInt[]"
1237
+ and str(t2) == "int[]"
1238
+ or
1239
+ # Make sure both int and SymInt are sorted consistently w.r.t. Tensor since Tensor can be implicitly
1240
+ # converted to either int or SymInt. Prioritize the Tensor overload since it otherwise gets shadowed.
1241
+ (str(t1) == "SymInt" or str(t1) == "int")
1242
+ and str(t2) == "Tensor"
1243
+ )
1244
+
1245
+ def is_smaller(s1: PythonSignature, s2: PythonSignature) -> bool:
1246
+ """Returns True if s1 < s2 in the partial order."""
1247
+ args1, args2 = s1.arguments(skip_outputs=True), s2.arguments(skip_outputs=True)
1248
+ if len(args1) != len(args2):
1249
+ return False
1250
+ # TODO: should use some canonical form instead of 'str(arg.type)' - see comments
1251
+ # above. The old codegen used the deprecated 'dynamic_type(arg.type)', which
1252
+ # ignores the optional annotation, i.e. treats 'Scalar' and 'Scalar?' as the same.
1253
+ equal = all(arg1.type == arg2.type for arg1, arg2 in zip(args1, args2))
1254
+ smaller_or_equal = all(
1255
+ str(arg1.type) == str(arg2.type) or is_arg_smaller(arg1.type, arg2.type)
1256
+ for arg1, arg2 in zip(args1, args2)
1257
+ )
1258
+ return smaller_or_equal and not equal
1259
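+ # For example (illustrative): comparing foo(Scalar other) against
+ # foo(Tensor other), is_smaller returns True for the Scalar signature, so the
+ # Tensor overload ends up earlier (higher priority) in the final ordering.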
+
1260
+ # First sort by signature
1261
+ grouped_overloads = sorted(
1262
+ grouped_overloads, key=lambda x: x.signature.signature_str(symint=symint)
1263
+ )
1264
+
1265
+ # Construct the relation graph
1266
+ larger_than: Dict[int, Set[int]] = defaultdict(set)
1267
+ for i1, overload1 in enumerate(grouped_overloads):
1268
+ for i2, overload2 in enumerate(grouped_overloads):
1269
+ if is_smaller(overload1.signature, overload2.signature):
1270
+ larger_than[i1].add(i2)
1271
+
1272
+ if not larger_than:
1273
+ return list(grouped_overloads)
1274
+
1275
+ # Use a topological sort to sort overloads according to the partial order.
1276
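+ # (This is essentially Kahn's algorithm: start from the overloads with nothing
+ # larger than them, and emit an overload once everything larger than it has
+ # already been emitted, so the result runs from highest to lowest priority.)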
+ N = len(grouped_overloads)
1277
+ sorted_ids: List[int] = list(filter(lambda x: x not in larger_than, range(N)))
1278
+
1279
+ for idx in range(N):
1280
+ # The size of sorted_ids will grow to N eventually.
1281
+ i = sorted_ids[idx]
1282
+ for j in sorted(larger_than.keys()):
1283
+ larger = larger_than[j]
1284
+ larger.discard(i)
1285
+ if not larger:
1286
+ del larger_than[j]
1287
+ sorted_ids.append(j)
1288
+
1289
+ return [grouped_overloads[x] for x in sorted_ids]
1290
+
1291
+
1292
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
1293
+ #
1294
+ # Codegen API Integration
1295
+ #
1296
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
1297
+
1298
+
1299
+ def emit_single_dispatch(
1300
+ ps: PythonSignature,
1301
+ f: NativeFunction,
1302
+ structseq_typenames: Dict[str, str],
1303
+ *,
1304
+ symint: bool = True,
1305
+ ) -> str:
1306
+ """
1307
+ Emit dispatch code for a single native function.
1308
+ """
1309
+
1310
+ @with_native_function
1311
+ def go(f: NativeFunction) -> str:
1312
+ # header comments
1313
+ if isinstance(ps, PythonSignatureDeprecated):
1314
+ schema_comment = f"// [deprecated] aten::{ps.deprecated_schema}"
1315
+ else:
1316
+ schema_comment = f"// aten::{f.func}"
1317
+
1318
+ deprecated = "[deprecated] " if ps.deprecated else ""
1319
+
1320
+ # dispatch lambda signature
1321
+ name = cpp.name(f.func)
1322
+ lambda_formals = ", ".join(
1323
+ f"{a.type_str} {a.name}" for a in dispatch_lambda_args(ps, f, symint=symint)
1324
+ )
1325
+ lambda_return = dispatch_lambda_return_str(f)
1326
+
1327
+ # dispatch lambda body
1328
+ dispatch_callee = cpp_dispatch_target(f)
1329
+ dispatch_args = ", ".join(cpp_dispatch_exprs(f, python_signature=ps))
1330
+
1331
+ # from arg parser outputs to dispatch lambda arguments
1332
+ parser_outputs = arg_parser_output_exprs(ps, f, symint=symint)
1333
+ lambda_arg_exprs = dispatch_lambda_exprs(ps, f, symint=symint)
1334
+ inits = "\n".join(lambda_arg_exprs.inits)
1335
+ lambda_args = ", ".join(lambda_arg_exprs.exprs)
1336
+
1337
+ # scatter fields
1338
+ # TODO: Checking `ps.method and ('requires_grad' in parser_outputs)` is a hacky
1339
+ # solution for enabling the 'requires_grad' argument for tensor methods
1340
+ # new_full, new_empty, and new_zeros. A much better but more difficult to
1341
+ # implement solution involves refactoring according to Ed's description here:
1342
+ # https://github.com/pytorch/pytorch/issues/36455#issuecomment-614767589
1343
+ need_set_requires_grad = ps.tensor_options_args and (
1344
+ not has_tensor_options(f)
1345
+ or (ps.method and ("requires_grad" in parser_outputs))
1346
+ )
1347
+ set_requires_grad = (
1348
+ f'.set_requires_grad({parser_outputs["requires_grad"].expr})'
1349
+ if need_set_requires_grad
1350
+ else ""
1351
+ )
1352
+
1353
+ if lambda_return == "void":
1354
+ # Make in-place foreach return `self` at python-binding level.
1355
+ # ref: https://github.com/pytorch/pytorch/pull/118622#pullrequestreview-1904804954
1356
+ self_arg = f.func.arguments.self_arg
1357
+ return_stmt: str
1358
+ if (
1359
+ str(f.func.name).startswith("_foreach_")
1360
+ and f.func.kind() == SchemaKind.inplace
1361
+ ):
1362
+ # note(crcrpar): `_foreach_pow.ScalarAndTensor` does NOT have its in-place
1363
+ # variant and is unlikely to have one in the future. Thus it's safe to have the following assert.
1364
+ assert self_arg is not None and is_tensor_list_type(
1365
+ self_arg.argument.type
1366
+ )
1367
+ return_stmt = """PyObject* self_tensorlist = _r.args[0];
1368
+ Py_INCREF(self_tensorlist);
1369
+ return self_tensorlist;
1370
+ """
1371
+ else:
1372
+ return_stmt = "Py_RETURN_NONE;"
1373
+ return f"""\
1374
+ {schema_comment}
1375
+ {inits}
1376
+ auto dispatch_{name} = []({lambda_formals}) -> {lambda_return} {{
1377
+ pybind11::gil_scoped_release no_gil;
1378
+ {dispatch_callee}({dispatch_args});
1379
+ }};
1380
+ dispatch_{name}({lambda_args}){set_requires_grad};
1381
+ {return_stmt}
1382
+ """
1383
+ else:
1384
+ typename = structseq_typenames.get(gen_structseq_typename_key(f))
1385
+ structseq_typeref = f"{typename}, " if typename is not None else ""
1386
+ return f"""\
1387
+ {schema_comment}
1388
+ {inits}
1389
+ auto dispatch_{name} = []({lambda_formals}) -> {lambda_return} {{
1390
+ pybind11::gil_scoped_release no_gil;
1391
+ return {dispatch_callee}({dispatch_args});
1392
+ }};
1393
+ return wrap({structseq_typeref}dispatch_{name}({lambda_args}){set_requires_grad});
1394
+ """
1395
+
1396
+ return go(f)
llmeval-env/lib/python3.10/site-packages/torchgen/packaged/autograd/gen_trace_type.py ADDED
@@ -0,0 +1,535 @@
1
+ import itertools
2
+ from typing import Dict, List, Sequence, Union
3
+
4
+ from torchgen.api import cpp
5
+ from torchgen.api.types import DispatcherSignature
6
+ from torchgen.code_template import CodeTemplate
7
+ from torchgen.context import with_native_function
8
+ from torchgen.model import Argument, NativeFunction, SchemaKind, TensorOptionsArguments
9
+ from torchgen.utils import FileManager
10
+
11
+ # Note [Manual Backend kernels]
12
+ # For these ops, we want to manually register to dispatch key Backend and
13
+ # skip codegen-ed registration to all keys before Backend.
14
+ # For codegen this means:
15
+ # - op set below must match ops with manual_kernel_registration=True in native_functions.yaml
16
+ # where we skip codegen backend kernels
17
+ # - all ops below are part of MANUAL_AUTOGRAD to skip codegen Autograd kernel registration
18
+ # - all ops below are part of MANUAL_TRACER to skip codegen Tracer kernel registration
19
+ # Note: we still register to dispatch key Profiler for these ops, keeping it untouched for now.
20
+ # You can find the manual registration in torch/csrc/autograd/VariableTypeManual.cpp
21
+ MANUAL_BACKEND = {
22
+ "options",
23
+ "data",
24
+ "set_data",
25
+ "is_leaf",
26
+ "output_nr",
27
+ "_version",
28
+ "retain_grad",
29
+ "_backward",
30
+ "requires_grad_",
31
+ }
32
+
33
+ # For these ops we want to skip the codegen-ed registration to both Autograd and Tracer keys.
34
+ # You can find the manual registration in torch/csrc/autograd/VariableTypeManual.cpp
35
+ MANUAL_AUTOGRAD_AND_TRACER = {
36
+ "resize_",
37
+ "resize_as_",
38
+ "detach",
39
+ "detach_",
40
+ "copy_",
41
+ "_fw_primal",
42
+ "_make_dual",
43
+ }
44
+
45
+ # Currently MANUAL_AUTOGRAD and MANUAL_TRACER share the same set of ops:
46
+ # union(MANUAL_BACKEND, MANUAL_AUTOGRAD_AND_TRACER)
47
+ # You can find the manual registration in torch/csrc/autograd/VariableTypeManual.cpp
48
+ MANUAL_AUTOGRAD = MANUAL_TRACER = MANUAL_BACKEND | MANUAL_AUTOGRAD_AND_TRACER
49
+
50
+ # These functions we don't want to record for tracing, because we always want
51
+ # to trace their constituent parts. This is a temporary hack in lieu
52
+ # of proper scopes, where subsequent compilation passes can ask for the unfolding
53
+ # on demand. Only concrete ATen methods can be disabled this way; it will have
54
+ # NO EFFECT otherwise.
55
+ DONT_RECORD_TRACE = {
56
+ "convolution",
57
+ "conv1d",
58
+ "conv2d",
59
+ "conv3d",
60
+ "conv_transpose1d",
61
+ "conv_transpose2d",
62
+ "conv_transpose3d",
63
+ "lstm_cell",
64
+ "gru_cell",
65
+ "rnn_tanh_cell",
66
+ "rnn_relu_cell",
67
+ # FIXME: figure out a better way when we support sparse tensors in jit
68
+ "_coalesced",
69
+ }
70
+
71
+
72
+ def should_trace(f: NativeFunction) -> bool:
73
+ # Operations involving Storage or Type are not traceable at the moment
74
+ if any(
75
+ str(arg.type) in {"Storage", "Type", "ConstQuantizerPtr"}
76
+ for arg in f.func.schema_order_arguments()
77
+ ):
78
+ return False
79
+ # We can't trace functions which don't have any Tensor or TensorList returns
80
+ if not any(r.type.is_tensor_like() for r in f.func.returns):
81
+ return False
82
+ return f.func.name.name.base not in DONT_RECORD_TRACE
83
+
84
+
85
+ SELECT = CodeTemplate(
86
+ """\
87
+
88
+ if (${cond}) {
89
+ ${true}
90
+ } else {
91
+ ${false}
92
+ }
93
+ """
94
+ )
95
+
96
+ OP_NAME = CodeTemplate(
97
+ """\
98
+ op_name = c10::Symbol::fromQualString("aten::${trace_name}");
99
+ """
100
+ )
101
+
102
+ # These functions have their names renamed when they are recorded in the trace:
103
+ RENAME_TRACE = {
104
+ "zero": "zeros_like", # replacing aten::zero_ with aten::zeros_like
105
+ "fill": "full_like", # replacing aten::fill_ with aten::full_like
106
+ }
107
+
108
+
109
+ def format_trace_op_name(f: NativeFunction) -> str:
110
+ # TODO: byte-for-byte compatible with old codegen behavior - should clean up
111
+ if (
112
+ f.func.kind() in (SchemaKind.functional, SchemaKind.out)
113
+ or f.func.name.name.dunder_method
114
+ ):
115
+ # special case for *_out functions: the in-place and out-of-place ops
116
+ # are overloaded with the same name in the JIT
117
+ trace_name = str(f.func.name.name)
118
+ trace_name = RENAME_TRACE.get(trace_name, trace_name)
119
+ return OP_NAME.substitute(trace_name=trace_name)
120
+
121
+ # otherwise, this is an in-place op and we need to emit both in- and
122
+ # out-of-place versions
123
+ outplace_trace_name = f.func.name.name.base
124
+ inplace_trace_name = cpp.name(f.func)
125
+ outplace_trace_name = RENAME_TRACE.get(outplace_trace_name, outplace_trace_name)
126
+ inplace_trace_name = RENAME_TRACE.get(inplace_trace_name, inplace_trace_name)
127
+
128
+ return SELECT.substitute(
129
+ cond="tracer_state->force_outplace",
130
+ true=OP_NAME.substitute(trace_name=outplace_trace_name),
131
+ false=OP_NAME.substitute(trace_name=inplace_trace_name),
132
+ )
133
+
134
+
135
+ ADD_TRACE_INPUT = CodeTemplate("""jit::tracer::addInputs(node, "${name}", ${input});""")
136
+
137
+
138
+ def format_trace_inputs(f: NativeFunction) -> str:
139
+ def dispatch_trace_input(
140
+ arg: Union[Argument, TensorOptionsArguments]
141
+ ) -> Sequence[str]:
142
+ if isinstance(arg, TensorOptionsArguments):
143
+ name = "options"
144
+ return [
145
+ ADD_TRACE_INPUT.substitute(
146
+ name=name, input="c10::optTypeMetaToScalarType(options.dtype_opt())"
147
+ ),
148
+ ADD_TRACE_INPUT.substitute(name=name, input="options.layout()"),
149
+ ADD_TRACE_INPUT.substitute(name=name, input="options.device()"),
150
+ ADD_TRACE_INPUT.substitute(name=name, input="options.pinned_memory()"),
151
+ ]
152
+ else:
153
+ name = arg.name
154
+ if str(arg.type) == "Tensor?[]":
155
+ return [f'jit::tracer::addInputs(node, "{name}", {name});']
156
+ else:
157
+ return [ADD_TRACE_INPUT.substitute(name=name, input=name)]
158
+
159
+ args: List[Union[Argument, TensorOptionsArguments]] = list(
160
+ f.func.schema_order_arguments()
161
+ )
162
+
163
+ if f.func.is_out_fn():
164
+ # *_out functions take the result as a separate argument, but we don't want to
165
+ # trace that argument directly. Instead, we trace its TensorOptions.
166
+ # So first, we need to remove the out argument from the list of arguments to trace.
167
+ num_out_args = len(f.func.arguments.out)
168
+ args = args[:-num_out_args]
169
+
170
+ trace_inputs = itertools.chain.from_iterable(
171
+ dispatch_trace_input(arg) for arg in args
172
+ )
173
+
174
+ if f.func.is_out_fn():
175
+ # for *_out functions, handle the result argument differently for inplace/outplace.
176
+ # For inplace: just add the input to the end to conform to the JIT schema
177
+ inplace = [
178
+ ADD_TRACE_INPUT.substitute(
179
+ name=f.func.arguments.out[i].name, input=f.func.arguments.out[i].name
180
+ )
181
+ for i in range(num_out_args)
182
+ ]
183
+
184
+ # for outplace: do nothing, except if the function is a factory.
185
+ # Factories are a bit special because their out-of-place overloads
186
+ # take an extra TensorOptions argument, which is missing in the _out function
187
+ has_tensor_return = any(r.type.is_tensor_like() for r in f.func.returns)
188
+ has_tensor_input_arg = any(
189
+ a.type.is_tensor_like() for a in f.func.arguments.flat_non_out
190
+ )
191
+ is_factory_method = f.category_override == "factory" or (
192
+ has_tensor_return and not has_tensor_input_arg
193
+ )
194
+
195
+ # HACK: preserve old codegen behavior - the old codegen set the `is_factory_method`
196
+ # flag for the whole family of ops with the same basename if any of them is a
197
+ # factory method. In most cases the whole family of ops are indeed all factory
198
+ # methods - 'normal' is the only exception. So we handle it specially here to avoid
199
+ # cloning the old logic.
200
+ if f.func.name.name.base == "normal":
201
+ is_factory_method = True
202
+
203
+ if is_factory_method:
204
+ outplace = [
205
+ ADD_TRACE_INPUT.substitute(
206
+ name="out",
207
+ input="c10::optTypeMetaToScalarType(out.options().dtype_opt())",
208
+ ),
209
+ ADD_TRACE_INPUT.substitute(name="out", input="out.options().layout()"),
210
+ ADD_TRACE_INPUT.substitute(name="out", input="out.options().device()"),
211
+ ADD_TRACE_INPUT.substitute(
212
+ name="out", input="out.options().pinned_memory()"
213
+ ),
214
+ ]
215
+ else:
216
+ outplace = []
217
+
218
+ trace_inputs = itertools.chain(
219
+ trace_inputs,
220
+ [
221
+ SELECT.substitute(
222
+ cond="tracer_state->force_outplace",
223
+ true="\n".join(outplace),
224
+ false="\n".join(inplace),
225
+ )
226
+ ],
227
+ )
228
+
229
+ return "\n".join(trace_inputs)
230
+
231
+
232
+ # `torch.jit.trace` has an undocumented keyword argument `_force_outplace`,
233
+ # which forces jit to replace functions with their outplace variants (for
234
+ # example `aten::add_` becomes `aten::add`).
235
+ #
236
+ # This replacement is implemented in-place with minimal modifications to
237
+ # the argument stack (as it assumes that the outplace call has the same arguments
238
+ # as the inplace version).
239
+ #
240
+ # However, there are no such substitutions available for `aten::fill_`
241
+ # and `aten::zero_` operators, as we never implemented `aten::fill`
242
+ # and `aten::zero`. So the jit tracing hack replaces `aten::zero_` with
243
+ # `aten::zeros_like` and `aten::fill_` with `aten::full_like`.
244
+ #
245
+ # But as they can potentially have different arguments, we also have
246
+ # to hack into the stack and add missing ones.
247
+ #
248
+ # A possible alternative would be:
249
+ #
250
+ # - Add `aten::fill` and `aten::zero`
251
+ #
252
+ # - Or keep `aten::zeros_like` arguments aligned with `aten::zero_`
253
+ # arguments (inside of the `native_functions.yaml`)
254
+ RENAME_TRACE_ADD_ARGS = {
255
+ "fill": """\
256
+ jit::tracer::addInputs(node, "options", c10::optional<ScalarType>());
257
+ jit::tracer::addInputs(node, "options", layout_or_default(c10::nullopt));
258
+ jit::tracer::addInputs(node, "options", device_or_default(c10::nullopt));
259
+ jit::tracer::addInputs(node, "options", pinned_memory_or_default(c10::nullopt));
260
+ c10::optional<MemoryFormat> memory_format = c10::MemoryFormat::Preserve;
261
+ jit::tracer::addInputs(node, "memory_format", memory_format);
262
+ """,
263
+ "zero": """\
264
+ jit::tracer::addInputs(node, "options", c10::optional<ScalarType>());
265
+ jit::tracer::addInputs(node, "options", layout_or_default(c10::nullopt));
266
+ jit::tracer::addInputs(node, "options", device_or_default(c10::nullopt));
267
+ jit::tracer::addInputs(node, "options", pinned_memory_or_default(c10::nullopt));
268
+ c10::optional<MemoryFormat> memory_format = c10::MemoryFormat::Preserve;
269
+ jit::tracer::addInputs(node, "memory_format", memory_format);
270
+ """,
271
+ }
272
+
273
+ INPLACE_GUARD = CodeTemplate(
274
+ """\
275
+ jit::tracer::ensureUniqueIfOutOfPlaced("${name}", ${mutable_input});
276
+ """
277
+ )
278
+
279
+ PRE_RECORD_TRACE = CodeTemplate(
280
+ """\
281
+ torch::jit::Node* node = nullptr;
282
+ std::shared_ptr<jit::tracer::TracingState> tracer_state;
283
+ if (jit::tracer::isTracing()) {
284
+ tracer_state = jit::tracer::getTracingState();
285
+ at::Symbol op_name;
286
+ ${set_op_name}
287
+ node = tracer_state->createNode(op_name, /*num_outputs=*/0);
288
+ jit::tracer::recordSourceLocation(node);
289
+ ${add_trace_inputs}
290
+ tracer_state->insertNode(node);
291
+ ${inplace_guard}
292
+ jit::tracer::setTracingState(nullptr);
293
+ }
294
+ """
295
+ )
296
+
297
+
298
+ def format_prerecord_trace(f: NativeFunction) -> str:
299
+ if not should_trace(f):
300
+ return ""
301
+
302
+ # TODO: clean up old codegen behavior
303
+ is_inplace = (
304
+ f.func.kind() in (SchemaKind.inplace, SchemaKind.out)
305
+ and not f.func.name.name.dunder_method
306
+ )
307
+ add_args = (
308
+ RENAME_TRACE_ADD_ARGS.get(f.func.name.name.base, "") if is_inplace else ""
309
+ )
310
+ additional_inputs = (
311
+ SELECT.substitute(
312
+ cond="tracer_state->force_outplace",
313
+ true=add_args,
314
+ false="",
315
+ )
316
+ if add_args
317
+ else ""
318
+ )
319
+
320
+ return PRE_RECORD_TRACE.substitute(
321
+ set_op_name=format_trace_op_name(f),
322
+ add_trace_inputs=format_trace_inputs(f) + additional_inputs,
323
+ inplace_guard=INPLACE_GUARD.substitute(
324
+ name=cpp.name(f.func),
325
+ mutable_input=f.func.arguments.out[0].name
326
+ if f.func.arguments.out
327
+ else "self",
328
+ )
329
+ if is_inplace
330
+ else "",
331
+ )
332
+
333
+
334
+ POST_RECORD_TRACE = CodeTemplate(
335
+ """\
336
+ if (tracer_state) {
337
+ jit::tracer::setTracingState(std::move(tracer_state));
338
+ ${add_trace_outputs}
339
+ }
340
+ """
341
+ )
342
+
343
+
344
+ def format_postrecord_trace(f: NativeFunction) -> str:
345
+ if not should_trace(f):
346
+ return ""
347
+
348
+ # For outplacing ops, *_out overloads require special handling to move the
349
+ # output *argument* to a return value
350
+ if f.func.is_out_fn():
351
+ output_names_outplace = [arg.name for arg in f.func.arguments.out]
352
+ output_names_inplace = cpp.return_names(f)
353
+
354
+ # Code size optimization: the common case is that the return value is
355
+ # the same for both variants
356
+ if output_names_outplace == output_names_inplace:
357
+ outputs = [
358
+ f"jit::tracer::addOutput(node, {n});" for n in output_names_outplace
359
+ ]
360
+ return POST_RECORD_TRACE.substitute(add_trace_outputs=outputs)
361
+
362
+ selection = SELECT.substitute(
363
+ cond="force_outplace",
364
+ true="\n".join(
365
+ f"jit::tracer::addOutput(node, {n});" for n in output_names_outplace
366
+ ),
367
+ false="\n".join(
368
+ f"jit::tracer::addOutput(node, {n});" for n in output_names_inplace
369
+ ),
370
+ )
371
+ return POST_RECORD_TRACE.substitute(add_trace_outputs=selection)
372
+ else:
373
+ output_names = cpp.return_names(f)
374
+ outputs = [f"jit::tracer::addOutput(node, {n});" for n in output_names]
375
+ return POST_RECORD_TRACE.substitute(add_trace_outputs=outputs)
376
+
377
+
378
+ def tie_return_values(f: NativeFunction) -> str:
379
+ if len(f.func.returns) == 1:
380
+ return f'auto {f.func.returns[0].name or "result"}'
381
+ names = cpp.return_names(f)
382
+ return f'auto [{", ".join(names)}]'
383
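+ # For example (illustrative): a schema returning (Tensor values, Tensor indices)
+ # yields "auto [values, indices]", while a single unnamed return yields "auto result".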
+
384
+
385
+ def get_return_value(f: NativeFunction) -> str:
386
+ names = cpp.return_names(f)
387
+ if len(f.func.returns) == 1:
388
+ return names[0]
389
+ if f.func.kind() == SchemaKind.out:
390
+ return f'std::forward_as_tuple({", ".join(names)})'
391
+ else:
392
+ moved = ", ".join(f"std::move({name})" for name in names)
393
+ return f"std::make_tuple({moved})"
394
+
395
+
396
+ TRACE_DISPATCH = CodeTemplate(
397
+ """\
398
+ ${assign_return_values}at::_ops::${unambiguous_name}::redispatch(${unpacked_args});"""
399
+ )
400
+
401
+
402
+ def emit_trace_body(f: NativeFunction) -> List[str]:
403
+ trace_body: List[str] = []
404
+
405
+ trace_body.append(format_prerecord_trace(f))
406
+
407
+ dispatcher_sig = DispatcherSignature.from_schema(f.func)
408
+ dispatcher_exprs = dispatcher_sig.exprs()
409
+
410
+ # code-generated tracing kernels plumb and recompute dispatch keys directly through the kernel for performance.
411
+ # See Note [Plumbing Keys Through The Dispatcher] for details.
412
+ dispatch_key_set = "ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::Tracer)"
413
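+ # i.e. keep only the dispatch keys strictly after Tracer, so the redispatch
+ # below skips the tracing kernel (and everything above it) and falls through
+ # to the next kernel in line.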
+ redispatch_args = ", ".join([dispatch_key_set] + [a.expr for a in dispatcher_exprs])
414
+
415
+ assign_return_values = (
416
+ f"{tie_return_values(f)} = "
417
+ if f.func.kind() in [SchemaKind.functional, SchemaKind.mutable]
418
+ and f.func.returns
419
+ else ""
420
+ )
421
+
422
+ # Note that this calls the slow, dispatching variants of manual_cpp_binding ops.
423
+ # We could probably work harder to ensure that the fast variants are
424
+ # called instead, but the perf benefit would be minimal.
425
+ trace_body.append(
426
+ TRACE_DISPATCH.substitute(
427
+ assign_return_values=assign_return_values,
428
+ unambiguous_name=f.func.name.unambiguous_name(),
429
+ unpacked_args=redispatch_args,
430
+ )
431
+ )
432
+
433
+ trace_body.append(format_postrecord_trace(f))
434
+ if f.func.returns:
435
+ trace_body.append(f"return {get_return_value(f)};")
436
+ return trace_body
437
+
438
+
439
+ METHOD_DEFINITION = CodeTemplate(
440
+ """\
441
+ ${return_type} ${type_wrapper_name}(${formals}) {
442
+ ${type_definition_body}
443
+ }
444
+ """
445
+ )
446
+
447
+
448
+ def type_wrapper_name(f: NativeFunction, key: str = "Default") -> str:
449
+ if f.func.name.overload_name:
450
+ name = f"{cpp.name(f.func)}_{f.func.name.overload_name}"
451
+ else:
452
+ name = cpp.name(f.func)
453
+
454
+ # The key argument is only used in gen_variable_type where we need fns per autograd dispatch key.
455
+ # In gen_trace_type and gen_inplace_view_type where only one fn per native_fn must be generated,
456
+ # the key argument should not be passed.
457
+ # We do not append key if it is Default so that generated functions from
458
+ # before per-dispatch-key derivatives were added retain the same names.
459
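+ # For example (illustrative): a wrapper that would be named "mul_Tensor" keeps
+ # that name for the Default key and becomes "mul_Tensor_AutogradCPU" when
+ # key="AutogradCPU".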
+ if key != "Default":
460
+ name = name + f"_{key}"
461
+ return name
462
+
463
+
464
+ @with_native_function
465
+ def method_definition(f: NativeFunction) -> str:
466
+ assert cpp.name(f.func) not in MANUAL_TRACER
467
+
468
+ formals = ", ".join(
469
+ # code-generated tracing kernels plumb and recompute dispatch keys directly through the kernel for performance.
470
+ # See Note [Plumbing Keys Through The Dispatcher] for details.
471
+ ["c10::DispatchKeySet ks"]
472
+ + [
473
+ f'{cpp.argument_type(a, binds="__placeholder__", symint=True).cpp_type()} {a.name}'
474
+ for a in f.func.schema_order_arguments()
475
+ ]
476
+ )
477
+
478
+ return METHOD_DEFINITION.substitute(
479
+ return_type=cpp.returns_type(f.func.returns, symint=True).cpp_type(),
480
+ type_wrapper_name=type_wrapper_name(f),
481
+ formals=formals,
482
+ type_definition_body=emit_trace_body(f),
483
+ )
484
+
485
+
486
+ WRAPPER_REGISTRATION = CodeTemplate(
487
+ """\
488
+ m.impl("${name}",
489
+ TORCH_FN(${class_type}::${type_wrapper_name})
490
+ );
491
+ """
492
+ )
493
+
494
+
495
+ @with_native_function
496
+ def method_registration(f: NativeFunction) -> str:
497
+ assert cpp.name(f.func) not in MANUAL_TRACER
498
+
499
+ return WRAPPER_REGISTRATION.substitute(
500
+ name=f.func.name,
501
+ type_wrapper_name=type_wrapper_name(f),
502
+ class_type="TraceType",
503
+ )
504
+
505
+
506
+ def gen_trace_type_func(fn: NativeFunction) -> Dict[str, List[str]]:
507
+ return {
508
+ "ops_headers": [f"#include <ATen/ops/{fn.root_name}_ops.h>"],
509
+ "trace_method_definitions": [method_definition(fn)],
510
+ "trace_wrapper_registrations": [method_registration(fn)],
511
+ }
512
+
513
+
514
+ def gen_trace_type(
515
+ out: str, native_functions: List[NativeFunction], template_path: str
516
+ ) -> None:
517
+ # NOTE: see Note [Sharded File] at the top of the VariableType.cpp
518
+ # template regarding sharding of the generated files.
519
+ fm = FileManager(install_dir=out, template_dir=template_path, dry_run=False)
520
+ fm.write_sharded(
521
+ "TraceType.cpp",
522
+ [fn for fn in native_functions if cpp.name(fn.func) not in MANUAL_TRACER],
523
+ key_fn=lambda fn: fn.root_name,
524
+ base_env={
525
+ "generated_comment": "@"
526
+ + f"generated from {fm.template_dir_for_comments()}/TraceType.cpp",
527
+ },
528
+ env_callable=gen_trace_type_func,
529
+ num_shards=5,
530
+ sharded_keys={
531
+ "ops_headers",
532
+ "trace_method_definitions",
533
+ "trace_wrapper_registrations",
534
+ },
535
+ )
llmeval-env/lib/python3.10/site-packages/torchgen/packaged/autograd/gen_variable_factories.py ADDED
@@ -0,0 +1,115 @@
1
+ # Generates C++ functions that wrap ATen tensor factory methods to turn them into Variables.
2
+ #
3
+ # This writes one file: variable_factories.h
4
+
5
+ import re
6
+ from typing import List, Optional
7
+
8
+ import torchgen.api.python as python
9
+ from torchgen.api import cpp
10
+
11
+ from torchgen.api.types import CppSignatureGroup
12
+ from torchgen.context import with_native_function
13
+ from torchgen.gen import parse_native_yaml
14
+ from torchgen.model import NativeFunction, TensorOptionsArguments, Variant
15
+ from torchgen.utils import FileManager, mapMaybe
16
+
17
+ OPTIONAL_TYPE_PATTERN = re.compile(r"c10::optional<(.+)>")
18
+ TYPE_PATTERN = re.compile(r"(?:const\s+)?([A-Z]\w+)")
19
+
20
+
21
+ # Add 'at::' to types defined in the ATen namespace, e.g. Tensor, TensorList, IntArrayRef, etc.
22
+ # TODO: maybe update the cpp argument API to take optional namespace argument?
23
+ def fully_qualified_type(argument_type: str) -> str:
24
+ def maybe_optional_type(type: str, is_opt: bool) -> str:
25
+ return f"c10::optional<{type}>" if is_opt else type
26
+
27
+ opt_match = OPTIONAL_TYPE_PATTERN.match(argument_type)
28
+ is_opt = opt_match is not None
29
+ if opt_match:
30
+ argument_type = argument_type[opt_match.start(1) : opt_match.end(1)]
31
+ match = TYPE_PATTERN.match(argument_type)
32
+ if match is None:
33
+ return maybe_optional_type(argument_type, is_opt)
34
+ index = match.start(1)
35
+ qualified_type = f"{argument_type[:index]}at::{argument_type[index:]}"
36
+ return maybe_optional_type(qualified_type, is_opt)
37
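+ # For example (illustrative): "Tensor" -> "at::Tensor" and
+ # "c10::optional<IntArrayRef>" -> "c10::optional<at::IntArrayRef>", while
+ # lowercase/primitive types such as "int64_t" pass through unchanged.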
+
38
+
39
+ def gen_variable_factories(
40
+ out: str, native_yaml_path: str, tags_yaml_path: str, template_path: str
41
+ ) -> None:
42
+ native_functions = parse_native_yaml(
43
+ native_yaml_path, tags_yaml_path
44
+ ).native_functions
45
+ factory_functions = [fn for fn in native_functions if is_factory_function(fn)]
46
+ fm = FileManager(install_dir=out, template_dir=template_path, dry_run=False)
47
+ fm.write_with_template(
48
+ "variable_factories.h",
49
+ "variable_factories.h",
50
+ lambda: {
51
+ "generated_comment": "@"
52
+ + f"generated from {fm.template_dir_for_comments()}/variable_factories.h",
53
+ "ops_headers": [
54
+ f"#include <ATen/ops/{fn.root_name}.h>" for fn in factory_functions
55
+ ],
56
+ "function_definitions": list(mapMaybe(process_function, factory_functions)),
57
+ },
58
+ )
59
+
60
+
61
+ @with_native_function
62
+ def is_factory_function(f: NativeFunction) -> bool:
63
+ if Variant.function not in f.variants:
64
+ return False
65
+
66
+ name = cpp.name(f.func)
67
+ has_tensor_options = python.has_tensor_options(f)
68
+ return has_tensor_options or name.endswith("_like")
69
+
70
+
71
+ @with_native_function
72
+ def process_function(f: NativeFunction) -> Optional[str]:
73
+ name = cpp.name(f.func)
74
+ has_tensor_options = python.has_tensor_options(f)
75
+ is_factory = has_tensor_options or name.endswith("_like")
76
+
77
+ if Variant.function not in f.variants or not is_factory:
78
+ return None
79
+
80
+ cpp_sigs = CppSignatureGroup.from_native_function(f, method=False)
81
+ sigs = [cpp_sigs.signature]
82
+ if cpp_sigs.symint_signature is not None:
83
+ sigs.append(cpp_sigs.symint_signature)
84
+ r = ""
85
+ for sig in sigs:
86
+ formals: List[str] = []
87
+ exprs: List[str] = []
88
+ requires_grad = "false"
89
+ for arg in sig.arguments():
90
+ qualified_type = fully_qualified_type(arg.type)
91
+ if arg.default:
92
+ formals.append(f"{qualified_type} {arg.name} = {arg.default}")
93
+ else:
94
+ formals.append(f"{qualified_type} {arg.name}")
95
+
96
+ if isinstance(arg.argument, TensorOptionsArguments):
97
+ # note: we remove the requires_grad setting from the TensorOptions because
98
+ # it is ignored anyway (and we actually have an assertion that it isn't set
99
+ # which would fail otherwise). We handle requires_grad explicitly here
100
+ # instead of passing it through to the kernel.
101
+ exprs.append(
102
+ f"at::TensorOptions({arg.name}).requires_grad(c10::nullopt)"
103
+ )
104
+ # Manually set the requires_grad bit on the result tensor.
105
+ requires_grad = f"{arg.name}.requires_grad()"
106
+ else:
107
+ exprs.append(arg.name)
108
+
109
+ r += f"""\
110
+ inline at::Tensor {sig.name()}({', '.join(formals)}) {{
111
+ at::AutoDispatchBelowADInplaceOrView guard;
112
+ return autograd::make_variable(at::{sig.name()}({', '.join(exprs)}), /*requires_grad=*/{requires_grad});
113
+ }}
114
+ """
115
+ return r
llmeval-env/lib/python3.10/site-packages/torchgen/packaged/autograd/gen_variable_type.py ADDED
@@ -0,0 +1,2162 @@
1
+ # Generates VariableType.h/cpp
2
+ #
3
+ # **If any changes are being made to the VariableType codegen please also check
4
+ # if updates are needed in torch/csrc/autograd/autograd_not_implemented_fallback.cpp
5
+ #
6
+ # VariableType is a subclass of at::Type that provides the binding code
7
+ # necessary to provide a differentiable version of ATen operators. There are a
8
+ # number of different things we could mean:
9
+ #
10
+ # - Given a non-differentiable forward implementation, we might
11
+ # directly associate it with a backward implementation to make
12
+ # it differentiable. This is the common case.
13
+ #
14
+ # - Some functions don't need a backwards implementation, because
15
+ # backpropagation will never propagate beyond them. There are a
16
+ # number of different reasons why this may be the case:
17
+ #
18
+ # - The function has no differentiable inputs
19
+ # - The function's output is not differentiable
20
+ # - The function has no data dependency on its input
21
+ #
22
+ # - Some functions don't need a backwards implementation because they
23
+ # are implemented as a composition of other (differentiable) ATen
24
+ # functions. These are dispatched directly to the Type superclass,
25
+ # which will in turn dispatch back to VariableType for its
26
+ # differentiable subcomponents.
27
+ #
28
+ import re
29
+ from typing import Callable, Dict, List, Optional, Sequence, Set, Tuple, Union
30
+
31
+ from torchgen.api import cpp
32
+ from torchgen.api.autograd import (
33
+ DifferentiableInput,
34
+ dispatch_strategy,
35
+ ForwardDerivative,
36
+ gen_differentiable_outputs,
37
+ is_differentiable,
38
+ NativeFunctionWithDifferentiabilityInfo,
39
+ SavedAttribute,
40
+ )
41
+
42
+ from torchgen.api.types import (
43
+ ArrayRefCType,
44
+ BaseCppType,
45
+ BaseCType,
46
+ Binding,
47
+ DispatcherSignature,
48
+ intArrayRefT,
49
+ iTensorListRefT,
50
+ ListCType,
51
+ MutRefCType,
52
+ OptionalCType,
53
+ scalarT,
54
+ SpecialArgName,
55
+ stringT,
56
+ symIntArrayRefT,
57
+ TENSOR_LIST_LIKE_CTYPES,
58
+ tensorListT,
59
+ tensorT,
60
+ TupleCType,
61
+ VectorCType,
62
+ )
63
+ from torchgen.code_template import CodeTemplate
64
+ from torchgen.context import (
65
+ native_function_manager,
66
+ with_native_function,
67
+ with_native_function_and,
68
+ )
69
+ from torchgen.model import (
70
+ Argument,
71
+ BaseType,
72
+ ListType,
73
+ NativeFunction,
74
+ SchemaKind,
75
+ SelfArgument,
76
+ TensorOptionsArguments,
77
+ )
78
+ from torchgen.utils import FileManager, mapMaybe
79
+
80
+ from .context import with_native_function_with_differentiability_info_and_key
81
+ from .gen_inplace_or_view_type import (
82
+ ALL_VIEW_FUNCTIONS,
83
+ ASSIGN_RETURN_VALUE,
84
+ AUTOGRAD_NOT_IMPLEMENTED_REGISTRATION,
85
+ gen_formals,
86
+ get_base_name,
87
+ get_view_info,
88
+ is_tensor_list_type,
89
+ is_tensor_type,
90
+ METHOD_DEFINITION,
91
+ modifies_arguments,
92
+ TMP_VAR,
93
+ unpack_args,
94
+ unpacked_name,
95
+ use_derived,
96
+ WRAPPER_REGISTRATION,
97
+ )
98
+ from .gen_trace_type import (
99
+ get_return_value,
100
+ MANUAL_AUTOGRAD_AND_TRACER,
101
+ MANUAL_BACKEND,
102
+ tie_return_values,
103
+ type_wrapper_name,
104
+ )
105
+
106
+ # We don't set or modify grad_fn on these methods. Generally, they return
107
+ # tensors that have requires_grad=False. In-place functions listed here will
108
+ # not examine or modify requires_grad or grad_fn.
109
+ # NB: this does NOT include overload name
110
+ DONT_REQUIRE_DERIVATIVE = {
111
+ # These only depend on the input Tensor's shape and device, not the data
112
+ "empty_like",
113
+ "ones_like",
114
+ "full_like",
115
+ "zeros_like",
116
+ "rand_like",
117
+ "randn_like",
118
+ "new_empty",
119
+ "new_empty_strided",
120
+ "new_full",
121
+ "new_zeros",
122
+ "new_ones",
123
+ # These are only implemented on integral types
124
+ "__and__",
125
+ "__iand__",
126
+ "__ilshift__",
127
+ "__ior__",
128
+ "__irshift__",
129
+ "__ixor__",
130
+ "__lshift__",
131
+ "__or__",
132
+ "__rshift__",
133
+ "__xor__",
134
+ # These work on integral data types, and hence don't require a derivative
135
+ "_sobol_engine_draw",
136
+ "_sobol_engine_ff",
137
+ "_sobol_engine_scramble_",
138
+ "_sobol_engine_initialize_state_",
139
+ # This is an unsafe method that is meant to be out of reach of autograd.
140
+ "_coalesced_",
141
+ # Quantize functions should not record gradients
142
+ "quantize_per_tensor",
143
+ "quantize_per_channel",
144
+ # Functions that return integers should not have outputs that require gradients
145
+ "argmax",
146
+ "argmin",
147
+ "argsort",
148
+ "searchsorted",
149
+ "bucketize",
150
+ # Functions that return booleans are not differentiable
151
+ "isnan",
152
+ "isposinf",
153
+ "isneginf",
154
+ "isinf",
155
+ "signbit",
156
+ "isin",
157
+ "allclose",
158
+ # Functions that return None are not differentiable
159
+ "record_stream",
160
+ # These functions are not differentiable
161
+ "logical_and",
162
+ "logical_xor",
163
+ "logical_not",
164
+ "logical_or",
165
+ # These functions return nested tensor metadata (sizes, strides, storage offsets) as non-differentiable tensors
166
+ "_nested_tensor_size",
167
+ "_nested_tensor_strides",
168
+ "_nested_tensor_storage_offsets",
169
+ }
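Since the set above stores base names with no overload suffix, membership has to be checked against the operator's base name. A minimal sketch for intuition only (the helper and the sample operator string below are hypothetical, not part of this file):

# Hypothetical helper; the real codegen uses get_base_name(f) for this lookup.
def skips_derivative(qualified_name: str) -> bool:
    base = qualified_name.split(".")[0]  # drop the overload name, e.g. "foo.out" -> "foo"
    return base in DONT_REQUIRE_DERIVATIVE

assert skips_derivative("empty_like.out")  # matches via the base name "empty_like"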
170
+
171
+ # The C -> R functions at the time of adding this are still being audited and tested
172
+ # but will not error out.
173
+ # C -> C, R -> C functions for which backward is correctly implemented and tested
174
+ GRADIENT_IMPLEMENTED_FOR_COMPLEX = {
175
+ "fill",
176
+ "t",
177
+ "view",
178
+ "reshape",
179
+ "reshape_as",
180
+ "view_as",
181
+ "roll",
182
+ "clone",
183
+ "block_diag",
184
+ "diag_embed",
185
+ "repeat",
186
+ "expand",
187
+ "flip",
188
+ "fliplr",
189
+ "flipud",
190
+ "rot90",
191
+ "nanmean",
192
+ "nansum",
193
+ "transpose",
194
+ "permute",
195
+ "squeeze",
196
+ "unsqueeze",
197
+ "resize",
198
+ "resize_as",
199
+ "tril",
200
+ "triu",
201
+ "chunk",
202
+ "zero_",
203
+ "eq_",
204
+ "ne_",
205
+ "add",
206
+ "__radd__",
207
+ "sum",
208
+ "_conj",
209
+ "sin",
210
+ "cos",
211
+ "mul",
212
+ "sinc",
213
+ "sinh",
214
+ "cosh",
215
+ "__rmul__",
216
+ "sgn",
217
+ "asin",
218
+ "acos",
219
+ "sub",
220
+ "div",
221
+ "cat",
222
+ "view_as_complex",
223
+ "index_put",
224
+ "neg",
225
+ "complex",
226
+ "select",
227
+ "where",
228
+ "as_strided",
229
+ "as_strided_scatter",
230
+ "slice",
231
+ "constant_pad_nd",
232
+ "unbind",
233
+ "split",
234
+ "split_with_sizes",
235
+ "unsafe_split",
236
+ "split_with_sizes_backward",
237
+ "dot",
238
+ "vdot",
239
+ "cholesky",
240
+ "triangular_solve",
241
+ "mm",
242
+ "_unsafe_view",
243
+ "mv",
244
+ "outer",
245
+ "bmm",
246
+ "diagonal",
247
+ "alias",
248
+ "atan",
249
+ "log",
250
+ "log10",
251
+ "log1p",
252
+ "log2",
253
+ "logaddexp",
254
+ "logcumsumexp",
255
+ "reciprocal",
256
+ "tan",
257
+ "pow",
258
+ "rsqrt",
259
+ "tanh",
260
+ "tanh_backward",
261
+ "asinh",
262
+ "acosh",
263
+ "atanh",
264
+ "take",
265
+ "fill_",
266
+ "exp",
267
+ "exp2",
268
+ "expm1",
269
+ "nonzero",
270
+ "mean",
271
+ "std_mean",
272
+ "var_mean",
273
+ "inverse",
274
+ "solve",
275
+ "linalg_cholesky",
276
+ "addcmul",
277
+ "addcdiv",
278
+ "matrix_exp",
279
+ "linalg_matrix_exp",
280
+ "_linalg_eigh",
281
+ "cholesky_solve",
282
+ "linalg_qr",
283
+ "_linalg_svd",
284
+ "_fft_c2c",
285
+ "_fft_r2c",
286
+ "linalg_solve",
287
+ "sqrt",
288
+ "stack",
289
+ "gather",
290
+ "index_select",
291
+ "index_add_",
292
+ "linalg_inv",
293
+ "linalg_inv_ex",
294
+ "baddbmm",
295
+ "addbmm",
296
+ "addmm",
297
+ "addmv",
298
+ "addr",
299
+ "linalg_householder_product",
300
+ "ormqr",
301
+ "reflection_pad1d",
302
+ "reflection_pad2d",
303
+ "reflection_pad3d",
304
+ "linalg_cholesky_ex",
305
+ "linalg_eig",
306
+ "diagonal_copy",
307
+ "diagonal_scatter",
308
+ "select_backward",
309
+ "diagonal_backward",
310
+ "slice_backward",
311
+ "reflection_pad1d_backward",
312
+ "reflection_pad2d_backward",
313
+ "reflection_pad3d_backward",
314
+ "_sparse_sparse_matmul",
315
+ "replication_pad1d",
316
+ "replication_pad2d",
317
+ "replication_pad3d",
318
+ "put",
319
+ "put_",
320
+ "_to_copy",
321
+ "replication_pad1d_backward",
322
+ "replication_pad2d_backward",
323
+ "replication_pad3d_backward",
324
+ "diag",
325
+ "masked_scatter",
326
+ "masked_select",
327
+ "index_add",
328
+ "index_fill",
329
+ "trace",
330
+ "polar",
331
+ "cumsum",
332
+ "rsub",
333
+ "eig",
334
+ "lerp",
335
+ "linalg_vector_norm",
336
+ "cumprod",
337
+ "prod",
338
+ "index_copy",
339
+ "lu",
340
+ "unfold",
341
+ "unfold_backward",
342
+ "index",
343
+ "masked_fill",
344
+ "masked_scatter_backward",
345
+ "linalg_cross",
346
+ "lu_unpack",
347
+ "renorm",
348
+ "_conj_physical",
349
+ "linalg_lu_factor_ex",
350
+ "scatter",
351
+ "scatter_add",
352
+ "sigmoid",
353
+ "sigmoid_backward",
354
+ "sparse_mask",
355
+ "trapezoid",
356
+ "cumulative_trapezoid",
357
+ "conj_physical_",
358
+ "_neg_view",
359
+ "_reshape_alias",
360
+ "_reshape_copy",
361
+ "_linalg_det",
362
+ "lu_solve",
363
+ "linalg_solve_triangular",
364
+ "linalg_pinv",
365
+ "linalg_lstsq",
366
+ "unfold_copy",
367
+ "col2im",
368
+ "im2col",
369
+ "cholesky_inverse",
370
+ "to_sparse",
371
+ "sparse_sampled_addmm",
372
+ "linalg_lu",
373
+ "pixel_shuffle",
374
+ "pixel_unshuffle",
375
+ "linalg_lu_solve",
376
+ "_linalg_slogdet",
377
+ "_linalg_solve_ex",
378
+ }
379
+
380
+ GRADIENT_IMPLEMENTED_FOR_SPARSE_COMPLEX = {
381
+ "_to_dense",
382
+ "_coalesce",
383
+ "coalesce",
384
+ "values",
385
+ "_sparse_coo_tensor_with_dims_and_tensors",
386
+ "_sparse_addmm",
387
+ }
388
+
389
+ GRADIENT_IMPLEMENTED_FOR_COMPLEX.update(GRADIENT_IMPLEMENTED_FOR_SPARSE_COMPLEX)
390
+
391
+ # Some operators invalidate the grad_accumulator. Let's reset it.
392
+ RESET_GRAD_ACCUMULATOR = {"set_", "resize_"}
393
+
394
+ # NOTE [ TensorImpl and Storage Pointer Sanity Checks ]
395
+ #
396
+ # We check the following properties:
397
+ # 1) A function should never change the input tensors' underlying c10::TensorImpl
398
+ # pointers or c10::Storage pointers, even if it modifies its input tensors (via
399
+ # inplace or out-variants)
400
+ # If the function does not modify its arguments, we also check the following properties
401
+ # pertaining to its output:
402
+ # 2) Its TensorImpl has use_count of 1
403
+ # 3) If the function is a view function, it has the same StorageImpl as that of
404
+ # the input it is aliased with. Otherwise, its StorageImpl has use_count of 1
405
+ #
406
+ # The following code templates implement the checks for this invariant:
407
+ SAVE_TENSOR_STORAGE = CodeTemplate(
408
+ """\
409
+ c10::optional<Storage> ${tensor_name}_storage_saved =
410
+ ${tensor_name}.has_storage() ? c10::optional<Storage>(${tensor_name}.storage()) : c10::nullopt;
411
+ """
412
+ )
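As a rough illustration of how these CodeTemplate objects are consumed later in this file, substituting a concrete tensor name expands the placeholders into the C++ emitted into the generated kernels. The tensor name "self" below is just an example value:

# Illustrative substitution; the expanded C++ is sketched in the comment below.
cpp_check = SAVE_TENSOR_STORAGE.substitute(tensor_name="self")
# Expected to expand roughly to:
#   c10::optional<Storage> self_storage_saved =
#     self.has_storage() ? c10::optional<Storage>(self.storage()) : c10::nullopt;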
413
+
414
+
415
+ # If tensor_name == out_tensor_name, used to enforce (1), otherwise used for (2)
416
+ ENFORCE_SAME_TENSOR_STORAGE = CodeTemplate(
417
+ """\
418
+ if (${tensor_name}_storage_saved.has_value() &&
419
+ !at::impl::dispatch_mode_enabled() &&
420
+ !at::impl::tensor_has_dispatch(${tensor_name}))
421
+ TORCH_INTERNAL_ASSERT(${tensor_name}_storage_saved.value().is_alias_of(${out_tensor_name}.storage()));
422
+ """
423
+ )
424
+
425
+ SAVE_TENSORLIST_STORAGE = CodeTemplate(
426
+ """\
427
+ std::vector<c10::optional<Storage>> ${tensorlist_name}_storage_saved(${tensorlist_name}.size());
428
+ for (const Tensor& tensor : ${tensorlist_name})
429
+ ${tensorlist_name}_storage_saved.push_back(
430
+ tensor.has_storage() ? c10::optional<Storage>(tensor.storage()) : c10::nullopt);
431
+ """
432
+ )
433
+
434
+ ENFORCE_SAME_TENSORLIST_STORAGE = CodeTemplate(
435
+ """\
436
+ for (size_t i=0; i<${tensorlist_name}.size() && !at::impl::dispatch_mode_enabled(); i++) {
437
+ if (${tensorlist_name}_storage_saved[i].has_value() && !at::impl::tensorlist_has_dispatch(${tensorlist_name}))
438
+ TORCH_INTERNAL_ASSERT(${tensorlist_name}_storage_saved[i].value().is_alias_of(${tensorlist_name}[i].storage()));
439
+ }
440
+ """
441
+ )
442
+
443
+ SAVE_OPTIONALTENSORLIST_STORAGE = CodeTemplate(
444
+ """\
445
+ std::vector<c10::optional<Storage>> ${tensorlist_name}_storage_saved(${tensorlist_name}.size());
446
+ for (const c10::optional<Tensor>& tensor : ${tensorlist_name})
447
+ ${tensorlist_name}_storage_saved.push_back(
448
+ tensor.has_value() && tensor->has_storage() ? c10::optional<Storage>(tensor->storage()) : c10::nullopt);
449
+ """
450
+ )
451
+
452
+ ENFORCE_SAME_OPTIONALTENSORLIST_STORAGE = CodeTemplate(
453
+ """\
454
+ for (size_t i=0; i<${tensorlist_name}.size() && !at::impl::dispatch_mode_enabled(); i++) {
455
+ if (${tensorlist_name}_storage_saved[i].has_value() && !at::impl::tensorlist_has_dispatch(${tensorlist_name}))
456
+ TORCH_INTERNAL_ASSERT(${tensorlist_name}_storage_saved[i].value().is_alias_of(
457
+ static_cast<c10::optional<Tensor>>(${tensorlist_name}[i])->storage()));
458
+ }
459
+ """
460
+ )
461
+
462
+ SAVE_TENSOR_IMPL = CodeTemplate(
463
+ """\
464
+ c10::intrusive_ptr<TensorImpl> ${tensor_name}_impl_saved;
465
+ if (${tensor_name}.defined()) ${tensor_name}_impl_saved = ${tensor_name}.getIntrusivePtr();
466
+ """
467
+ )
468
+
469
+ ENFORCE_SAME_TENSOR_IMPL = CodeTemplate(
470
+ """\
471
+ if (${tensor_name}_impl_saved && !at::impl::dispatch_mode_enabled() && !at::impl::tensor_has_dispatch(${tensor_name}))
472
+ TORCH_INTERNAL_ASSERT(${tensor_name}_impl_saved == ${tensor_name}.getIntrusivePtr());
473
+ """
474
+ )
475
+
476
+ ENFORCE_TENSOR_IMPL_USE_COUNT_LT_OR_EQ_ONE = CodeTemplate(
477
+ """\
478
+ if (!at::impl::dispatch_mode_enabled() && !at::impl::tensor_has_dispatch(${tensor_name}))
479
+ TORCH_INTERNAL_ASSERT(${tensor_name}.use_count() <= 1, "function: ${fn_name}");
480
+ """
481
+ )
482
+
483
+ ENFORCE_TENSOR_STORAGE_USE_COUNT_EQUALS_ONE = CodeTemplate(
484
+ """\
485
+ if (${tensor_name}.has_storage() && !at::impl::dispatch_mode_enabled() && !at::impl::tensor_has_dispatch(${tensor_name})) {
486
+ TORCH_INTERNAL_ASSERT(${tensor_name}.storage().use_count() == 1, "function: ${fn_name}");
487
+ }
488
+ """
489
+ )
490
+
491
+ SAVE_TENSORLIST_IMPL = CodeTemplate(
492
+ """\
493
+ std::vector<c10::intrusive_ptr<TensorImpl>> ${tensorlist_name}_impl_saved(${tensorlist_name}.size());
494
+ for (size_t i=0; i<${tensorlist_name}.size(); i++)
495
+ if (${tensorlist_name}[i].defined()) ${tensorlist_name}_impl_saved[i] = ${tensorlist_name}[i].getIntrusivePtr();
496
+ """
497
+ )
498
+
499
+ ENFORCE_SAME_TENSORLIST_IMPL = CodeTemplate(
500
+ """\
501
+ for (size_t i=0; i<${tensorlist_name}.size() && !at::impl::dispatch_mode_enabled(); i++) {
502
+ if (${tensorlist_name}_impl_saved[i] && !at::impl::tensorlist_has_dispatch(${tensorlist_name}))
503
+ TORCH_INTERNAL_ASSERT(${tensorlist_name}_impl_saved[i] == ${tensorlist_name}[i].getIntrusivePtr());
504
+ }
505
+ """
506
+ )
507
+
508
+ SAVE_OPTIONALTENSORLIST_IMPL = CodeTemplate(
509
+ """\
510
+ std::vector<c10::intrusive_ptr<TensorImpl>> ${tensorlist_name}_impl_saved(${tensorlist_name}.size());
511
+ for (size_t i=0; i<${tensorlist_name}.size(); i++) {
512
+ c10::optional<Tensor> t = ${tensorlist_name}[i];
513
+ if (t.has_value() && t->defined()) ${tensorlist_name}_impl_saved[i] = t->getIntrusivePtr();
514
+ }
515
+ """
516
+ )
517
+
518
+ ENFORCE_SAME_OPTIONALTENSORLIST_IMPL = CodeTemplate(
519
+ """\
520
+ for (size_t i=0; i<${tensorlist_name}.size() && !at::impl::dispatch_mode_enabled(); i++) {
521
+ if (${tensorlist_name}_impl_saved[i])
522
+ TORCH_INTERNAL_ASSERT(
523
+ ${tensorlist_name}_impl_saved[i] == static_cast<c10::optional<Tensor>>(${tensorlist_name}[i])->getIntrusivePtr());
524
+ }
525
+ """
526
+ )
527
+
528
+ # The following list contains functions that we don't enforce the invariant on.
529
+ DONT_ENFORCE_SAME_TENSOR_IMPL_OR_STORAGE = {
530
+ # These functions are expected to change impl or storage of input tensors
531
+ "set_",
532
+ "_cudnn_rnn_flatten_weight",
533
+ }
534
+ DONT_ENFORCE_TENSOR_IMPL_USE_COUNT = {
535
+ # These non-inplace, non-out functions return tensors with use_count > 1
536
+ # Therefore, they MAY (but not necessarily) return one of their inputs as-is
537
+ # See https://github.com/pytorch/pytorch/issues/60426 for more information
538
+ "_embedding_bag",
539
+ "_embedding_bag_forward_only",
540
+ "q_per_channel_scales",
541
+ "q_per_channel_zero_points",
542
+ "lu_unpack",
543
+ "_cudnn_rnn_backward",
544
+ # The ops below failed the StorageImpl use_count check, but we skip the tensor_impl check
545
+ # just in case
546
+ "_cudnn_rnn",
547
+ "dequantize_self",
548
+ # lift() should never actually be called with a requires_grad=True tensor,
549
+ "lift",
550
+ "lift_fresh",
551
+ "lift_fresh_copy",
552
+ # Nested Tensors related functions
553
+ # _nested_tensor_size() should never actually be called with requires_grad=True tensor
554
+ "_nested_tensor_size",
555
+ "_nested_tensor_strides",
556
+ "_nested_tensor_storage_offsets",
557
+ }
558
+
559
+ DONT_ENFORCE_STORAGE_IMPL_USE_COUNT = {
560
+ # These non-view functions return tensors with storage use_count != 1
561
+ "_slow_conv2d_forward",
562
+ "slow_conv3d_forward",
563
+ "channel_shuffle",
564
+ # If an input is returned as-is in output, we cannot guarantee its storage_impl
565
+ # use count to be 1 either.
566
+ *DONT_ENFORCE_TENSOR_IMPL_USE_COUNT,
567
+ }
568
+ # END CHECKS FOR [ TensorImpl and Storage Pointer Sanity Checks ]
569
+
570
+ DECLARE_GRAD_FN = CodeTemplate(
571
+ """\
572
+ std::shared_ptr<${op}> grad_fn;
573
+ """
574
+ )
575
+
576
+ DECLARE_VECTOR_OF_GRAD_FN = CodeTemplate(
577
+ """\
578
+ std::vector<std::shared_ptr<${op}>> grad_fns;
579
+ """
580
+ )
581
+
582
+ SETUP_ANY_REQUIRES_GRAD = CodeTemplate(
583
+ """\
584
+ [[maybe_unused]] auto _any_requires_grad = compute_requires_grad( ${args_with_derivatives} );
585
+ ${extra_differentiability_conditions}
586
+ """
587
+ )
588
+
589
+ SETUP_DERIVATIVE = CodeTemplate(
590
+ """\
591
+ if (_any_requires_grad) {
592
+ ${setup}
593
+ }
594
+ """
595
+ )
596
+
597
+ SETUP_NONE_REQUIRES_GRAD = CodeTemplate(
598
+ """\
599
+ if (compute_requires_grad( ${args_to_check} )) {
600
+ throw_error_out_requires_grad("${base_name}");
601
+ }
602
+ """
603
+ )
604
+
605
+ ASSIGN_GRAD_FN = CodeTemplate(
606
+ """\
607
+ grad_fn = std::shared_ptr<${op}>(new ${op}(${op_ctor}), deleteNode);
608
+ grad_fn->set_next_edges(collect_next_edges( ${args_with_derivatives} ));
609
+ """
610
+ )
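To give a sense of what the two templates above generate, here is a sketch with placeholder values (the node name and argument names are hypothetical, and the list argument is assumed to be comma-joined by CodeTemplate when it appears inline):

# Hypothetical values purely for illustration.
decl = DECLARE_GRAD_FN.substitute(op="MulBackward0")
assign = ASSIGN_GRAD_FN.substitute(
    op="MulBackward0",
    op_ctor="",
    args_with_derivatives=["self", "other"],
)
# Expected to expand roughly to:
#   std::shared_ptr<MulBackward0> grad_fn;
#   grad_fn = std::shared_ptr<MulBackward0>(new MulBackward0(), deleteNode);
#   grad_fn->set_next_edges(collect_next_edges( self, other ));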
611
+
612
+ # note(crcrpar): `compute_requires_grad` in the template below is supplied with arguments indexed with `i`
613
+ # while the `SETUP_ANY_REQUIRES_GRAD` above takes whole tensors and scalars.
614
+ ASSIGN_VECTOR_OF_GRAD_FN = CodeTemplate(
615
+ """\
616
+ for (const auto& i : c10::irange( ${irange} )) {
617
+ const auto ith_requires_grad = compute_requires_grad(${args_with_derivatives});
618
+ check_inplace(self[i], ith_requires_grad);
619
+ grad_fns.push_back([&]() -> std::shared_ptr<${op}> {
620
+ if (!ith_requires_grad) {
621
+ return nullptr;
622
+ } else {
623
+ auto grad_fn = std::shared_ptr<${op}>(new ${op}(${op_ctor}), deleteNode);
624
+ grad_fn->set_next_edges(collect_next_edges( ${args_with_derivatives} ));
625
+ return grad_fn;
626
+ }
627
+ }());
628
+ }
629
+ """
630
+ )
631
+
632
+ CALL_REDISPATCH = CodeTemplate(
633
+ """\
634
+ at::redispatch::${api_name}(${unpacked_args})"""
635
+ )
636
+ # If the non-variable operation has return values, we use the `tmp` variable to hold the
637
+ # values temporarily and pass the values to the return variables outside of the
638
+ # `at::AutoDispatchBelowAutograd` guard block.
639
+ DISPATCH_TO_NON_VAR_TYPE_WITH_TMP_RETURN_VALUES_JVP_DECOMP = CodeTemplate(
640
+ """\
641
+ auto ${tmp_var} = ([&]() {
642
+ if (${any_has_forward_grad}) {
643
+ static c10::OperatorName full_name("aten::${op_name}", "${op_overload}");
644
+ static c10::optional<c10::OperatorHandle> opt_op = c10::Dispatcher::singleton().findSchema(full_name);
645
+ return impl::run_jit_decomposition_with_args_for_jvp<${return_types}>("${op_name}", *opt_op, ks, ${arg_names});
646
+ } else {
647
+ ${guard}
648
+ return ${base_type_call};
649
+ }
650
+ })();
651
+ """
652
+ )
653
+
654
+ DISPATCH_TO_NON_VAR_TYPE_WITH_TMP_RETURN_VALUES = CodeTemplate(
655
+ """\
656
+ auto ${tmp_var} = ([&]() {
657
+ ${guard}
658
+ return ${base_type_call};
659
+ })();
660
+ """
661
+ )
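To make the `tmp` pattern described above concrete, the template can be filled in roughly as follows (all substitution values here are hypothetical examples rather than the output for any particular operator):

sketch = DISPATCH_TO_NON_VAR_TYPE_WITH_TMP_RETURN_VALUES.substitute(
    tmp_var="tmp",
    guard="at::AutoDispatchBelowADInplaceOrView guard;",
    base_type_call="at::redispatch::mul(ks & c10::after_autograd_keyset, self_, other_)",
)
# Expected to expand roughly to:
#   auto tmp = ([&]() {
#     at::AutoDispatchBelowADInplaceOrView guard;
#     return at::redispatch::mul(ks & c10::after_autograd_keyset, self_, other_);
#   })();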
662
+
663
+ DISPATCH_TO_NON_VAR_TYPE_WITHOUT_RETURN_VALUES = CodeTemplate(
664
+ """\
665
+ {
666
+ ${guard}
667
+ ${base_type_call};
668
+ }
669
+ """
670
+ )
671
+
672
+ SET_HISTORY = CodeTemplate(
673
+ """\
674
+ if (grad_fn) {
675
+ ${fn}_history(${differentiable_outputs}, grad_fn);
676
+ }
677
+ """
678
+ )
679
+
680
+ LOOP_OVER_VECTOR_OF_GRAD_FNS = CodeTemplate(
681
+ """\
682
+ if (!grad_fns.empty()) {
683
+ ${preamble}
684
+ for (const auto& i : c10::irange(grad_fns.size())) {
685
+ auto grad_fn = grad_fns[i];
686
+ if (grad_fn != nullptr) {
687
+ ${statements}
688
+ }
689
+ }
690
+ }
691
+ """
692
+ )
693
+
694
+ CONDITIONAL = CodeTemplate(
695
+ """\
696
+ if (${cond}) {
697
+ ${statements}
698
+ }
699
+ """
700
+ )
701
+
702
+ RUN_ONLY_IN_DEBUG_MODE = CodeTemplate(
703
+ """\
704
+ #ifndef NDEBUG
705
+ ${statements}
706
+ #endif
707
+ """
708
+ )
709
+
710
+ FW_DERIVATIVE_CHECK_TEMPLATE = CodeTemplate(
711
+ """\
712
+ isFwGradDefined(${req_inp})\
713
+ """
714
+ )
715
+ FW_DERIVATIVE_SIZE_CHECK_TEMPLATE = CodeTemplate(
716
+ """\
717
+ TORCH_CHECK(
718
+ self.size() == ${inp_name}.size(),
719
+ "Tensor lists must have the same number of tensors, got ",
720
+ self.size(),
721
+ " and ",
722
+ ${inp_name}.size());
723
+ """
724
+ )
725
+
726
+ FW_DERIVATIVE_TENSORLIST_CHECK_TEMPLATE = CodeTemplate(
727
+ """\
728
+ isFwGradDefinedTensorList(${req_inp})\
729
+ """
730
+ )
731
+
732
+ FW_DERIVATIVE_DEFINED_GRAD_TEMPLATE = CodeTemplate(
733
+ """\
734
+ auto ${inp_name}_t_raw = toNonOptFwGrad(${inp});
735
+ auto ${inp_name}_tensor = toNonOptTensor(${inp});
736
+ auto ${inp_name}_t = (${inp_name}_t_raw.defined() || !${inp_name}_tensor.defined())
737
+ ? ${inp_name}_t_raw : at::${zeros_fn}(${inp_name}_tensor.sizes(), ${inp_name}_tensor.options());
738
+ """
739
+ )
740
+
741
+ FW_DERIVATIVE_DEFINED_PRIMAL_TEMPLATE = CodeTemplate(
742
+ """\
743
+ auto ${inp_name}_p = toNonOptPrimal(${inp});
744
+ """
745
+ )
746
+
747
+ FW_DERIVATIVE_SETTER_TENSOR = CodeTemplate(
748
+ """\
749
+ if (${out_arg}_new_fw_grad_opt.has_value() && ${out_arg}_new_fw_grad_opt.value().defined() && ${out_arg}.defined()) {
750
+ // The hardcoded 0 here will need to be updated once we support multiple levels.
751
+ ${out_arg}._set_fw_grad(${out_arg}_new_fw_grad_opt.value(), /* level */ 0, /* is_inplace_op */ ${is_inplace});
752
+ }
753
+ """
754
+ )
755
+
756
+ FW_DERIVATIVE_SETTER_TENSOR_FOREACH = CodeTemplate(
757
+ """\
758
+ for (const auto& i : c10::irange(${out_arg}_new_fw_grad_opts.size())) {
759
+ auto& ${out_arg}_new_fw_grad_opt = ${out_arg}_new_fw_grad_opts[i];
760
+ if (${out_arg}_new_fw_grad_opt.has_value() && ${out_arg}_new_fw_grad_opt.value().defined() && ${out_arg}[i].defined()) {
761
+ // The hardcoded 0 here will need to be updated once we support multiple levels.
762
+ ${out_arg}[i]._set_fw_grad(${out_arg}_new_fw_grad_opt.value(), /* level */ 0, /* is_inplace_op */ ${is_inplace});
763
+ }
764
+ }
765
+ """
766
+ )
767
+
768
+ FW_DERIVATIVE_SETTER_MULTI_OUTPUT = CodeTemplate(
769
+ """\
770
+ if (${all_res}_new_fw_grad_opt.has_value() && std::get<${idx}>(${all_res}_new_fw_grad_opt.value()).defined()
771
+ && ${out_arg}.defined()) {
772
+ ${out_arg}._set_fw_grad(std::get<${idx}>(${all_res}_new_fw_grad_opt.value()), /* level */ 0, /* is_inplace_op */ false);
773
+ }
774
+ """
775
+ )
776
+
777
+ FW_DERIVATIVE_SETTER_TENSOR_LIST = CodeTemplate(
778
+ """\
779
+ if (${out_arg}_new_fw_grad_opt.has_value()) {
780
+ auto ${out_arg}_new_fw_grad = ${out_arg}_new_fw_grad_opt.value();
781
+ TORCH_INTERNAL_ASSERT(${out_arg}.size() == ${out_arg}_new_fw_grad.size());
782
+ for (const auto i : c10::irange(${out_arg}.size())) {
783
+ if (${out_arg}_new_fw_grad[i].defined() && ${out_arg}[i].defined()) {
784
+ // The hardcoded 0 here will need to be updated once we support multiple levels.
785
+ ${out_arg}[i]._set_fw_grad(${out_arg}_new_fw_grad[i], /* level */ 0, /* is_inplace_op */ ${is_inplace});
786
+ }
787
+ }
788
+ }
789
+ """
790
+ )
791
+
792
+ FW_DERIVATIVE_TEMPLATE = CodeTemplate(
793
+ """\
794
+ ${fw_grad_opt_definition}
795
+ if (${requires_fw_grad}) {
796
+ ${unpacked_arguments}
797
+ ${out_arg}_new_fw_grad_opt = ${formula};
798
+ }
799
+ """
800
+ )
801
+
802
+ FW_DERIVATIVE_FOREACH_TEMPLATE = CodeTemplate(
803
+ """\
804
+ ${fw_grad_opt_definition}
805
+ for (const auto& i : c10::irange(${vector_of_optional_tensor}.size())) {
806
+ if (${any_has_forward_grad_for_current_index}) {
807
+ ${unpacked_arguments}
808
+ ${vector_of_optional_tensor}[i] = ${formula};
809
+ }
810
+ }
811
+ """
812
+ )
813
+
814
+ FW_DERIVATIVE_FORBID_TEMPLATE = CodeTemplate(
815
+ """\
816
+ TORCH_CHECK_NOT_IMPLEMENTED(!(${cond}), "Trying to use forward AD with ${name} that does not support it ${msg}");
817
+ """
818
+ )
819
+
820
+ FW_DERIVATIVE_FORBID_LIST_TEMPLATE = CodeTemplate(
821
+ """\
822
+ for (const auto& _t: ${arg}) {
823
+ TORCH_CHECK_NOT_IMPLEMENTED(!(${cond}), "Trying to use forward AD with ${name} that does not support it ${msg}");
824
+ }
825
+ """
826
+ )
827
+
828
+
829
+ def gen_variable_type(
830
+ out: str,
831
+ native_yaml_path: str,
832
+ tags_yaml_path: str,
833
+ fns_with_diff_infos: List[NativeFunctionWithDifferentiabilityInfo],
834
+ template_path: str,
835
+ used_keys: Set[str],
836
+ ) -> None:
837
+ """VariableType.h and VariableType.cpp body
838
+
839
+ This is the at::Type subclass for differentiable tensors. The
840
+ implementation of each function dispatches to the base tensor type to
841
+ compute the output. The grad_fn is attached to differentiable functions.
842
+ """
843
+ fm = FileManager(install_dir=out, template_dir=template_path, dry_run=False)
844
+ fm.write(
845
+ "VariableType.h",
846
+ lambda: {
847
+ "generated_comment": "@"
848
+ + f"generated from {fm.template_dir_for_comments()}/VariableType.h"
849
+ },
850
+ )
851
+
852
+ # helper that generates a TORCH_LIBRARY_IMPL macro for each
853
+ # dispatch key that appears in derivatives.yaml
854
+ def wrapper_registrations(used_keys: Set[str]) -> str:
855
+ library_impl_macro_list: List[str] = []
856
+ for key in sorted(used_keys):
857
+ dispatch_key = key
858
+ if key == "Default":
859
+ dispatch_key = "Autograd"
860
+ library_impl_macro = (
861
+ f"TORCH_LIBRARY_IMPL(aten, {dispatch_key}, m) "
862
+ + "{\n"
863
+ + "${"
864
+ + f"wrapper_registrations_{key}"
865
+ + "}\n}"
866
+ )
867
+ library_impl_macro_list += [library_impl_macro]
868
+ return "\n\n".join(library_impl_macro_list)
869
+
870
+ # Generate a new template from VariableType.cpp which replaces ${wrapper_registrations}
871
+ # with per key TORCH_LIBRARY_IMPL macros for each key that appears in derivatives.yaml
872
+ fm1 = FileManager(
873
+ install_dir=out + "/templates", template_dir=template_path, dry_run=False
874
+ )
875
+ fm1.write(
876
+ "VariableType.cpp",
877
+ lambda: {
878
+ "type_derived_method_definitions": "\n\n".join(
879
+ [
880
+ "${" + f"type_derived_method_definitions_{key}" + "}"
881
+ for key in sorted(used_keys)
882
+ ]
883
+ ),
884
+ "wrapper_registrations": wrapper_registrations(used_keys),
885
+ },
886
+ )
887
+
888
+ # Generate final VariableType_*.cpp files from the generated template
889
+ fm2 = FileManager(install_dir=out, template_dir=out + "/templates", dry_run=False)
890
+
891
+ sharded_keys = set(
892
+ [f"type_derived_method_definitions_{key}" for key in sorted(used_keys)]
893
+ + [f"wrapper_registrations_{key}" for key in sorted(used_keys)]
894
+ )
895
+ # NOTE: see Note [Sharded File] at the top of the VariableType.cpp
896
+ # template regarding sharding of the generated files.
897
+ fm2.write_sharded(
898
+ "VariableType.cpp",
899
+ [fn for fn in fns_with_diff_infos if use_derived(fn)],
900
+ key_fn=lambda fn: cpp.name(fn.func.func),
901
+ base_env={
902
+ "generated_comment": "@"
903
+ + f"generated from {fm.template_dir_for_comments()}/VariableType.cpp",
904
+ },
905
+ env_callable=gen_variable_type_func,
906
+ num_shards=5,
907
+ sharded_keys=sharded_keys,
908
+ )
909
+
910
+
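For intuition, if derivatives.yaml used the keys "Default" and "AutogradNestedTensor" (a hypothetical pair chosen only for illustration), the nested `wrapper_registrations` helper above would build a template string along these lines, with "Default" mapped onto the Autograd dispatch key and keys emitted in sorted order:

TORCH_LIBRARY_IMPL(aten, AutogradNestedTensor, m) {
${wrapper_registrations_AutogradNestedTensor}
}

TORCH_LIBRARY_IMPL(aten, Autograd, m) {
${wrapper_registrations_Default}
}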
911
+ @with_native_function_and
912
+ def gen_wrapper_registration(f: NativeFunction, key: str = "Default") -> str:
913
+ return WRAPPER_REGISTRATION.substitute(
914
+ unqual_operator_name_with_overload=f.func.name,
915
+ type_wrapper_name=type_wrapper_name(f, key),
916
+ class_type="VariableType",
917
+ )
918
+
919
+
920
+ def gen_variable_type_func(
921
+ fn: NativeFunctionWithDifferentiabilityInfo,
922
+ ) -> Dict[str, List[str]]:
923
+ f = fn.func
924
+ result = {}
925
+ with native_function_manager(f):
926
+ name = cpp.name(f.func)
927
+ formals = gen_formals(f)
928
+
929
+ if (
930
+ fn.info is None
931
+ and str(f.func.name.name) not in RESET_GRAD_ACCUMULATOR
932
+ and get_base_name(f) not in DONT_REQUIRE_DERIVATIVE
933
+ and len(gen_differentiable_outputs(fn)) > 0
934
+ and cpp.name(f.func) not in DONT_ENFORCE_SAME_TENSOR_IMPL_OR_STORAGE
935
+ and type_wrapper_name(f) not in DONT_ENFORCE_STORAGE_IMPL_USE_COUNT
936
+ and type_wrapper_name(f) not in DONT_ENFORCE_TENSOR_IMPL_USE_COUNT
937
+ ):
938
+ # NOTE: [ Registering AutogradNotImplemented boxed kernel ]
939
+ #
940
+ # When there is no derivatives.yaml entry, we register a generic boxed
941
+ # NotImplemented kernel to set grad_fn to be NotImplemented, so that forward
942
+ # proceeds as usual but an error is properly produced on backward.
943
+ # TODO: it would be nice to not have these special cases
944
+ #
945
+ # There are several cases where still let codegen handle it:
946
+ # 1) ops that need to reset grad accumulator (we let codegen handle this case
947
+ # because) the list is (currently) only accessible in Python.
948
+ # 2) User explicitly specifies DONT_REQUIRE_DERIVATIVE. This basically makes
949
+ # autograd a fallthrough with NDEBUG checks. This can be useful for when all
950
+ # outputs are integral.
951
+ # 3) When there are no differentiable outputs. This is similar to (2).
952
+ # 4) There are certain ops where we skip certain NDEBUG checks. This is similar
953
+ # to (1).
954
+ type_definition = ""
955
+ wrapper_registration = AUTOGRAD_NOT_IMPLEMENTED_REGISTRATION.substitute(
956
+ unqual_operator_name_with_overload=f.func.name
957
+ )
958
+ result["type_derived_method_definitions_Default"] = [type_definition]
959
+ result["wrapper_registrations_Default"] = [wrapper_registration]
960
+ else:
961
+ if not fn.info:
962
+ key = "Default"
963
+ type_definition = METHOD_DEFINITION.substitute(
964
+ return_type=cpp.returns_type(
965
+ f.func.returns, symint=True
966
+ ).cpp_type(),
967
+ type_wrapper_name=type_wrapper_name(f, key),
968
+ type_definition_body=emit_body(fn, key),
969
+ formals=formals,
970
+ )
971
+ wrapper_registration = gen_wrapper_registration(f, key)
972
+ result[f"type_derived_method_definitions_{key}"] = [type_definition]
973
+ result[f"wrapper_registrations_{key}"] = [wrapper_registration]
974
+ else:
975
+ for key in fn.info.keys():
976
+ type_definition = METHOD_DEFINITION.substitute(
977
+ return_type=cpp.returns_type(
978
+ f.func.returns, symint=True
979
+ ).cpp_type(),
980
+ type_wrapper_name=type_wrapper_name(f, key),
981
+ type_definition_body=emit_body(fn, key),
982
+ formals=formals,
983
+ )
984
+ wrapper_registration = gen_wrapper_registration(f, key)
985
+ result[f"type_derived_method_definitions_{key}"] = [type_definition]
986
+ result[f"wrapper_registrations_{key}"] = [wrapper_registration]
987
+ # See Note [Manual Backend kernels]
988
+ assert (name in MANUAL_BACKEND) == f.manual_kernel_registration
989
+ # If you want to register a kernel to Autograd, you must make the op abstract.
990
+ # In other words, this op must have a dispatch section in native_functions.yaml.
991
+ if name in MANUAL_AUTOGRAD_AND_TRACER or (
992
+ fn.info and any(info.has_derivatives for info in fn.info.values())
993
+ ):
994
+ msg = (
995
+ f"There's a formula for {name} (or its functional variant) in derivatives.yaml. "
996
+ f"It's required to add a dispatch section for it with explicit supported backends e.g CPU/CUDA "
997
+ f"It's required to add a dispatch section for it with explicitly supported backends, e.g. CPU/CUDA "
998
+ f"https://github.com/pytorch/pytorch/tree/master/aten/src/ATen/native#choosing-the-right-dispatch-keyword "
999
+ f"for instructions to choose the right dispatch keyword."
1000
+ )
1001
+ assert f.is_abstract, msg
1002
+
1003
+ return result
1004
+
1005
+
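The dictionary returned by `gen_variable_type_func` is keyed per dispatch key so that `write_sharded` can splice each piece into the matching slot of the generated template. An illustrative (not literal) shape of the result for an op with only a Default entry:

# Illustrative only; the real strings are full C++ method definitions and registrations.
result = {
    "type_derived_method_definitions_Default": ["<C++ method definition>"],
    "wrapper_registrations_Default": ["<wrapper registration>"],
}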
1006
+ _foreach_ops_without_differentiability_info = {
1007
+ # No reference backward available due to the lack of `{maximum, minimum}(tensor, scalar)`.
1008
+ ("_foreach_maximum", "Scalar"),
1009
+ ("_foreach_maximum", "ScalarList"),
1010
+ ("_foreach_minimum", "Scalar"),
1011
+ ("_foreach_minimum", "ScalarList"),
1012
+ # No reference backward available as addcdiv/addcmul don't support Tensor as scaling factor.
1013
+ ("_foreach_addcdiv", "Tensor"),
1014
+ ("_foreach_addcmul", "Tensor"),
1015
+ ("_foreach_copy", ""),
1016
+ }
1017
+
1018
+ _foreach_ops_with_different_arity = {
1019
+ # These ops lack the `alpha` scaling factor applied to the right-hand-side argument.
1020
+ ("_foreach_add", "Scalar"),
1021
+ ("_foreach_add", "ScalarList"),
1022
+ ("_foreach_sub", "Scalar"),
1023
+ ("_foreach_sub", "ScalarList"),
1024
+ }
1025
+
1026
+
1027
+ @with_native_function_with_differentiability_info_and_key
1028
+ def emit_body(
1029
+ fn: NativeFunctionWithDifferentiabilityInfo, key: str = "Default"
1030
+ ) -> List[str]:
1031
+ assert dispatch_strategy(fn) == "use_derived"
1032
+ f = fn.func
1033
+ info = fn.info[key] if fn.info else None
1034
+ fw_derivatives = fn.fw_derivatives.get(key, []) if fn.fw_derivatives else []
1035
+
1036
+ name = cpp.name(f.func)
1037
+ inplace = f.func.kind() == SchemaKind.inplace
1038
+ is_out_fn = f.func.kind() == SchemaKind.out
1039
+ returns_void = len(f.func.returns) == 0
1040
+ base_name = get_base_name(f)
1041
+ view_info = get_view_info(f)
1042
+
1043
+ is_foreach = name.startswith("_foreach")
1044
+ is_inplace_foreach = is_foreach and inplace
1045
+ if is_inplace_foreach:
1046
+ inplace_foreacharg2refarg: Dict[Argument, Argument] = {}
1047
+ refargname2inplace_foreacharg: Dict[str, Argument] = {}
1048
+ base_name_and_overload_name = (f.func.name.name.base, f.func.name.overload_name)
1049
+ if info is None:
1050
+ assert (
1051
+ base_name_and_overload_name
1052
+ in _foreach_ops_without_differentiability_info
1053
+ ), f"{'.'.join(base_name_and_overload_name)} should have a differentiability info"
1054
+ else:
1055
+ assert (
1056
+ len(f.func.arguments.flat_non_out)
1057
+ == len(info.func.func.arguments.flat_non_out)
1058
+ ) or (base_name_and_overload_name in _foreach_ops_with_different_arity), (
1059
+ f"{'.'.join(base_name_and_overload_name)} has {len(f.func.arguments.flat_non_out)} args "
1060
+ f"but the reference has {len(info.func.func.arguments.flat_non_out)}"
1061
+ )
1062
+ for foreach_arg, ref_arg in zip(
1063
+ f.func.arguments.flat_non_out, info.func.func.arguments.flat_non_out
1064
+ ):
1065
+ foreach_arg_type = foreach_arg.type
1066
+ if isinstance(foreach_arg_type, ListType):
1067
+ foreach_arg_type = foreach_arg_type.elem
1068
+ assert foreach_arg_type == ref_arg.type
1069
+ inplace_foreacharg2refarg[foreach_arg] = ref_arg
1070
+ refargname2inplace_foreacharg[ref_arg.name] = foreach_arg
1071
+
1072
+ def gen_differentiable_input(
1073
+ arg: Union[Argument, SelfArgument, TensorOptionsArguments]
1074
+ ) -> Optional[DifferentiableInput]:
1075
+ if isinstance(arg, TensorOptionsArguments):
1076
+ return None
1077
+ a: Argument = arg.argument if isinstance(arg, SelfArgument) else arg
1078
+
1079
+ # TODO: `cpp_type` is only to keep it byte-for-byte compatible with the old codegen, should remove.
1080
+ # NB: This is not a clone of cpp.argument() - TensorOptionsArguments / faithful / binds are
1081
+ # not handled properly as they are irrelevant for this codegen.
1082
+ cpp_type = cpp.argument_type(a, binds=a.name, symint=True).cpp_type()
1083
+
1084
+ if not is_differentiable(a.name, a.type, info):
1085
+ return None
1086
+ return DifferentiableInput(
1087
+ name=a.name,
1088
+ type=a.type,
1089
+ cpp_type=cpp_type,
1090
+ )
1091
+
1092
+ @with_native_function
1093
+ def gen_differentiable_inputs(f: NativeFunction) -> List[DifferentiableInput]:
1094
+ arguments = list(f.func.arguments.non_out)
1095
+ if is_inplace_foreach and info is not None:
1096
+ for i, arg in enumerate(f.func.arguments.flat_non_out):
1097
+ if arg in inplace_foreacharg2refarg:
1098
+ # note(crcrpar): From what I understand, what matters is only the name.
1099
+ # Thus originally I replaced the argument only when the names are different.
1100
+ # TODO(crcrpar): Make it simpler.
1101
+ mapped_arg = inplace_foreacharg2refarg[arg]
1102
+ arguments[i] = Argument(
1103
+ mapped_arg.name,
1104
+ mapped_arg.type,
1105
+ mapped_arg.default,
1106
+ mapped_arg.annotation,
1107
+ )
1108
+ return list(mapMaybe(gen_differentiable_input, arguments))
1109
+
1110
+ def find_args_with_derivatives(
1111
+ differentiable_inputs: List[DifferentiableInput],
1112
+ ) -> List[DifferentiableInput]:
1113
+ """Find arguments that have derivative definitions"""
1114
+ if info is None or not info.has_derivatives:
1115
+ return differentiable_inputs
1116
+ names = {name for d in info.derivatives for name in d.var_names}
1117
+ differentiable = [arg for arg in differentiable_inputs if arg.name in names]
1118
+ if len(differentiable) != len(names):
1119
+ missing = names - {arg.name for arg in differentiable}
1120
+ raise RuntimeError(
1121
+ f"Missing arguments for derivatives: {missing} in {info.name}"
1122
+ )
1123
+ return differentiable
1124
+
1125
+ differentiable_inputs = gen_differentiable_inputs(f)
1126
+ args_with_derivatives = find_args_with_derivatives(differentiable_inputs)
1127
+ differentiable_outputs = gen_differentiable_outputs(fn, key)
1128
+
1129
+ undifferentiable = (base_name in DONT_REQUIRE_DERIVATIVE) or (
1130
+ name in DONT_REQUIRE_DERIVATIVE
1131
+ )
1132
+
1133
+ requires_derivative = (
1134
+ (not undifferentiable)
1135
+ and (len(differentiable_inputs) > 0)
1136
+ and (
1137
+ (len(differentiable_outputs) > 0)
1138
+ # note(crcrpar): In-place foreach functions are void functions.
1139
+ or is_inplace_foreach
1140
+ )
1141
+ )
1142
+
1143
+ if (
1144
+ info is not None
1145
+ and info.has_derivatives
1146
+ and not requires_derivative
1147
+ # out= ops are allowed to have zero returns which cause requires_derivative to be False
1148
+ # we shouldn't error out though (out= ops for autograd just redispatch)
1149
+ and len(f.func.returns) > 0
1150
+ ):
1151
+ raise RuntimeError(
1152
+ f"ERROR: derivative ignored for {name} -- specified an autograd function without derivative"
1153
+ )
1154
+
1155
+ # note(crcrpar): In-place foreach functions do not support forward AD
1156
+ if requires_derivative and len(fw_derivatives) > 0 and not is_inplace_foreach:
1157
+ assert sum(len(derivative.var_names) for derivative in fw_derivatives) == len(
1158
+ differentiable_outputs
1159
+ ), (
1160
+ "Expected the number of forward derivatives implemented to match the "
1161
+ "number of differentiable outputs. NB: This only applies when at least "
1162
+ "one forward derivative is implemented. Not implementing any forward "
1163
+ "derivatives is also okay, and we would require inputs to the op to "
1164
+ "not have associated tangents in that case."
1165
+ )
1166
+
1167
+ try_jit_decomposition = (
1168
+ requires_derivative
1169
+ and len(fw_derivatives) == 0
1170
+ and (not modifies_arguments(f))
1171
+ and (not returns_void)
1172
+ )
1173
+
1174
+ def emit_save_inputs() -> List[str]:
1175
+ setup: List[str] = []
1176
+ if info is None or not info.has_derivatives:
1177
+ return setup
1178
+
1179
+ has_tensorlist_arg = any(
1180
+ is_tensor_list_type(arg.type) for arg in args_with_derivatives
1181
+ )
1182
+
1183
+ # We don't want to save tensors if we know that they will never be used
1184
+ # when computing the derivative, so we add guards to those statements
1185
+ def guard_for(arg: SavedAttribute) -> Optional[str]:
1186
+ assert info is not None
1187
+
1188
+ # It's hard to determine the edge offset if we have TensorLists
1189
+ # NOTE(crcrpar): in-place foreach functions' arguments include tensorlist
1190
+ # but their derivatives don't use it, so let them bypass this check.
1191
+ if has_tensorlist_arg and (not is_inplace_foreach):
1192
+ return None
1193
+
1194
+ # Empirical evaluation of the cases where we insert those guards in
1195
+ # backward shows that they are somewhat useless. E.g. there's no need
1196
+ # to guard on some values captured from forward, because they had to
1197
+ # require_grad if the backward function even gets executed. I don't
1198
+ # have any good ideas for detecting those cases, so I simply disabled the
1199
+ # checks.
1200
+ if "backward" in info.name:
1201
+ return None
1202
+
1203
+ # If there's a single derivative we could compute, we already have
1204
+ # a requires_grad check that is sufficient
1205
+ if len(args_with_derivatives) <= 1:
1206
+ return None
1207
+
1208
+ # We really only care about trimming down the amount of tensors we save
1209
+ if arg.nctype.type != BaseCType(tensorT):
1210
+ return None
1211
+
1212
+ # We want to emit simple guards, so we only allow that if checking one
1213
+ # input is enough to determine whether we need that value
1214
+ used_in = [d for d in info.derivatives if arg in d.saved_inputs]
1215
+ assert len(used_in) > 0
1216
+ if len(used_in) != 1:
1217
+ return None
1218
+ derivative = used_in[0]
1219
+
1220
+ # Case with multioutput formulas
1221
+ # TODO: process all derivative formulas!!!
1222
+ if len(derivative.var_names) != 1:
1223
+ wrap_opt_if_start = derivative.formula.find(
1224
+ f"wrap_opt_if({arg.nctype.name}"
1225
+ )
1226
+ if wrap_opt_if_start == -1:
1227
+ return None
1228
+
1229
+ wrap_opt_if_match = re.match(
1230
+ rf"wrap_opt_if\({arg.nctype.name},(.*?)\)",
1231
+ derivative.formula[wrap_opt_if_start:],
1232
+ )
1233
+ assert wrap_opt_if_match is not None
1234
+
1235
+ # Condition is between 'wrap_opt_if(var_name,' and ')'.
1236
+ condition_slice = slice(len(rf"wrap_opt_if\({arg.nctype.name},"), -1)
1237
+ wrap_opt_if_condition = wrap_opt_if_match.group(0)[
1238
+ condition_slice
1239
+ ].strip()
1240
+ # replace 'grad_input_mask[num]' with 'grad_fn->should_compute_output(num)'
1241
+ wrap_opt_if_condition = re.sub(
1242
+ r"grad_input_mask\[(\d+)\]",
1243
+ r"grad_fn->should_compute_output(\1)",
1244
+ wrap_opt_if_condition,
1245
+ )
1246
+ return f"{wrap_opt_if_condition}"
1247
+
1248
+ # Figure out the offset of the edge that uses this variable
1249
+ derivative_var_name = derivative.var_names[0]
1250
+ for edge_off, a in enumerate(args_with_derivatives):
1251
+ if a.name == derivative_var_name:
1252
+ break
1253
+ else:
1254
+ raise AssertionError()
1255
+ return f"grad_fn->should_compute_output({edge_off})"
1256
+
1257
+ if is_inplace_foreach:
1258
+ save_input_stmts = save_variables(info.all_saved_inputs, False, guard_for)
1259
+ if save_input_stmts:
1260
+ setup.append(
1261
+ LOOP_OVER_VECTOR_OF_GRAD_FNS.substitute(
1262
+ preamble="", statements=save_input_stmts
1263
+ )
1264
+ )
1265
+ else:
1266
+ setup.extend(save_variables(info.all_saved_inputs, False, guard_for))
1267
+ for arg in args_with_derivatives:
1268
+ if is_tensor_list_type(arg.type):
1269
+ setup.append(f"grad_fn->{arg.name}_size_ = {arg.name}.size();")
1270
+ return setup
1271
+
1272
+ def setup_derivative(differentiable_inputs: List[DifferentiableInput]) -> List[str]:
1273
+ body: List[str] = []
1274
+ if is_out_fn:
1275
+ # For out functions, ensure that no input or output requires grad
1276
+ body.append(DECLARE_GRAD_FN.substitute(op="Node"))
1277
+ body.append(
1278
+ SETUP_NONE_REQUIRES_GRAD.substitute(
1279
+ base_name=base_name,
1280
+ args_to_check=[arg.name for arg in differentiable_inputs],
1281
+ )
1282
+ )
1283
+ body.append(
1284
+ SETUP_NONE_REQUIRES_GRAD.substitute(
1285
+ base_name=base_name,
1286
+ args_to_check=[arg.name for arg in differentiable_outputs],
1287
+ )
1288
+ )
1289
+ return body
1290
+
1291
+ op = info.op if info is not None and info.has_derivatives else "NotImplemented"
1292
+ setup = []
1293
+ if not is_inplace_foreach:
1294
+ setup.extend(
1295
+ ASSIGN_GRAD_FN.substitute(
1296
+ op=op,
1297
+ op_ctor=""
1298
+ if info is not None and info.has_derivatives
1299
+ else f'"{cpp.name(f.func)}"',
1300
+ args_with_derivatives=[arg.name for arg in args_with_derivatives],
1301
+ ).split("\n")
1302
+ )
1303
+ else:
1304
+ # note(crcrpar): Assuming in-place foreach function's self_arg is always TensorList.
1305
+ list_like_arg = "self"
1306
+ args = [arg.name for arg in args_with_derivatives]
1307
+ for i, arg in enumerate(args):
1308
+ if is_inplace_foreach and info is not None:
1309
+ if arg in refargname2inplace_foreacharg:
1310
+ foreach_arg = refargname2inplace_foreacharg[arg]
1311
+ args[i] = foreach_arg.name + (
1312
+ "[i]" if isinstance(foreach_arg.type, ListType) else ""
1313
+ )
1314
+ else:
1315
+ if arg == list_like_arg:
1316
+ args[i] = arg + "[i]"
1317
+ setup.extend(
1318
+ ASSIGN_VECTOR_OF_GRAD_FN.substitute(
1319
+ op=op,
1320
+ op_ctor=""
1321
+ if info is not None and info.has_derivatives
1322
+ else f'"{cpp.name(f.func)}"',
1323
+ args_with_derivatives=args,
1324
+ irange=f"{list_like_arg}.size()",
1325
+ ).split("\n")
1326
+ )
1327
+ setup.extend(emit_save_inputs())
1328
+
1329
+ body.extend(
1330
+ emit_check_no_requires_grad(differentiable_inputs, args_with_derivatives)
1331
+ )
1332
+ declare_grad_fn_template = (
1333
+ DECLARE_GRAD_FN if not is_inplace_foreach else DECLARE_VECTOR_OF_GRAD_FN
1334
+ )
1335
+ body.append(declare_grad_fn_template.substitute(op=op))
1336
+ body.append(SETUP_DERIVATIVE.substitute(setup=setup))
1337
+ return body
1338
+
1339
+ def emit_check_if_in_complex_autograd_allowlist() -> List[str]:
1340
+ body: List[str] = []
1341
+ if base_name in GRADIENT_IMPLEMENTED_FOR_COMPLEX:
1342
+ return body
1343
+ for arg in differentiable_outputs:
1344
+ name = arg.name
1345
+ # TODO: should be `arg.type.is_tensor_like()`?
1346
+ if arg.cpp_type == "at::Tensor" or arg.cpp_type in TENSOR_LIST_LIKE_CTYPES:
1347
+ body.append(f'throw_error_for_complex_autograd({name}, "{base_name}");')
1348
+ return body
1349
+
1350
+ def emit_check_no_requires_grad(
1351
+ tensor_args: List[DifferentiableInput],
1352
+ args_with_derivatives: List[DifferentiableInput],
1353
+ ) -> List[str]:
1354
+ """Checks that arguments without derivatives don't require grad"""
1355
+ body: List[str] = []
1356
+ for arg in tensor_args:
1357
+ if arg in args_with_derivatives:
1358
+ continue
1359
+ arg_name = arg.name
1360
+ if info and arg_name in info.non_differentiable_arg_names:
1361
+ continue
1362
+ if arg_name == "output":
1363
+ # Double-backwards definitions sometimes take in 'input' and
1364
+ # 'output', but only define the derivative for input.
1365
+ continue
1366
+ body.append(f'check_no_requires_grad({arg_name}, "{arg_name}", "{name}");')
1367
+ return body
1368
+
1369
+ def emit_original_self_definition() -> List[str]:
1370
+ body: List[str] = []
1371
+ if inplace:
1372
+ if is_inplace_foreach:
1373
+ body.append(
1374
+ "std::vector<c10::optional<at::Tensor>> original_selfs(self.size());"
1375
+ )
1376
+ else:
1377
+ body.append("c10::optional<at::Tensor> original_self;")
1378
+
1379
+ all_forward_grad_cond = []
1380
+ for derivative in fw_derivatives:
1381
+ if derivative.required_original_self_value:
1382
+ all_forward_grad_cond.append(
1383
+ get_any_has_forward_grad_name(derivative.var_names)
1384
+ )
1385
+
1386
+ if all_forward_grad_cond:
1387
+ if not is_inplace_foreach:
1388
+ body.append(f'if ({" || ".join(all_forward_grad_cond)}) {{')
1389
+ body.append(" original_self = self.clone();")
1390
+ body.append("}")
1391
+ else:
1392
+ current_all_forward_grad_cond = [
1393
+ f"{cond}[i]" for cond in all_forward_grad_cond
1394
+ ]
1395
+ body.append("for (const auto& i : c10::irange(self.size())) {")
1396
+ body.append(
1397
+ f" if ({' || '.join(current_all_forward_grad_cond)}) {{"
1398
+ )
1399
+ body.append(" original_selfs[i] = self[i].clone();")
1400
+ body.append(" }")
1401
+ body.append("}")
1402
+
1403
+ return body
1404
+
1405
+ def save_variables(
1406
+ saved_variables: Sequence[SavedAttribute],
1407
+ is_output: bool,
1408
+ guard_for: Callable[[SavedAttribute], Optional[str]] = lambda name: None,
1409
+ ) -> Sequence[str]:
1410
+ # assign the saved variables to the generated grad_fn
1411
+ stmts: List[str] = []
1412
+ for arg in sorted(saved_variables, key=lambda sa: str(sa.nctype.name)):
1413
+ name = (
1414
+ arg.nctype.name.name
1415
+ if isinstance(arg.nctype.name, SpecialArgName)
1416
+ else arg.nctype.name
1417
+ )
1418
+ foreacharg: Optional[Argument] = None
1419
+ is_foreacharg_list_type: bool = False
1420
+ type = arg.nctype.type
1421
+ expr = arg.expr
1422
+ stmts_prepend = None
1423
+ if is_inplace_foreach and info is not None:
1424
+ # todo(crcrpar): See if we can add some check e.g. `assert foreacharg is not None`.
1425
+ # for now the example assert would fail.
1426
+ name_to_query = name.split("_scalar_type")[0]
1427
+ if name_to_query in refargname2inplace_foreacharg:
1428
+ foreacharg = refargname2inplace_foreacharg[name_to_query]
1429
+ is_foreacharg_list_type = isinstance(foreacharg.type, ListType)
1430
+ if foreacharg is not None:
1431
+ name_in_expr = (
1432
+ f"{foreacharg.name}{'[i]' if is_foreacharg_list_type else ''}"
1433
+ )
1434
+ src_name = name
1435
+ if "_scalar_type" in src_name:
1436
+ split_src_name = src_name.split("_scalar_type")
1437
+ assert len(split_src_name) == 2
1438
+ src_name = split_src_name[0]
1439
+ expr = expr.replace(src_name, name_in_expr)
1440
+ if (
1441
+ type == BaseCType(tensorT)
1442
+ or type == OptionalCType(BaseCType(tensorT))
1443
+ or type == MutRefCType(OptionalCType(BaseCType(tensorT)))
1444
+ or (is_output and type == BaseCType(scalarT))
1445
+ ):
1446
+ # note(crcrpar): Here `expr` is generated from scratch, `arg.expr` is ignored.
1447
+ var = name
1448
+ name += "_"
1449
+ if var == "self" and inplace:
1450
+ original_self_var = (
1451
+ "original_self"
1452
+ if not is_inplace_foreach
1453
+ else "original_selfs[i]"
1454
+ )
1455
+ self_var = var if not is_inplace_foreach else var + "[i]"
1456
+ stmts_prepend = f"if (!{original_self_var}.has_value()) {original_self_var} = {self_var}.clone()"
1457
+ var = f"{original_self_var}.value()"
1458
+ assert not is_output
1459
+ if inplace and is_output:
1460
+ assert name == "result_"
1461
+ var = (
1462
+ "self[i]"
1463
+ if is_inplace_foreach or is_foreacharg_list_type
1464
+ else "self"
1465
+ )
1466
+ is_inplace_view = f"{var}.is_view()"
1467
+ expr = f"SavedVariable({var}, {str(is_output).lower()}, {is_inplace_view})"
1468
+ else:
1469
+ expr = f"SavedVariable({var}, {str(is_output).lower()})"
1470
+ if foreacharg is not None and "original_selfs" not in expr:
1471
+ expr = expr.replace(src_name, name_in_expr)
1472
+ elif (
1473
+ type == BaseCType(tensorListT)
1474
+ or type == ListCType(OptionalCType(BaseCType(tensorT)))
1475
+ or type == BaseCType(iTensorListRefT)
1476
+ or type == VectorCType(BaseCType(tensorT))
1477
+ ):
1478
+ # See Note [nuanced return type of out-of-place foreach functions]
1479
+ if type == VectorCType(BaseCType(tensorT)):
1480
+ assert is_foreach and is_output
1481
+ expr = f"make_saved_variable_list({name}, {str(is_foreach and is_output).lower()})"
1482
+ name += "_"
1483
+ elif type == BaseCType(intArrayRefT):
1484
+ expr = expr + ".vec()"
1485
+ elif type == BaseCType(symIntArrayRefT):
1486
+ expr = expr + ".vec()"
1487
+ elif type == BaseCType(stringT):
1488
+ expr = f"std::string({expr})"
1489
+ elif type == OptionalCType(BaseCType(stringT)):
1490
+ expr = f"{expr}.has_value() ? c10::optional<std::string>(std::string({expr}.value())) : c10::nullopt"
1491
+ elif type == ArrayRefCType(
1492
+ elem=BaseCType(type=BaseCppType(ns="at", name="Scalar"))
1493
+ ):
1494
+ expr = expr + ".vec()"
1495
+
1496
+ guard = guard_for(arg)
1497
+ if guard is None:
1498
+ if stmts_prepend:
1499
+ stmts.append(f"{stmts_prepend};")
1500
+ stmts.append(f"grad_fn->{name} = {expr};")
1501
+ else:
1502
+ stmts.append(f"if ({guard}) {{")
1503
+ if stmts_prepend:
1504
+ stmts.append(f" {stmts_prepend};")
1505
+ stmts.append(f" grad_fn->{name} = {expr};")
1506
+ stmts.append("}")
1507
+ return stmts
1508
+
1509
+ # Generates a Dispatcher::redispatch() call into the dispatcher. We do this mainly for performance reasons:
1510
+ # - Pre-compute the full DispatchKeySet. This saves the dispatcher from having to read from TLS.
1511
+ # - redispatch() avoids a redundant call to RecordFunction, which was already called right before
1512
+ # we entered this autograd kernel.
1513
+ def emit_dispatch_call(
1514
+ f: NativeFunction, input_base: str, unpacked_args: Sequence[str]
1515
+ ) -> str:
1516
+ """Dispatch call via function in a namespace or method on Tensor."""
1517
+ dispatcher_sig = DispatcherSignature.from_schema(f.func)
1518
+ dispatcher_exprs = dispatcher_sig.exprs()
1519
+
1520
+ # code-generated autograd kernels plumb and recompute dispatch keys directly through the kernel for performance.
1521
+ # Ops also always have a function variant of the redispatch API.
1522
+ # See Note [Plumbing Keys Through The Dispatcher] for details.
1523
+ dispatch_key_set = "ks & c10::after_autograd_keyset"
1524
+ call = CALL_REDISPATCH.substitute(
1525
+ api_name=cpp.name(
1526
+ f.func,
1527
+ faithful_name_for_out_overloads=True,
1528
+ symint_overload=f.func.has_symint(),
1529
+ ),
1530
+ unpacked_args=[dispatch_key_set] + list(unpacked_args),
1531
+ )
1532
+ return call
1533
+
1534
+ def wrap_output(
1535
+ f: NativeFunction, unpacked_bindings: List[Binding], var: str
1536
+ ) -> str:
1537
+ call = ""
1538
+ rhs_value: Optional[str] = None
1539
+ if not any(r.type.is_tensor_like() for r in f.func.returns):
1540
+ rhs_value = var
1541
+ else:
1542
+ rhs_value = f"std::move({var})"
1543
+ assert rhs_value is not None
1544
+ call += ASSIGN_RETURN_VALUE.substitute(
1545
+ return_values=tie_return_values(f), rhs_value=rhs_value
1546
+ )
1547
+ return call
1548
+
1549
+ def check_tensorimpl_and_storage(
1550
+ call: str, unpacked_bindings: List[Binding]
1551
+ ) -> str:
1552
+ # See NOTE [ TensorImpl and Storage Pointer Sanity Checks ]
1553
+ stmts_before_call: List[str] = []
1554
+ stmts_after_call: List[str] = []
1555
+
1556
+ if cpp.name(f.func) in DONT_ENFORCE_SAME_TENSOR_IMPL_OR_STORAGE:
1557
+ return call
1558
+
1559
+ # Check properties of inputs (enforce (1))
1560
+ for unpacked_binding in unpacked_bindings:
1561
+ arg = unpacked_binding.name
1562
+ noref_cpp_type = unpacked_binding.nctype.type.remove_const_ref()
1563
+ if noref_cpp_type == BaseCType(tensorListT) or noref_cpp_type == BaseCType(
1564
+ iTensorListRefT
1565
+ ):
1566
+ stmts_before_call += [
1567
+ SAVE_TENSORLIST_STORAGE.substitute(tensorlist_name=arg),
1568
+ SAVE_TENSORLIST_IMPL.substitute(tensorlist_name=arg),
1569
+ ]
1570
+ stmts_after_call += [
1571
+ ENFORCE_SAME_TENSORLIST_STORAGE.substitute(tensorlist_name=arg),
1572
+ ENFORCE_SAME_TENSORLIST_IMPL.substitute(tensorlist_name=arg),
1573
+ ]
1574
+ elif noref_cpp_type == ListCType(OptionalCType(BaseCType(tensorT))):
1575
+ stmts_before_call += [
1576
+ SAVE_OPTIONALTENSORLIST_STORAGE.substitute(tensorlist_name=arg),
1577
+ SAVE_OPTIONALTENSORLIST_IMPL.substitute(tensorlist_name=arg),
1578
+ ]
1579
+ stmts_after_call += [
1580
+ ENFORCE_SAME_OPTIONALTENSORLIST_STORAGE.substitute(
1581
+ tensorlist_name=arg
1582
+ ),
1583
+ ENFORCE_SAME_OPTIONALTENSORLIST_IMPL.substitute(
1584
+ tensorlist_name=arg
1585
+ ),
1586
+ ]
1587
+ elif noref_cpp_type == BaseCType(tensorT):
1588
+ stmts_before_call += [
1589
+ SAVE_TENSOR_STORAGE.substitute(tensor_name=arg),
1590
+ SAVE_TENSOR_IMPL.substitute(tensor_name=arg),
1591
+ ]
1592
+ stmts_after_call += [
1593
+ ENFORCE_SAME_TENSOR_STORAGE.substitute(
1594
+ tensor_name=arg, out_tensor_name=arg
1595
+ ),
1596
+ ENFORCE_SAME_TENSOR_IMPL.substitute(tensor_name=arg),
1597
+ ]
1598
+
1599
+ assert (stmts_before_call and stmts_after_call) or (
1600
+ not stmts_before_call and not stmts_after_call
1601
+ )
1602
+
1603
+ # Check properties of outputs (enforce (2), (3))
1604
+ if f.func.kind() not in (SchemaKind.inplace, SchemaKind.out):
1605
+ base_name = f.func.name.name.base # TODO: should be str(f.func.name.name)?
1606
+ aliased_arg_name = ALL_VIEW_FUNCTIONS.get(base_name, None)
1607
+ if aliased_arg_name is not None:
1608
+ aliased_arg_name = unpacked_name(aliased_arg_name)
1609
+ for i, (ret, ret_name) in enumerate(
1610
+ zip(f.func.returns, cpp.return_names(f))
1611
+ ):
1612
+ noref_cpp_type = cpp.return_type(ret, symint=True).remove_const_ref()
1613
+ if noref_cpp_type == BaseCType(tensorT):
1614
+ if aliased_arg_name is not None:
1615
+ assert (
1616
+ i == 0
1617
+ ), "Expect non-CompositeImplicitAutograd view function {base} to return single output"
1618
+ stmts_after_call += [
1619
+ ENFORCE_SAME_TENSOR_STORAGE.substitute(
1620
+ tensor_name=aliased_arg_name, out_tensor_name=ret_name
1621
+ )
1622
+ ]
1623
+ else:
1624
+ if (
1625
+ type_wrapper_name(f)
1626
+ not in DONT_ENFORCE_STORAGE_IMPL_USE_COUNT
1627
+ ):
1628
+ stmts_after_call += [
1629
+ ENFORCE_TENSOR_STORAGE_USE_COUNT_EQUALS_ONE.substitute(
1630
+ tensor_name=ret_name, fn_name=type_wrapper_name(f)
1631
+ )
1632
+ ]
1633
+
1634
+ if type_wrapper_name(f) not in DONT_ENFORCE_TENSOR_IMPL_USE_COUNT:
1635
+ stmts_after_call += [
1636
+ ENFORCE_TENSOR_IMPL_USE_COUNT_LT_OR_EQ_ONE.substitute(
1637
+ tensor_name=ret_name, fn_name=type_wrapper_name(f)
1638
+ )
1639
+ ]
1640
+
1641
+ # Currently we don't have any functions that return the following types, but
1642
+ # we should update the checks once we do
1643
+ elif noref_cpp_type == ListCType(OptionalCType(BaseCType(tensorT))):
1644
+ raise AssertionError(
1645
+ f"Please add use_count checks for {noref_cpp_type}"
1646
+ )
1647
+ elif noref_cpp_type == BaseCType(tensorListT):
1648
+ raise AssertionError(
1649
+ f"Please add use_count checks for {noref_cpp_type}"
1650
+ )
1651
+
1652
+ if stmts_before_call and stmts_after_call:
1653
+ call = (
1654
+ RUN_ONLY_IN_DEBUG_MODE.substitute(statements=stmts_before_call)
1655
+ + call
1656
+ + RUN_ONLY_IN_DEBUG_MODE.substitute(statements=stmts_after_call)
1657
+ )
1658
+ return call
1659
+
1660
+ def emit_call(
1661
+ f: NativeFunction, unpacked_bindings: List[Binding], try_jit_decomposition: bool
1662
+ ) -> str:
1663
+ # We only care about adding `at::AutoDispatchBelowAutograd` guard for non-variable dispatch
1664
+ # (which corresponds to 'use_derived' strategy). The purpose of this guard is to make sure
1665
+ # the baseType operations still dispatch to non-Variable type, even if the arguments passed
1666
+ # in are now Variables.
1667
+ # See NOTE [ Treating Variables as non-Variables in type dispatch ] for details.
1668
+ unpacked_args = [b.name for b in unpacked_bindings]
1669
+ base_type_call = emit_dispatch_call(f, "self_", unpacked_args)
1670
+
1671
+ if get_view_info(f) is not None or modifies_arguments(f):
1672
+ guard = "at::AutoDispatchBelowAutograd guard;"
1673
+ else:
1674
+ guard = "at::AutoDispatchBelowADInplaceOrView guard;"
1675
+
1676
+ any_has_forward_grad = (
1677
+ get_any_has_fw_grad_cond(derivative=None)
1678
+ if requires_derivative
1679
+ else "false"
1680
+ )
1681
+ return_types = ", ".join(
1682
+ [cpp.return_type(a, symint=True).cpp_type() for a in f.func.returns]
1683
+ )
1684
+ if len(f.func.returns) > 1:
1685
+ return_types = f"std::tuple<{return_types}>"
1686
+
1687
+ arg_names = [
1688
+ a.name
1689
+ for a in cpp.arguments(
1690
+ f.func.arguments,
1691
+ faithful=True,
1692
+ symint=True,
1693
+ method=False,
1694
+ cpp_no_default_args=set(),
1695
+ )
1696
+ ]
1697
+
1698
+ if not modifies_arguments(f) and not returns_void:
1699
+ if try_jit_decomposition:
1700
+ call = DISPATCH_TO_NON_VAR_TYPE_WITH_TMP_RETURN_VALUES_JVP_DECOMP.substitute(
1701
+ base_type_call=base_type_call,
1702
+ tmp_var=TMP_VAR,
1703
+ guard=guard,
1704
+ any_has_forward_grad=any_has_forward_grad,
1705
+ op_name=cpp.name(f.func),
1706
+ op_overload=f.func.name.overload_name,
1707
+ return_types=return_types,
1708
+ arg_names=arg_names,
1709
+ )
1710
+ else:
1711
+ call = DISPATCH_TO_NON_VAR_TYPE_WITH_TMP_RETURN_VALUES.substitute(
1712
+ base_type_call=base_type_call,
1713
+ tmp_var=TMP_VAR,
1714
+ guard=guard,
1715
+ )
1716
+
1717
+ call += wrap_output(f, unpacked_bindings, TMP_VAR)
1718
+ else:
1719
+ assert not try_jit_decomposition
1720
+ call = DISPATCH_TO_NON_VAR_TYPE_WITHOUT_RETURN_VALUES.substitute(
1721
+ base_type_call=base_type_call, guard=guard
1722
+ )
1723
+ call = check_tensorimpl_and_storage(call, unpacked_bindings)
1724
+ return call
1725
+
1726
+ def emit_history() -> str:
1727
+ fn = "rebase" if modifies_arguments(f) and view_info is None else "set"
1728
+ output_names = [r.name for r in differentiable_outputs]
1729
+ # TODO: flatten allocates a std::vector, which could be expensive
1730
+ outs = CodeTemplate("flatten_tensor_args( ${outs} )").substitute(
1731
+ outs=output_names if not is_inplace_foreach else "self"
1732
+ )
1733
+ if not is_inplace_foreach:
1734
+ return SET_HISTORY.substitute(fn=fn, differentiable_outputs=outs)
1735
+ else:
1736
+ return LOOP_OVER_VECTOR_OF_GRAD_FNS.substitute(
1737
+ preamble=(
1738
+ f"auto differentiable_outputs = {outs};\n"
1739
+ f"TORCH_INTERNAL_ASSERT(differentiable_outputs.size() == grad_fns.size());"
1740
+ ),
1741
+ statements=f"{fn}_history(differentiable_outputs[i], grad_fns[i]);",
1742
+ )
1743
+
1744
+ def emit_save_outputs() -> str:
1745
+ if is_out_fn:
1746
+ # out functions don't currently support differentiation
1747
+ return ""
1748
+ if info is not None and info.has_derivatives:
1749
+ stmts = save_variables(info.all_saved_outputs, True)
1750
+ if len(stmts) == 0:
1751
+ return ""
1752
+ if not is_inplace_foreach:
1753
+ return CONDITIONAL.substitute(cond="grad_fn", statements=stmts)
1754
+ else:
1755
+ return LOOP_OVER_VECTOR_OF_GRAD_FNS.substitute(
1756
+ preamble="", statements=stmts
1757
+ )
1758
+ return ""
1759
+
1760
+ def emit_any_requires_grad() -> List[str]:
1761
+ extra_condition = ""
1762
+ if info and info.output_differentiability_conditions:
1763
+ assert len(info.output_differentiability_conditions) == 1
1764
+ extra_condition = f"_any_requires_grad &= ({info.output_differentiability_conditions[0]});"
1765
+ names_of_args_with_derivatives = [arg.name for arg in args_with_derivatives]
1766
+ if is_inplace_foreach and info is not None:
1767
+ for i, arg in enumerate(names_of_args_with_derivatives):
1768
+ for f_arg, r_arg in inplace_foreacharg2refarg.items():
1769
+ if arg == r_arg.name:
1770
+ names_of_args_with_derivatives[i] = f_arg.name
1771
+ return [
1772
+ SETUP_ANY_REQUIRES_GRAD.substitute(
1773
+ args_with_derivatives=names_of_args_with_derivatives,
1774
+ extra_differentiability_conditions=extra_condition,
1775
+ )
1776
+ ]
1777
+
1778
+ def get_any_has_forward_grad_name(var_names: Tuple[str, ...]) -> str:
1779
+ if len(var_names) == 1:
1780
+ return f"_any_has_forward_grad_{var_names[0]}"
1781
+ else:
1782
+ return f'_any_has_forward_grad_{"_".join(var_names)}'
1783
+
1784
+ def emit_any_has_forward_grad() -> List[str]:
1785
+ content: List[str] = []
1786
+ if not is_foreach:
1787
+ for derivative in fw_derivatives:
1788
+ requires_fw_grad = get_any_has_fw_grad_cond(derivative=derivative)
1789
+ if info and info.output_differentiability_conditions:
1790
+ assert len(info.output_differentiability_conditions) == 1
1791
+ requires_fw_grad = f"({info.output_differentiability_conditions[0]}) && {requires_fw_grad}"
1792
+ content.append(
1793
+ f"[[maybe_unused]] auto {get_any_has_forward_grad_name(derivative.var_names)} = {requires_fw_grad};"
1794
+ )
1795
+ else:
1796
+ for derivative in fw_derivatives:
1797
+ bool_vector_name = get_any_has_forward_grad_name(derivative.var_names)
1798
+ cur_derivative_conditions = []
1799
+ for inp in differentiable_inputs:
1800
+ if derivative.required_inputs_fw_grad is None:
1801
+ continue
1802
+ if inp.name not in derivative.required_inputs_fw_grad:
1803
+ continue
1804
+ inp_name = (
1805
+ inp.name
1806
+ if not inplace
1807
+ else refargname2inplace_foreacharg[inp.name].name
1808
+ )
1809
+ inp_type = (
1810
+ inp.type
1811
+ if not inplace
1812
+ else refargname2inplace_foreacharg[inp.name].type
1813
+ )
1814
+ is_list_type = is_tensor_list_type(inp_type)
1815
+ if is_list_type:
1816
+ if inp_name != "self":
1817
+ content.append(
1818
+ FW_DERIVATIVE_SIZE_CHECK_TEMPLATE.substitute(
1819
+ inp_name=inp_name
1820
+ )
1821
+ )
1822
+ cur_derivative_conditions.append(
1823
+ FW_DERIVATIVE_CHECK_TEMPLATE.substitute(
1824
+ req_inp=inp_name + "[i]"
1825
+ )
1826
+ )
1827
+ else:
1828
+ cur_derivative_conditions.append(
1829
+ FW_DERIVATIVE_CHECK_TEMPLATE.substitute(req_inp=inp_name)
1830
+ )
1831
+
1832
+ content.append(f"std::vector<bool> {bool_vector_name}(self.size());")
1833
+ content.append("for (const auto& i : c10::irange(self.size())) {")
1834
+ content.append(
1835
+ f" {bool_vector_name}[i] = {' || '.join(cur_derivative_conditions)};"
1836
+ )
1837
+ content.append("}")
1838
+ return content
1839
+
1840
+ def emit_check_inplace() -> List[str]:
1841
+ if not inplace:
1842
+ return []
1843
+ return [
1844
+ f"check_inplace({arg.name}, _any_requires_grad);"
1845
+ for arg in differentiable_outputs
1846
+ ]
1847
+
1848
+ def emit_fw_derivatives() -> List[str]:
1849
+ content: List[str] = []
1850
+ fw_grad_setters: List[str] = []
1851
+ for derivative in fw_derivatives:
1852
+ res = derivative.var_names
1853
+ if f.func.name.name.inplace:
1854
+ assert (
1855
+ len(res) == 1
1856
+ ), "Expected number of outputs to be 1 if function is inplace"
1857
+ # TODO update this when inplace namings are unified
1858
+ res = ("self",)
1859
+
1860
+ assert derivative.required_inputs_fw_grad is not None
1861
+
1862
+ unpacked_arguments = ""
1863
+ for inp in differentiable_inputs:
1864
+ inp_name = inp.name
1865
+ is_input_tensorlist = is_foreach and is_tensor_list_type(
1866
+ inp.type
1867
+ if not inplace
1868
+ else refargname2inplace_foreacharg[inp.name].type
1869
+ )
1870
+ input_suffix = "[i]" if is_input_tensorlist else ""
1871
+ if is_inplace_foreach:
1872
+ if inp.name in refargname2inplace_foreacharg:
1873
+ inp_name = refargname2inplace_foreacharg[inp.name].name
1874
+ zeros_fn = (
1875
+ "zeros"
1876
+ if inplace and inp.name == "self"
1877
+ else "_efficientzerotensor"
1878
+ )
1879
+ if inp.name in derivative.required_inputs_fw_grad:
1880
+ unpacked_arguments += (
1881
+ FW_DERIVATIVE_DEFINED_GRAD_TEMPLATE.substitute(
1882
+ inp_name=inp.name,
1883
+ inp=inp_name + input_suffix,
1884
+ zeros_fn=zeros_fn,
1885
+ )
1886
+ )
1887
+ if inp.name in (derivative.required_inputs_primal or []):
1888
+ unpacked_arguments += (
1889
+ FW_DERIVATIVE_DEFINED_PRIMAL_TEMPLATE.substitute(
1890
+ inp_name=inp.name,
1891
+ inp=inp_name + input_suffix,
1892
+ )
1893
+ )
1894
+ if derivative.required_original_self_value:
1895
+ input_suffix = "s[i]" if is_inplace_foreach else ""
1896
+ unpacked_arguments += FW_DERIVATIVE_DEFINED_GRAD_TEMPLATE.substitute(
1897
+ inp_name="original_self",
1898
+ inp="original_self" + input_suffix,
1899
+ zeros_fn=zeros_fn,
1900
+ )
1901
+ unpacked_arguments += FW_DERIVATIVE_DEFINED_PRIMAL_TEMPLATE.substitute(
1902
+ inp_name="original_self",
1903
+ inp="original_self" + input_suffix,
1904
+ )
1905
+ elif inplace and derivative.is_reusing_outplace_formula:
1906
+ # The gradient wasn't already cloned, do it if grad mode is enabled
1907
+ unpacked_arguments += (
1908
+ "self_t = GradMode::is_enabled() ? self_t.clone() : self_t;"
1909
+ )
1910
+
1911
+ if inplace:
1912
+ is_inplace_str = "true"
1913
+ else:
1914
+ is_inplace_str = "false"
1915
+
1916
+ requires_fw_grad = get_any_has_forward_grad_name(derivative.var_names)
1917
+
1918
+ if all(
1919
+ (isinstance(var_type, BaseType) and var_type.is_tensor_like())
1920
+ for var_type in derivative.var_types
1921
+ ):
1922
+ # Is there a way to get from BaseType to BaseCType
1923
+ if len(derivative.var_types) == 1:
1924
+ opt_res_grad_type = OptionalCType(BaseCType(tensorT)).cpp_type()
1925
+ if not is_foreach:
1926
+ fw_grad_setters.append(
1927
+ FW_DERIVATIVE_SETTER_TENSOR.substitute(
1928
+ out_arg=res[0], is_inplace=is_inplace_str
1929
+ )
1930
+ )
1931
+ else:
1932
+ assert res[0] == ("result" if not inplace else "self")
1933
+ fw_grad_setters.append(
1934
+ FW_DERIVATIVE_SETTER_TENSOR_FOREACH.substitute(
1935
+ out_arg=res[0], is_inplace=is_inplace_str
1936
+ )
1937
+ )
1938
+ requires_fw_grad += f" && ({derivative.var_names[0]}.defined())"
1939
+ else:
1940
+ tuple_type = TupleCType(
1941
+ [BaseCType(tensorT)] * len(derivative.var_types)
1942
+ )
1943
+ opt_res_grad_type = OptionalCType(tuple_type).cpp_type()
1944
+ for idx, single_res in enumerate(res):
1945
+ fw_grad_setters.append(
1946
+ FW_DERIVATIVE_SETTER_MULTI_OUTPUT.substitute(
1947
+ idx=idx, all_res="_".join(res), out_arg=single_res
1948
+ )
1949
+ )
1950
+ elif (
1951
+ isinstance(derivative.var_types[0], ListType)
1952
+ and derivative.var_types[0].is_tensor_like()
1953
+ ):
1954
+ assert (
1955
+ len(derivative.var_types) == 1
1956
+ ), "Expected number of outputs to be 1 if function returns ListType"
1957
+ if not is_foreach:
1958
+ opt_res_grad_type = OptionalCType(
1959
+ VectorCType(BaseCType(tensorT))
1960
+ ).cpp_type()
1961
+ fw_grad_setters.append(
1962
+ FW_DERIVATIVE_SETTER_TENSOR_LIST.substitute(
1963
+ out_arg=res[0], is_inplace=is_inplace_str
1964
+ )
1965
+ )
1966
+ else:
1967
+ # TODO(crcrpar): Should this (= the foreach specific logic) be refactored somehow?
1968
+ # Only out-place foreach functions that have entries in `tools/autograd/derivatives.yaml`
1969
+ # can reach here.
1970
+ opt_res_grad_type = OptionalCType(BaseCType(tensorT)).cpp_type()
1971
+ fw_grad_setters.append(
1972
+ FW_DERIVATIVE_SETTER_TENSOR_FOREACH.substitute(
1973
+ out_arg=res[0], is_inplace=is_inplace_str
1974
+ )
1975
+ )
1976
+ else:
1977
+ raise RuntimeError("Unsupported output type for forward derivative")
1978
+
1979
+ if not is_foreach:
1980
+ fw_grad_opt_definition = f"{opt_res_grad_type} {'_'.join(res)}_new_fw_grad_opt = c10::nullopt;"
1981
+ # View ops create fw_grad that already is a view of the base's fw_grad so just use that
1982
+ content.append(
1983
+ FW_DERIVATIVE_TEMPLATE.substitute(
1984
+ fw_grad_opt_definition=fw_grad_opt_definition,
1985
+ requires_fw_grad=requires_fw_grad,
1986
+ formula=derivative.formula,
1987
+ out_arg="_".join(res),
1988
+ unpacked_arguments=unpacked_arguments,
1989
+ )
1990
+ )
1991
+ else:
1992
+ # note(crcrpar): Assuming `self` is TensorList.
1993
+ fw_grad_opt_definition = (
1994
+ f"std::vector<{opt_res_grad_type}> {'_'.join(res)}_new_fw_grad_opts"
1995
+ "(self.size(), c10::nullopt);"
1996
+ )
1997
+ foreach_forward_grad_formula = derivative.formula
1998
+ _foreach_arg: Union[Argument, DifferentiableInput]
1999
+ if inplace:
2000
+ for _foreach_arg, _ref_arg in inplace_foreacharg2refarg.items():
2001
+ # note(crcrpar): Massage only Scalar and ArrayRef<Scalar> here.
2002
+ if not (
2003
+ is_tensor_type(_foreach_arg.type)
2004
+ or is_tensor_list_type(_foreach_arg.type)
2005
+ ):
2006
+ pattern = _foreach_arg.name
2007
+ if isinstance(_foreach_arg.type, ListType):
2008
+ pattern += "[i]"
2009
+ foreach_forward_grad_formula = (
2010
+ foreach_forward_grad_formula.replace(
2011
+ _ref_arg.name, pattern
2012
+ )
2013
+ )
2014
+ else:
2015
+ if (
2016
+ "result" in foreach_forward_grad_formula
2017
+ and "result[i]" not in foreach_forward_grad_formula
2018
+ ):
2019
+ foreach_forward_grad_formula = (
2020
+ foreach_forward_grad_formula.replace("result", "result[i]")
2021
+ )
2022
+
2023
+ content.append(
2024
+ FW_DERIVATIVE_FOREACH_TEMPLATE.substitute(
2025
+ fw_grad_opt_definition=fw_grad_opt_definition,
2026
+ vector_of_optional_tensor=f"{'_'.join(res)}_new_fw_grad_opts",
2027
+ any_has_forward_grad_for_current_index=" || ".join(
2028
+ get_any_has_forward_grad_name(derivative.var_names) + "[i]"
2029
+ for derivative in fw_derivatives
2030
+ ),
2031
+ formula=foreach_forward_grad_formula,
2032
+ unpacked_arguments=unpacked_arguments,
2033
+ )
2034
+ )
2035
+
2036
+ # Set all the grads at the end to avoid: https://github.com/pytorch/pytorch/issues/67367
2037
+ content.append("\n".join(fw_grad_setters))
2038
+ return content
2039
+
2040
+ def get_any_has_fw_grad_cond(derivative: Optional[ForwardDerivative]) -> str:
2041
+ #
2042
+ # Produces a condition string (e.g, "isFwGradDefined(grad_output) || isFwGradDefined(output)")
2043
+ #
2044
+ if derivative is None:
2045
+ # (1) If a derivative is NOT provided, cond will check fw_grad of ALL differentiable inputs
2046
+ # - Used in the out_fn case when we want to forbid fw derivatives
2047
+ # - Used in the case where the fw_derivative is not defined, but we want
2048
+ # To check if there is a decomposition registered for jvp
2049
+ to_check: List[str] = []
2050
+ for inp in list(
2051
+ mapMaybe(
2052
+ gen_differentiable_input,
2053
+ f.func.arguments.non_out + list(f.func.arguments.out), # type: ignore[operator]
2054
+ )
2055
+ ):
2056
+ if is_tensor_type(inp.type):
2057
+ to_check.append(
2058
+ FW_DERIVATIVE_CHECK_TEMPLATE.substitute(req_inp=inp.name)
2059
+ )
2060
+ elif is_tensor_list_type(inp.type):
2061
+ to_check.append(
2062
+ FW_DERIVATIVE_TENSORLIST_CHECK_TEMPLATE.substitute(
2063
+ req_inp=inp.name
2064
+ )
2065
+ )
2066
+ else:
2067
+ raise RuntimeError(
2068
+ f'Unsupported input type for "{name}" when forbidding forward AD usage.'
2069
+ )
2070
+ return f'({" || ".join(to_check)})'
2071
+ else:
2072
+ # (2) If derivative is provided, use that information to determine which inputs
2073
+ # to check fw_grad for
2074
+ assert derivative.required_inputs_fw_grad is not None
2075
+
2076
+ if len(derivative.required_inputs_fw_grad) == 0:
2077
+ # Handle functions like stack
2078
+ # For these, we don't unpack anything and always call the user function
2079
+ if not (
2080
+ len(differentiable_inputs) == 1
2081
+ and is_tensor_list_type(differentiable_inputs[0].type)
2082
+ ):
2083
+ raise RuntimeError(
2084
+ f'No differentiable input to "{name}" is a differentiable Tensor (as the provided '
2085
+ "forward AD formula does not use any input tangent) even though a forward gradient "
2086
+ "formula has been defined for it. This case should only happen for function that "
2087
+ "take a single TensorList as input. All other cases are not supported right now."
2088
+ )
2089
+ any_has_fw_grad = "true"
2090
+ else:
2091
+ any_has_fw_grad = " || ".join(
2092
+ [
2093
+ (
2094
+ FW_DERIVATIVE_TENSORLIST_CHECK_TEMPLATE
2095
+ if is_tensor_list_type(inp.type)
2096
+ else FW_DERIVATIVE_CHECK_TEMPLATE
2097
+ ).substitute(req_inp=inp.name)
2098
+ for inp in differentiable_inputs
2099
+ if inp.name in derivative.required_inputs_fw_grad
2100
+ ]
2101
+ )
2102
+ any_has_fw_grad = f"({any_has_fw_grad})"
2103
+
2104
+ return any_has_fw_grad
2105
+
2106
+ def emit_forbid_fw_derivatives(is_out_fn: bool = False) -> str:
2107
+ if is_out_fn:
2108
+ msg = "because it is an out= function"
2109
+ else:
2110
+ msg = (
2111
+ "because it has not been implemented yet.\\nPlease file an issue "
2112
+ "to PyTorch at https://github.com/pytorch/pytorch/issues/new?template=feature-request.yml "
2113
+ "so that we can prioritize its implementation."
2114
+ )
2115
+ cond = get_any_has_fw_grad_cond(derivative=None)
2116
+ return (
2117
+ FW_DERIVATIVE_FORBID_TEMPLATE.substitute(cond=cond, name=name, msg=msg)
2118
+ if cond != ""
2119
+ else ""
2120
+ )
2121
+
2122
+ body: List[str] = []
2123
+ unpack_args_stats, unpacked_bindings = unpack_args(f)
2124
+
2125
+ body.extend(unpack_args_stats)
2126
+ if requires_derivative:
2127
+ body.extend(emit_any_requires_grad())
2128
+ body.extend(emit_any_has_forward_grad())
2129
+ body.extend(emit_check_inplace())
2130
+ body.extend(emit_original_self_definition())
2131
+ body.extend(setup_derivative(differentiable_inputs))
2132
+
2133
+ body.append(emit_call(f, unpacked_bindings, try_jit_decomposition))
2134
+ if requires_derivative:
2135
+ # set_flags has to appear after version_counter, because rebase_history
2136
+ # requires that the counter is incremented before it is called
2137
+ body.append(emit_history())
2138
+ body.extend(emit_check_if_in_complex_autograd_allowlist())
2139
+
2140
+ if is_out_fn:
2141
+ body.append(emit_forbid_fw_derivatives(is_out_fn=True))
2142
+ else:
2143
+ if requires_derivative and not try_jit_decomposition:
2144
+ if len(fw_derivatives) > 0:
2145
+ body.extend(emit_fw_derivatives())
2146
+ else:
2147
+ body.append(emit_forbid_fw_derivatives())
2148
+
2149
+ if requires_derivative:
2150
+ # Save only after the forward AD has been set up
2151
+ body.append(emit_save_outputs())
2152
+
2153
+ if str(f.func.name.name) in RESET_GRAD_ACCUMULATOR:
2154
+ # `inplace` implies that there is exactly one output named `self`,
2155
+ # so we can keep the generated code easy. If you need to
2156
+ # `reset_grad_accumulator` in an operator that's not `inplace`, you can
2157
+ # remove this assert but the code generation will get more elaborate
2158
+ assert inplace
2159
+ body.append("reset_grad_accumulator(self);")
2160
+ if not returns_void:
2161
+ body.append(f"return {get_return_value(f)};")
2162
+ return body