applied-ai-018 commited on
Commit
510ff90
·
verified ·
1 Parent(s): 3c0728c

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/DataLoader.h +6 -0
  2. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/Device.h +21 -0
  3. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/DynamicTypes.h +36 -0
  4. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/Exceptions.h +390 -0
  5. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/Export.h +9 -0
  6. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/Generator.h +28 -0
  7. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/MemoryFormat.h +27 -0
  8. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/Module.h +6 -0
  9. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/Size.h +15 -0
  10. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/Storage.h +60 -0
  11. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/StorageMethods.h +8 -0
  12. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/Types.h +13 -0
  13. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/FunctionsManual.h +1101 -0
  14. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/anomaly_mode.h +71 -0
  15. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/autograd.h +104 -0
  16. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/cpp_hook.h +29 -0
  17. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/engine.h +288 -0
  18. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/forward_grad.h +210 -0
  19. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/accumulate_grad.h +277 -0
  20. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/basic_ops.h +111 -0
  21. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/comm.h +47 -0
  22. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/pybind.h +15 -0
  23. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/tensor.h +186 -0
  24. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/utils.h +114 -0
  25. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/Functions.h +0 -0
  26. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/VariableType.h +59 -0
  27. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/ViewFuncs.h +953 -0
  28. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/python_functions.h +25 -0
  29. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/python_return_types.h +98 -0
  30. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/variable_factories.h +736 -0
  31. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/graph_task.h +226 -0
  32. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/input_buffer.h +45 -0
  33. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/jit_decomp_interface.h +50 -0
  34. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/profiler.h +4 -0
  35. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/profiler_kineto.h +188 -0
  36. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/profiler_legacy.h +406 -0
  37. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_anomaly_mode.h +44 -0
  38. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_autograd.h +17 -0
  39. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_enum_tag.h +7 -0
  40. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_legacy_variable.h +12 -0
  41. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_nested_functions.h +9 -0
  42. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_nn_functions.h +7 -0
  43. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_sparse_functions.h +7 -0
  44. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_variable.h +114 -0
  45. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_variable_indexing.h +102 -0
  46. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/saved_variable_hooks.h +13 -0
  47. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/error_messages.h +22 -0
  48. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/grad_layout_contract.h +80 -0
  49. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/lambda_post_hook.h +40 -0
  50. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/python_arg_parsing.h +53 -0
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/DataLoader.h ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/python_headers.h>
4
+
5
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,cppcoreguidelines-avoid-non-const-global-variables,modernize-avoid-c-arrays)
6
+ extern PyMethodDef DataLoaderMethods[];
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/Device.h ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/Export.h>
4
+ #include <torch/csrc/python_headers.h>
5
+
6
+ #include <ATen/Device.h>
7
+
8
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
9
+ struct TORCH_API THPDevice {
10
+ PyObject_HEAD at::Device device;
11
+ };
12
+
13
+ TORCH_API extern PyTypeObject THPDeviceType;
14
+
15
+ inline bool THPDevice_Check(PyObject* obj) {
16
+ return Py_TYPE(obj) == &THPDeviceType;
17
+ }
18
+
19
+ TORCH_API PyObject* THPDevice_New(const at::Device& device);
20
+
21
+ TORCH_API void THPDevice_init(PyObject* module);
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/DynamicTypes.h ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // Provides conversions between Python tensor objects and at::Tensor.
4
+
5
+ #include <torch/csrc/python_headers.h>
6
+
7
+ #include <ATen/Device.h>
8
+ #include <c10/core/Backend.h>
9
+ #include <c10/core/Layout.h>
10
+ #include <c10/core/ScalarType.h>
11
+ #include <c10/core/ScalarTypeToTypeMeta.h>
12
+ #include <torch/csrc/Export.h>
13
+
14
+ #include <memory>
15
+ #include <string>
16
+
17
+ struct THPDtype;
18
+ struct THPLayout;
19
+
20
+ namespace c10 {
21
+ struct Storage;
22
+ }
23
+
24
+ namespace torch {
25
+ void registerDtypeObject(THPDtype* dtype, at::ScalarType scalarType);
26
+ void registerLayoutObject(THPLayout* thp_layout, at::Layout layout);
27
+
28
+ TORCH_PYTHON_API PyObject* createPyObject(const at::Storage& storage);
29
+ at::Storage createStorage(PyObject* obj);
30
+ std::tuple<at::Storage, at::ScalarType, bool> createStorageGetType(
31
+ PyObject* obj);
32
+ bool isStorage(PyObject* obj);
33
+
34
+ TORCH_PYTHON_API THPDtype* getTHPDtype(at::ScalarType scalarType);
35
+ THPLayout* getTHPLayout(at::Layout layout);
36
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/Exceptions.h ADDED
@@ -0,0 +1,390 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <exception>
4
+ #include <memory>
5
+ #include <string>
6
+ #include <system_error>
7
+
8
+ #include <ATen/detail/FunctionTraits.h>
9
+ #include <c10/util/C++17.h>
10
+ #include <c10/util/Exception.h>
11
+ #include <c10/util/StringUtil.h>
12
+ #include <pybind11/pybind11.h>
13
+ #include <torch/csrc/Export.h>
14
+ #include <torch/csrc/jit/runtime/jit_exception.h>
15
+ #include <torch/csrc/utils/cpp_stacktraces.h>
16
+ #include <torch/csrc/utils/pybind.h>
17
+
18
+ #if defined(USE_DISTRIBUTED) && defined(USE_C10D)
19
+ #include <torch/csrc/distributed/c10d/exception.h>
20
+ #endif
21
+
22
+ static inline void PyErr_SetString(PyObject* type, const std::string& message) {
23
+ PyErr_SetString(type, message.c_str());
24
+ }
25
+ /// NOTE [ Conversion Cpp Python Warning ]
26
+ /// The warning handler cannot set python warnings immediately
27
+ /// as it requires acquiring the GIL (potential deadlock)
28
+ /// and would need to cleanly exit if the warning raised a
29
+ /// python error. To solve this, we buffer the warnings and
30
+ /// process them when we go back to python.
31
+ /// This requires the two try/catch blocks below to handle the
32
+ /// following cases:
33
+ /// - If there is no Error raised in the inner try/catch, the
34
+ /// buffered warnings are processed as python warnings.
35
+ /// - If they don't raise an error, the function process with the
36
+ /// original return code.
37
+ /// - If any of them raise an error, the error is set (PyErr_*) and
38
+ /// the destructor will raise a cpp exception python_error() that
39
+ /// will be caught by the outer try/catch that will be able to change
40
+ /// the return value of the function to reflect the error.
41
+ /// - If an Error was raised in the inner try/catch, the inner try/catch
42
+ /// must set the python error. The buffered warnings are then
43
+ /// processed as cpp warnings as we cannot predict before hand
44
+ /// whether a python warning will raise an error or not and we
45
+ /// cannot handle two errors at the same time.
46
+ /// This advanced handler will only be used in the current thread.
47
+ /// If any other thread is used, warnings will be processed as
48
+ /// cpp warnings.
49
+ #define HANDLE_TH_ERRORS \
50
+ try { \
51
+ torch::PyWarningHandler __enforce_warning_buffer; \
52
+ try {
53
+ #define _CATCH_GENERIC_ERROR(ErrorType, PythonErrorType, retstmnt) \
54
+ catch (const c10::ErrorType& e) { \
55
+ auto msg = torch::get_cpp_stacktraces_enabled() \
56
+ ? e.what() \
57
+ : e.what_without_backtrace(); \
58
+ PyErr_SetString(PythonErrorType, torch::processErrorMsg(msg)); \
59
+ retstmnt; \
60
+ }
61
+
62
+ // Only catch torch-specific exceptions
63
+ #define CATCH_CORE_ERRORS(retstmnt) \
64
+ catch (python_error & e) { \
65
+ e.restore(); \
66
+ retstmnt; \
67
+ } \
68
+ catch (py::error_already_set & e) { \
69
+ e.restore(); \
70
+ retstmnt; \
71
+ } \
72
+ _CATCH_GENERIC_ERROR(IndexError, PyExc_IndexError, retstmnt) \
73
+ _CATCH_GENERIC_ERROR(ValueError, PyExc_ValueError, retstmnt) \
74
+ _CATCH_GENERIC_ERROR(TypeError, PyExc_TypeError, retstmnt) \
75
+ _CATCH_GENERIC_ERROR( \
76
+ NotImplementedError, PyExc_NotImplementedError, retstmnt) \
77
+ _CATCH_GENERIC_ERROR(LinAlgError, THPException_LinAlgError, retstmnt) \
78
+ _CATCH_GENERIC_ERROR( \
79
+ OutOfMemoryError, THPException_OutOfMemoryError, retstmnt) \
80
+ _CATCH_GENERIC_ERROR( \
81
+ DistBackendError, THPException_DistBackendError, retstmnt) \
82
+ _CATCH_GENERIC_ERROR( \
83
+ DistNetworkError, THPException_DistNetworkError, retstmnt) \
84
+ _CATCH_GENERIC_ERROR(DistStoreError, THPException_DistStoreError, retstmnt) \
85
+ _CATCH_GENERIC_ERROR(DistError, THPException_DistError, retstmnt) \
86
+ _CATCH_GENERIC_ERROR(Error, PyExc_RuntimeError, retstmnt) \
87
+ catch (torch::PyTorchError & e) { \
88
+ auto msg = torch::processErrorMsg(e.what()); \
89
+ PyErr_SetString(e.python_type(), msg); \
90
+ retstmnt; \
91
+ }
92
+
93
+ #define CATCH_TH_ERRORS(retstmnt) CATCH_CORE_ERRORS(retstmnt)
94
+
95
+ #define CATCH_ALL_ERRORS(retstmnt) \
96
+ CATCH_TH_ERRORS(retstmnt) \
97
+ catch (const std::exception& e) { \
98
+ auto msg = torch::processErrorMsg(e.what()); \
99
+ PyErr_SetString(PyExc_RuntimeError, msg); \
100
+ retstmnt; \
101
+ }
102
+
103
+ #define END_HANDLE_TH_ERRORS_PYBIND \
104
+ } \
105
+ catch (...) { \
106
+ __enforce_warning_buffer.set_in_exception(); \
107
+ throw; \
108
+ } \
109
+ } \
110
+ catch (py::error_already_set & e) { \
111
+ throw; \
112
+ } \
113
+ catch (py::builtin_exception & e) { \
114
+ throw; \
115
+ } \
116
+ catch (torch::jit::JITException & e) { \
117
+ throw; \
118
+ } \
119
+ catch (const std::exception& e) { \
120
+ torch::translate_exception_to_python(std::current_exception()); \
121
+ throw py::error_already_set(); \
122
+ }
123
+
124
+ #define END_HANDLE_TH_ERRORS_RET(retval) \
125
+ } \
126
+ catch (...) { \
127
+ __enforce_warning_buffer.set_in_exception(); \
128
+ throw; \
129
+ } \
130
+ } \
131
+ catch (const std::exception& e) { \
132
+ torch::translate_exception_to_python(std::current_exception()); \
133
+ return retval; \
134
+ }
135
+
136
+ #define END_HANDLE_TH_ERRORS END_HANDLE_TH_ERRORS_RET(nullptr)
137
+
138
+ extern PyObject *THPException_FatalError, *THPException_LinAlgError,
139
+ *THPException_OutOfMemoryError, *THPException_DistError,
140
+ *THPException_DistBackendError, *THPException_DistNetworkError,
141
+ *THPException_DistStoreError;
142
+
143
+ // Throwing this exception means that the python error flags have been already
144
+ // set and control should be immediately returned to the interpreter.
145
+ struct python_error : public std::exception {
146
+ python_error() = default;
147
+
148
+ python_error(const python_error& other)
149
+ : type(other.type),
150
+ value(other.value),
151
+ traceback(other.traceback),
152
+ message(other.message) {
153
+ pybind11::gil_scoped_acquire gil;
154
+ Py_XINCREF(type);
155
+ Py_XINCREF(value);
156
+ Py_XINCREF(traceback);
157
+ }
158
+
159
+ python_error(python_error&& other) noexcept
160
+ : type(other.type),
161
+ value(other.value),
162
+ traceback(other.traceback),
163
+ message(std::move(other.message)) {
164
+ other.type = nullptr;
165
+ other.value = nullptr;
166
+ other.traceback = nullptr;
167
+ }
168
+
169
+ // NOLINTNEXTLINE(bugprone-exception-escape)
170
+ ~python_error() override {
171
+ if (type || value || traceback) {
172
+ pybind11::gil_scoped_acquire gil;
173
+ Py_XDECREF(type);
174
+ Py_XDECREF(value);
175
+ Py_XDECREF(traceback);
176
+ }
177
+ }
178
+
179
+ const char* what() const noexcept override {
180
+ return message.c_str();
181
+ }
182
+
183
+ void build_message() {
184
+ // Ensure we have the GIL.
185
+ pybind11::gil_scoped_acquire gil;
186
+
187
+ // No errors should be set when we enter the function since PyErr_Fetch
188
+ // clears the error indicator.
189
+ TORCH_INTERNAL_ASSERT(!PyErr_Occurred());
190
+
191
+ // Default message.
192
+ message = "python_error";
193
+
194
+ // Try to retrieve the error message from the value.
195
+ if (value != nullptr) {
196
+ // Reference count should not be zero.
197
+ TORCH_INTERNAL_ASSERT(Py_REFCNT(value) > 0);
198
+
199
+ PyObject* pyStr = PyObject_Str(value);
200
+ if (pyStr != nullptr) {
201
+ PyObject* encodedString =
202
+ PyUnicode_AsEncodedString(pyStr, "utf-8", "strict");
203
+ if (encodedString != nullptr) {
204
+ char* bytes = PyBytes_AS_STRING(encodedString);
205
+ if (bytes != nullptr) {
206
+ // Set the message.
207
+ message = std::string(bytes);
208
+ }
209
+ Py_XDECREF(encodedString);
210
+ }
211
+ Py_XDECREF(pyStr);
212
+ }
213
+ }
214
+
215
+ // Clear any errors since we don't want to propagate errors for functions
216
+ // that are trying to build a string for the error message.
217
+ PyErr_Clear();
218
+ }
219
+
220
+ /** Saves the exception so that it can be re-thrown on a different thread */
221
+ inline void persist() {
222
+ if (type)
223
+ return; // Don't overwrite exceptions
224
+ // PyErr_Fetch overwrites the pointers
225
+ pybind11::gil_scoped_acquire gil;
226
+ Py_XDECREF(type);
227
+ Py_XDECREF(value);
228
+ Py_XDECREF(traceback);
229
+ PyErr_Fetch(&type, &value, &traceback);
230
+ build_message();
231
+ }
232
+
233
+ /** Sets the current Python error from this exception */
234
+ inline void restore() {
235
+ if (!type)
236
+ return;
237
+ // PyErr_Restore steals references
238
+ pybind11::gil_scoped_acquire gil;
239
+ Py_XINCREF(type);
240
+ Py_XINCREF(value);
241
+ Py_XINCREF(traceback);
242
+ PyErr_Restore(type, value, traceback);
243
+ }
244
+
245
+ PyObject* type{nullptr};
246
+ PyObject* value{nullptr};
247
+ PyObject* traceback{nullptr};
248
+
249
+ // Message to return to the user when 'what()' is invoked.
250
+ std::string message;
251
+ };
252
+
253
+ bool THPException_init(PyObject* module);
254
+
255
+ namespace torch {
256
+
257
+ // Set python current exception from a C++ exception
258
+ TORCH_PYTHON_API void translate_exception_to_python(const std::exception_ptr&);
259
+
260
+ TORCH_PYTHON_API std::string processErrorMsg(std::string str);
261
+
262
+ // Abstract base class for exceptions which translate to specific Python types
263
+ struct PyTorchError : public std::exception {
264
+ PyTorchError() = default;
265
+ PyTorchError(std::string msg_) : msg(std::move(msg_)) {}
266
+ virtual PyObject* python_type() = 0;
267
+ const char* what() const noexcept override {
268
+ return msg.c_str();
269
+ }
270
+ std::string msg;
271
+ };
272
+
273
+ // Declare a printf-like function on gcc & clang
274
+ // The compiler can then warn on invalid format specifiers
275
+ #ifdef __GNUC__
276
+ #define TORCH_FORMAT_FUNC(FORMAT_INDEX, VA_ARGS_INDEX) \
277
+ __attribute__((format(printf, FORMAT_INDEX, VA_ARGS_INDEX)))
278
+ #else
279
+ #define TORCH_FORMAT_FUNC(FORMAT_INDEX, VA_ARGS_INDEX)
280
+ #endif
281
+
282
+ // Translates to Python TypeError
283
+ struct TypeError : public PyTorchError {
284
+ using PyTorchError::PyTorchError;
285
+ TORCH_PYTHON_API TypeError(const char* format, ...) TORCH_FORMAT_FUNC(2, 3);
286
+ PyObject* python_type() override {
287
+ return PyExc_TypeError;
288
+ }
289
+ };
290
+
291
+ // Translates to Python AttributeError
292
+ struct AttributeError : public PyTorchError {
293
+ AttributeError(const char* format, ...) TORCH_FORMAT_FUNC(2, 3);
294
+ PyObject* python_type() override {
295
+ return PyExc_AttributeError;
296
+ }
297
+ };
298
+
299
+ // ATen warning handler for Python
300
+ struct PyWarningHandler {
301
+ // Move actual handler into a separate class with a noexcept
302
+ // destructor. Otherwise, we need to force all WarningHandler
303
+ // subclasses to have a noexcept(false) destructor.
304
+ struct InternalHandler : at::WarningHandler {
305
+ ~InternalHandler() override = default;
306
+ void process(const c10::Warning& warning) override;
307
+
308
+ std::vector<c10::Warning> warning_buffer_;
309
+ };
310
+
311
+ public:
312
+ /// See NOTE [ Conversion Cpp Python Warning ] for noexcept justification
313
+ TORCH_PYTHON_API PyWarningHandler() noexcept(true);
314
+ // NOLINTNEXTLINE(bugprone-exception-escape)
315
+ TORCH_PYTHON_API ~PyWarningHandler() noexcept(false);
316
+
317
+ /** Call if an exception has been thrown
318
+
319
+ * Necessary to determine if it is safe to throw from the desctructor since
320
+ * std::uncaught_exception is buggy on some platforms and generally
321
+ * unreliable across dynamic library calls.
322
+ */
323
+ void set_in_exception() {
324
+ in_exception_ = true;
325
+ }
326
+
327
+ private:
328
+ InternalHandler internal_handler_;
329
+ at::WarningHandler* prev_handler_;
330
+ bool in_exception_;
331
+ };
332
+
333
+ namespace detail {
334
+
335
+ struct noop_gil_scoped_release {
336
+ // user-defined constructor (i.e. not defaulted) to avoid
337
+ // unused-variable warnings at usage sites of this class
338
+ noop_gil_scoped_release() {}
339
+ };
340
+
341
+ template <bool release_gil>
342
+ using conditional_gil_scoped_release = std::conditional_t<
343
+ release_gil,
344
+ pybind11::gil_scoped_release,
345
+ noop_gil_scoped_release>;
346
+
347
+ template <typename Func, size_t i>
348
+ using Arg = typename invoke_traits<Func>::template arg<i>::type;
349
+
350
+ template <typename Func, size_t... Is, bool release_gil>
351
+ auto wrap_pybind_function_impl_(
352
+ // NOLINTNEXTLINE(cppcoreguidelines-missing-std-forward)
353
+ Func&& f,
354
+ std::index_sequence<Is...>,
355
+ std::bool_constant<release_gil>) {
356
+ namespace py = pybind11;
357
+
358
+ // f=f is needed to handle function references on older compilers
359
+ return [f = std::forward<Func>(f)](Arg<Func, Is>... args) {
360
+ HANDLE_TH_ERRORS
361
+ conditional_gil_scoped_release<release_gil> no_gil;
362
+ return c10::guts::invoke(f, std::forward<Arg<Func, Is>>(args)...);
363
+ END_HANDLE_TH_ERRORS_PYBIND
364
+ };
365
+ }
366
+ } // namespace detail
367
+
368
+ // Wrap a function with TH error and warning handling.
369
+ // Returns a function object suitable for registering with pybind11.
370
+ template <typename Func>
371
+ auto wrap_pybind_function(Func&& f) {
372
+ using traits = invoke_traits<Func>;
373
+ return torch::detail::wrap_pybind_function_impl_(
374
+ std::forward<Func>(f),
375
+ std::make_index_sequence<traits::arity>{},
376
+ std::false_type{});
377
+ }
378
+
379
+ // Wrap a function with TH error, warning handling and releases the GIL.
380
+ // Returns a function object suitable for registering with pybind11.
381
+ template <typename Func>
382
+ auto wrap_pybind_function_no_gil(Func&& f) {
383
+ using traits = invoke_traits<Func>;
384
+ return torch::detail::wrap_pybind_function_impl_(
385
+ std::forward<Func>(f),
386
+ std::make_index_sequence<traits::arity>{},
387
+ std::true_type{});
388
+ }
389
+
390
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/Export.h ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/macros/Export.h>
4
+
5
+ #ifdef THP_BUILD_MAIN_LIB
6
+ #define TORCH_PYTHON_API C10_EXPORT
7
+ #else
8
+ #define TORCH_PYTHON_API C10_IMPORT
9
+ #endif
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/Generator.h ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/Generator.h>
4
+ #include <torch/csrc/Export.h>
5
+ #include <torch/csrc/python_headers.h>
6
+
7
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
8
+ struct THPGenerator {
9
+ PyObject_HEAD at::Generator cdata;
10
+ };
11
+
12
+ // Creates a new Python object wrapping the default at::Generator. The reference
13
+ // is borrowed. The caller should ensure that the at::Generator object lifetime
14
+ // last at least as long as the Python wrapper.
15
+ TORCH_PYTHON_API PyObject* THPGenerator_initDefaultGenerator(
16
+ at::Generator cdata);
17
+
18
+ #define THPGenerator_Check(obj) PyObject_IsInstance(obj, THPGeneratorClass)
19
+
20
+ TORCH_PYTHON_API extern PyObject* THPGeneratorClass;
21
+
22
+ bool THPGenerator_init(PyObject* module);
23
+
24
+ TORCH_PYTHON_API PyObject* THPGenerator_Wrap(at::Generator gen);
25
+
26
+ // Creates a new Python object for a Generator. The Generator must not already
27
+ // have a PyObject* associated with it.
28
+ PyObject* THPGenerator_NewWithVar(PyTypeObject* type, at::Generator gen);
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/MemoryFormat.h ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/python_headers.h>
4
+
5
+ #include <c10/core/MemoryFormat.h>
6
+
7
+ #include <string>
8
+
9
+ const int MEMORY_FORMAT_NAME_LEN = 64;
10
+
11
+ struct THPMemoryFormat {
12
+ PyObject_HEAD at::MemoryFormat memory_format;
13
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
14
+ char name[MEMORY_FORMAT_NAME_LEN + 1];
15
+ };
16
+
17
+ extern PyTypeObject THPMemoryFormatType;
18
+
19
+ inline bool THPMemoryFormat_Check(PyObject* obj) {
20
+ return Py_TYPE(obj) == &THPMemoryFormatType;
21
+ }
22
+
23
+ PyObject* THPMemoryFormat_New(
24
+ at::MemoryFormat memory_format,
25
+ const std::string& name);
26
+
27
+ void THPMemoryFormat_init(PyObject* module);
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/Module.h ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ #ifndef THP_MODULE_INC
2
+ #define THP_MODULE_INC
3
+
4
+ #define THP_STATELESS_ATTRIBUTE_NAME "_torch"
5
+
6
+ #endif
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/Size.h ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/autograd/variable.h>
4
+ #include <torch/csrc/python_headers.h>
5
+ #include <cstdint>
6
+
7
+ extern PyTypeObject THPSizeType;
8
+
9
+ #define THPSize_Check(obj) (Py_TYPE(obj) == &THPSizeType)
10
+
11
+ PyObject* THPSize_New(const torch::autograd::Variable& t);
12
+ PyObject* THPSize_NewFromSizes(int64_t dim, const int64_t* sizes);
13
+ PyObject* THPSize_NewFromSymSizes(const at::Tensor& t);
14
+
15
+ void THPSize_init(PyObject* module);
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/Storage.h ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef THP_STORAGE_INC
2
+ #define THP_STORAGE_INC
3
+
4
+ #include <Python.h>
5
+ #include <c10/core/Storage.h>
6
+ #include <torch/csrc/Exceptions.h>
7
+ #include <torch/csrc/Export.h>
8
+ #include <torch/csrc/Types.h>
9
+
10
+ #define THPStorageStr "torch.UntypedStorage"
11
+
12
+ struct THPStorage {
13
+ PyObject_HEAD;
14
+ c10::MaybeOwned<c10::Storage> cdata;
15
+ bool is_hermetic;
16
+ };
17
+
18
+ TORCH_PYTHON_API PyObject* THPStorage_Wrap(c10::Storage storage);
19
+ TORCH_PYTHON_API PyObject* THPStorage_NewWithStorage(
20
+ PyTypeObject* type,
21
+ c10::Storage _storage,
22
+ c10::impl::PyInterpreterStatus status,
23
+ bool allow_preexisting_pyobj = false);
24
+ extern PyTypeObject* THPStorageClass;
25
+
26
+ static inline bool THPStorage_CheckTypeExact(PyTypeObject* tp) {
27
+ return tp == THPStorageClass;
28
+ }
29
+
30
+ static inline bool THPStorage_CheckExact(PyObject* obj) {
31
+ return THPStorage_CheckTypeExact(Py_TYPE(obj));
32
+ }
33
+
34
+ inline bool THPStorage_Check(PyObject* obj) {
35
+ if (!THPStorageClass)
36
+ return false;
37
+
38
+ const auto result = PyObject_IsInstance(obj, (PyObject*)THPStorageClass);
39
+ if (result == -1)
40
+ throw python_error();
41
+ return result;
42
+ }
43
+
44
+ bool THPStorage_init(PyObject* module);
45
+ void THPStorage_postInit(PyObject* module);
46
+
47
+ void THPStorage_assertNotNull(THPStorage* storage);
48
+ void THPStorage_assertNotNull(PyObject* obj);
49
+
50
+ extern PyTypeObject THPStorageType;
51
+
52
+ inline const c10::Storage& THPStorage_Unpack(THPStorage* storage) {
53
+ return *storage->cdata;
54
+ }
55
+
56
+ inline const c10::Storage& THPStorage_Unpack(PyObject* obj) {
57
+ return THPStorage_Unpack(reinterpret_cast<THPStorage*>(obj));
58
+ }
59
+
60
+ #endif
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/StorageMethods.h ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ #ifndef THP_STORAGE_METHODS_INC
2
+ #define THP_STORAGE_METHODS_INC
3
+
4
+ #include <Python.h>
5
+
6
+ PyMethodDef* THPStorage_getMethods();
7
+
8
+ #endif
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/Types.h ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef THP_TYPES_INC
2
+ #define THP_TYPES_INC
3
+
4
+ #include <cstddef>
5
+
6
+ #ifndef INT64_MAX
7
+ #include <cstdint>
8
+ #endif
9
+
10
+ template <typename T>
11
+ struct THPTypeInfo {};
12
+
13
+ #endif
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/FunctionsManual.h ADDED
@@ -0,0 +1,1101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // NB: Must be at the top of file to avoid including the deprecated "math.h".
4
+ // https://stackoverflow.com/questions/6563810/m-pi-works-with-math-h-but-not-with-cmath-in-visual-studio
5
+ #ifdef _MSC_VER
6
+ #ifndef _USE_MATH_DEFINES
7
+ #define _USE_MATH_DEFINES
8
+ #endif
9
+ #include <cmath>
10
+ #endif
11
+
12
+ #include <ATen/ATen.h>
13
+ #include <torch/csrc/autograd/generated/Functions.h>
14
+
15
+ namespace torch::autograd::generated::details {
16
+
17
+ extern const char* kCudnnDoubleBackwardMsg;
18
+
19
+ // A simple way to imperatively compute index ranges for slots
20
+ // that have been flattened
21
+ struct TORCH_API IndexRangeGenerator {
22
+ IndexRange range(size_t range_size) {
23
+ i += range_size;
24
+ return {i - range_size, i};
25
+ }
26
+ size_t size() {
27
+ return i;
28
+ }
29
+
30
+ private:
31
+ size_t i = 0;
32
+ };
33
+
34
+ TORCH_API Tensor toNonOptFwGrad(const c10::optional<Tensor>& t);
35
+ TORCH_API Tensor toNonOptPrimal(const c10::optional<Tensor>& t);
36
+ TORCH_API Tensor toNonOptTensor(const c10::optional<Tensor>& t);
37
+
38
+ TORCH_API inline c10::optional<Tensor> wrap_opt_if(
39
+ const Tensor& t,
40
+ const bool cond) {
41
+ using OptTensor = c10::optional<Tensor>;
42
+ return cond ? OptTensor(t) : static_cast<OptTensor>(c10::nullopt);
43
+ }
44
+
45
+ TORCH_API Tensor
46
+ apply_loss_reduction(const Tensor& unreduced, int64_t reduction);
47
+ TORCH_API bool any_variable_defined(const variable_list& variables);
48
+ TORCH_API void copy_range(
49
+ variable_list& out,
50
+ IndexRange range,
51
+ const at::Tensor& t);
52
+ TORCH_API void copy_range(
53
+ variable_list& out,
54
+ IndexRange range,
55
+ at::ArrayRef<at::Tensor> t);
56
+ TORCH_API at::Tensor copysign_tensor_self_backward(
57
+ const Tensor& grad,
58
+ const Tensor& self,
59
+ const Tensor& result);
60
+ TORCH_API at::Tensor not_implemented(const char* name, const char* reason = "");
61
+ TORCH_API std::vector<Tensor> not_implemented_list(
62
+ const char* name,
63
+ const char* reason = "");
64
+ at::Tensor handle_r_to_c(ScalarType self_st, Tensor gradient_result);
65
+ at::Tensor maybe_multiply(const at::Tensor& t, const at::Scalar& s);
66
+ int64_t _safe_size(IntArrayRef sizes, IntArrayRef dim);
67
+ Tensor restore_reduced_dims(
68
+ const Tensor& output,
69
+ IntArrayRef dims,
70
+ bool keepdim);
71
+ Tensor scale_grad_by_count(
72
+ const Tensor& grad,
73
+ const Tensor& mask,
74
+ IntArrayRef dims);
75
+ at::Tensor norm_backward(
76
+ const at::Tensor& grad,
77
+ const at::Tensor& self,
78
+ const optional<at::Scalar>& p_,
79
+ const at::Tensor& norm);
80
+ at::Tensor norm_backward(
81
+ at::Tensor grad,
82
+ const at::Tensor& self,
83
+ const optional<at::Scalar>& p_,
84
+ at::Tensor norm,
85
+ at::IntArrayRef dim,
86
+ bool keepdim);
87
+ Tensor norm_jvp(
88
+ const Tensor& self_p,
89
+ const Tensor& self_t,
90
+ const optional<Scalar>& p_,
91
+ Tensor norm,
92
+ IntArrayRef dim,
93
+ bool keepdim);
94
+ Tensor norm_jvp(
95
+ const Tensor& grad,
96
+ const Tensor& self,
97
+ const optional<Scalar>& p_,
98
+ Tensor norm);
99
+ Tensor _nested_from_padded_backward(
100
+ const Tensor& grad,
101
+ const Tensor& input,
102
+ const bool do_transform_0213);
103
+ std::tuple<Tensor, Tensor, Tensor> linear_double_backward(
104
+ const variable_list& grads,
105
+ const Tensor& self,
106
+ const Tensor& grad_output,
107
+ const Tensor& weight);
108
+ Tensor linalg_vector_norm_jvp(
109
+ const Tensor& self_p,
110
+ const Tensor& self_t,
111
+ const Scalar& scalar_ord,
112
+ Tensor norm,
113
+ const at::OptionalIntArrayRef& opt_dim,
114
+ bool keepdim);
115
+ at::Tensor linalg_vector_norm_backward(
116
+ at::Tensor grad,
117
+ const at::Tensor& self,
118
+ const at::Scalar& ord,
119
+ at::Tensor norm,
120
+ const at::OptionalIntArrayRef& opt_dim,
121
+ bool keepdim);
122
+ at::Tensor pow_backward(
123
+ at::Tensor grad,
124
+ const at::Tensor& self,
125
+ const at::Scalar& exponent_);
126
+ at::Tensor pow_backward_self(
127
+ const at::Tensor& grad,
128
+ const at::Tensor& self,
129
+ const at::Tensor& exponent);
130
+ at::Tensor pow_backward_exponent(
131
+ const at::Tensor& grad,
132
+ const at::Tensor& self,
133
+ const at::Tensor& exponent,
134
+ const at::Tensor& result);
135
+ at::Tensor pow_backward_exponent(
136
+ const at::Tensor& grad,
137
+ const at::Scalar& base,
138
+ const at::Tensor& exponent,
139
+ const at::Tensor& result);
140
+ at::Tensor angle_backward(const at::Tensor& grad, const at::Tensor& self);
141
+ template <typename T>
142
+ at::Tensor mul_tensor_backward(const Tensor& grad, T other, ScalarType self_st);
143
+ template <typename T>
144
+ at::Tensor div_tensor_self_backward(
145
+ const Tensor& grad,
146
+ T other,
147
+ ScalarType self_st);
148
+ at::Tensor div_tensor_other_backward(
149
+ const Tensor& grad,
150
+ const Tensor& self,
151
+ const Tensor& other);
152
+ template <typename T>
153
+ at::Tensor div_tensor_self_backward(
154
+ const Tensor& grad,
155
+ T other,
156
+ ScalarType self_st,
157
+ const c10::optional<c10::string_view>& rounding_mode);
158
+ at::Tensor div_tensor_other_backward(
159
+ const Tensor& grad,
160
+ const Tensor& self,
161
+ const Tensor& other,
162
+ const c10::optional<c10::string_view>& rounding_mode);
163
+ at::Tensor mvlgamma_backward(
164
+ const at::Tensor& grad,
165
+ const at::Tensor& self,
166
+ int64_t p);
167
+ at::Tensor permute_backwards(const at::Tensor& grad, at::IntArrayRef fwd_dims);
168
+ at::Tensor rad2deg_backward(const at::Tensor& grad);
169
+ at::Tensor deg2rad_backward(const at::Tensor& grad);
170
+ at::Tensor unsqueeze_multiple(
171
+ const at::Tensor& t,
172
+ at::OptionalIntArrayRef opt_dim,
173
+ size_t n_dims);
174
+ at::Tensor sum_backward(
175
+ const at::Tensor& grad,
176
+ at::SymIntArrayRef sizes,
177
+ at::OptionalIntArrayRef opt_dims,
178
+ bool keepdim);
179
+ at::Tensor sum_backward(
180
+ const at::Tensor& grad,
181
+ c10::SymIntArrayRef sizes,
182
+ c10::IntArrayRef dims,
183
+ bool keepdim);
184
+ at::Tensor nansum_backward(
185
+ const at::Tensor& grad,
186
+ const at::Tensor& self,
187
+ at::OptionalIntArrayRef dims,
188
+ bool keepdim);
189
+ std::vector<int64_t> reverse_list(const at::IntArrayRef list);
190
+ std::vector<c10::SymInt> reverse_list_symint(const c10::SymIntArrayRef list);
191
+ at::Tensor reverse_dim(const at::Tensor& t, int64_t dim);
192
+ at::Tensor prod_safe_zeros_backward(
193
+ const at::Tensor& grad,
194
+ const at::Tensor& inp,
195
+ int64_t dim);
196
+ at::Tensor prod_backward(
197
+ const at::Tensor& grad,
198
+ const at::Tensor& input,
199
+ const at::Tensor& result);
200
+ at::Tensor prod_backward(
201
+ at::Tensor grad,
202
+ const at::Tensor& input,
203
+ at::Tensor result,
204
+ int64_t dim,
205
+ bool keepdim);
206
+ at::Tensor solve_jvp(
207
+ const Tensor& X,
208
+ const Tensor& A,
209
+ const Tensor& dA,
210
+ const Tensor& dB);
211
+ at::Tensor solve_backward_self(
212
+ const at::Tensor& grad,
213
+ const at::Tensor& self,
214
+ const at::Tensor& A);
215
+ at::Tensor solve_backward_A(
216
+ const at::Tensor& grad,
217
+ const at::Tensor& self,
218
+ const at::Tensor& A,
219
+ const at::Tensor& solution);
220
+ at::Tensor cumsum_backward(const at::Tensor& grad, int64_t dim);
221
+ at::Tensor logsumexp_backward(
222
+ at::Tensor grad,
223
+ const at::Tensor& self,
224
+ at::Tensor result,
225
+ at::IntArrayRef dim,
226
+ bool keepdim);
227
+ at::Tensor logsumexp_jvp(
228
+ const at::Tensor& self_p,
229
+ const at::Tensor& self_t,
230
+ IntArrayRef dim,
231
+ bool keepdim);
232
+ at::Tensor logcumsumexp_backward(
233
+ at::Tensor grad,
234
+ const at::Tensor& self,
235
+ at::Tensor result,
236
+ int64_t dim);
237
+ at::Tensor logcumsumexp_jvp(
238
+ const at::Tensor& self_p,
239
+ const at::Tensor& self_t,
240
+ int64_t dim);
241
+ at::Tensor unbind_backward(const variable_list& grads, int64_t dim);
242
+ at::Tensor unbind_backward_nested(
243
+ const variable_list& grads,
244
+ const Tensor& nt_sizes,
245
+ int64_t dim,
246
+ const at::TensorOptions& options);
247
+ at::Tensor unsqueeze_to(const at::Tensor& self, c10::SymIntArrayRef sym_sizes);
248
+ at::Tensor unsqueeze_to(
249
+ const at::Tensor& self,
250
+ int64_t dim,
251
+ c10::SymIntArrayRef sym_sizes);
252
+ at::Tensor unsqueeze_to(
253
+ const at::Tensor& self,
254
+ IntArrayRef dim,
255
+ c10::SymIntArrayRef sym_sizes);
256
+ std::vector<at::Tensor> cat_tensors_backward(
257
+ const at::Tensor& grad,
258
+ const std::vector<std::vector<c10::SymInt>>& sizes,
259
+ const std::vector<ScalarType>& dtypes,
260
+ int64_t dim);
261
+ std::vector<at::Tensor> stack_tensors_backward(
262
+ const at::Tensor& grad,
263
+ int64_t dim,
264
+ const std::vector<ScalarType>& dtypes);
265
+ std::vector<at::Tensor> block_diag_backward(
266
+ const at::Tensor& grad,
267
+ const std::vector<std::vector<int64_t>>& sizes,
268
+ const std::vector<ScalarType>& dtypes);
269
+ at::Tensor clamp_backward(
270
+ const at::Tensor& grad,
271
+ const at::Tensor& self,
272
+ const optional<at::Scalar>& min,
273
+ const optional<at::Scalar>& max);
274
+ at::Tensor clamp_backward(
275
+ const at::Tensor& grad,
276
+ const at::Tensor& self,
277
+ const at::Tensor& min,
278
+ const at::Tensor& max);
279
+ std::tuple<at::Tensor, at::Tensor> clamp_backward_min_max(
280
+ const at::Tensor& grad,
281
+ const at::Tensor& self,
282
+ const at::Tensor& min,
283
+ const at::Tensor& max,
284
+ const std::array<bool, 2>&);
285
+ at::Tensor clamp_jvp(
286
+ const Tensor& self_p,
287
+ const Tensor& self_t,
288
+ const Tensor& min_p,
289
+ const Tensor& min_t,
290
+ const Tensor& max_p,
291
+ const Tensor& max_t);
292
+ at::SymIntArrayRef strides_or_error(
293
+ const Tensor& input,
294
+ c10::string_view const& input_name);
295
+ at::Tensor mm_mat1_backward(
296
+ const Tensor& grad,
297
+ const Tensor& mat2,
298
+ at::SymIntArrayRef mat1_sizes,
299
+ at::SymIntArrayRef mat1_strides,
300
+ c10::Layout mat1_layout,
301
+ const Scalar& alpha);
302
+ at::Tensor mm_mat2_backward(
303
+ const at::Tensor& grad,
304
+ const at::Tensor& mat1,
305
+ at::SymIntArrayRef sizes,
306
+ at::SymIntArrayRef strides,
307
+ c10::Layout layout,
308
+ const at::Scalar& alpha);
309
+ at::Tensor mm_mat1_sparse_backward(
310
+ const at::Tensor& grad,
311
+ const at::Tensor& mat1,
312
+ const at::Tensor& mat2,
313
+ const at::Scalar& alpha);
314
+ std::tuple<Tensor, Tensor, Tensor> sparse_sampled_addmm_backward(
315
+ const Tensor& grad,
316
+ const Tensor& self,
317
+ const c10::optional<Tensor>& mat1,
318
+ const c10::optional<Tensor>& mat2,
319
+ const Scalar& alpha,
320
+ const Scalar& beta,
321
+ const std::array<bool, 3>& grad_input_mask);
322
+ at::Tensor sparse_mask_backward(
323
+ const at::Tensor& grad,
324
+ const at::Tensor& mask,
325
+ c10::Layout self_layout);
326
+ at::Tensor sparse_sparse_matmul_backward(
327
+ const at::Tensor& grad,
328
+ const at::Tensor& mat1,
329
+ const at::Tensor& mat2,
330
+ int64_t grad_order);
331
+ at::Tensor renorm_backward(
332
+ const at::Tensor& grad,
333
+ const at::Tensor& self,
334
+ const at::Scalar& p,
335
+ int64_t dim,
336
+ const at::Scalar& maxnorm);
337
+ at::Tensor renorm_jvp(
338
+ const at::Tensor& self_p,
339
+ const at::Tensor& self_t,
340
+ const at::Scalar& p,
341
+ int64_t dim,
342
+ const at::Scalar& maxnorm);
343
+ at::Tensor repeat_backward(
344
+ at::Tensor grad,
345
+ at::SymIntArrayRef repeats,
346
+ at::SymIntArrayRef input_shape);
347
+ at::Tensor _fused_dropout_backward(
348
+ const at::Tensor& grad,
349
+ const at::Tensor& mask,
350
+ double p1m);
351
+ at::Tensor infinitely_differentiable_native_dropout_backward(
352
+ const at::Tensor& grad,
353
+ const at::Tensor& mask,
354
+ double scale);
355
+ at::Tensor native_dropout_double_backward(
356
+ const at::Tensor& ggI,
357
+ const at::Tensor& grad,
358
+ const at::Tensor& mask,
359
+ double scale);
360
+ at::Tensor evenly_distribute_backward(
361
+ const at::Tensor& grad,
362
+ const at::Tensor& input,
363
+ const at::Tensor& value);
364
+ Tensor sgn_backward(const Tensor& x, const Tensor& gx, const Tensor& sgn);
365
+ Tensor masked_fill_backward(const Tensor& grad, const Tensor& mask);
366
+ at::Tensor var_backward(
367
+ at::Tensor grad,
368
+ const at::Tensor& self,
369
+ at::OptionalIntArrayRef dim,
370
+ const c10::optional<c10::Scalar>& correction,
371
+ bool keepdim);
372
+ at::Tensor var_jvp(
373
+ const at::Tensor& self_t,
374
+ const at::Tensor& self_p,
375
+ const at::Tensor& result,
376
+ at::OptionalIntArrayRef dim_opt,
377
+ const c10::optional<c10::Scalar>& correction,
378
+ bool keepdim);
379
+ at::Tensor std_backward(
380
+ const at::Tensor& result,
381
+ const at::Tensor& grad,
382
+ const at::Tensor& self,
383
+ at::OptionalIntArrayRef dim,
384
+ const c10::optional<c10::Scalar>& correction,
385
+ bool keepdim);
386
+ Tensor mean_backward(
387
+ const Tensor& grad,
388
+ c10::SymIntArrayRef shape,
389
+ at::OptionalIntArrayRef opt_dim,
390
+ c10::SymInt numel,
391
+ bool keepdim);
392
+ Tensor var_mean_backward(
393
+ const Tensor& gvar,
394
+ const Tensor& gmean,
395
+ const Tensor& self,
396
+ at::OptionalIntArrayRef dim_opt,
397
+ const c10::optional<c10::Scalar>& correction,
398
+ bool keepdim);
399
+ Tensor std_mean_backward(
400
+ const Tensor& gstd,
401
+ const Tensor& gmean,
402
+ const Tensor& self,
403
+ const Tensor& std,
404
+ at::OptionalIntArrayRef dim_opt,
405
+ const c10::optional<c10::Scalar>& correction,
406
+ bool keepdim);
407
+ at::Tensor cholesky_backward(
408
+ const at::Tensor& grad,
409
+ bool upper,
410
+ const at::Tensor& L);
411
+ at::Tensor cholesky_jvp(
412
+ const at::Tensor& input_tangent,
413
+ const at::Tensor& L,
414
+ bool upper);
415
+ at::Tensor cholesky_inverse_backward(
416
+ const at::Tensor& grad,
417
+ const at::Tensor& L,
418
+ bool upper,
419
+ const at::Tensor& inverse);
420
+ at::Tensor cholesky_inverse_jvp(
421
+ const at::Tensor& F,
422
+ const at::Tensor& dF,
423
+ const at::Tensor& X,
424
+ bool upper);
425
+ Tensor pinv_jvp(const Tensor& A, const Tensor& pinvA, const Tensor& dA);
426
+ Tensor pinv_backward(const Tensor& grad, const Tensor& pinvA, const Tensor& A);
427
+ at::Tensor split_with_sizes_backward(
428
+ const std::vector<torch::autograd::Variable>& grads,
429
+ c10::SymIntArrayRef split_sizes,
430
+ int64_t dim,
431
+ c10::SymIntArrayRef sizes,
432
+ const at::TensorOptions& options);
433
+ at::Tensor _nested_split_with_sizes_backward(
434
+ const std::vector<torch::autograd::Variable>& grads,
435
+ c10::SymIntArrayRef split_sizes,
436
+ int64_t dim,
437
+ const Tensor& nt_sizes,
438
+ const at::TensorOptions& options);
439
+ at::Tensor split_backward(
440
+ const std::vector<torch::autograd::Variable>& grads,
441
+ const c10::SymInt& split_size,
442
+ int64_t dim,
443
+ c10::SymIntArrayRef sizes,
444
+ const at::TensorOptions& options);
445
+ at::Tensor max_pool_double_backward(
446
+ const at::Tensor& grad,
447
+ const at::Tensor& indices,
448
+ int dim);
449
+ at::Tensor error_for_max_pool2d_double_backward();
450
+ at::Tensor glu_double_backward(
451
+ const at::Tensor& grad,
452
+ const at::Tensor& grad_output,
453
+ const at::Tensor& input,
454
+ int64_t dim);
455
+ at::Tensor glu_double_backward_grad_output(
456
+ const at::Tensor& grad,
457
+ const at::Tensor& input,
458
+ int64_t dim);
459
+ at::Tensor infinitely_differentiable_silu_backward(
460
+ const at::Tensor& grad_output,
461
+ const at::Tensor& input);
462
+ at::Tensor infinitely_differentiable_mish_backward(
463
+ const at::Tensor& grad_output,
464
+ const at::Tensor& input);
465
+ Tensor infinitely_differentiable_logit_backward(
466
+ const Tensor& grad,
467
+ const Tensor& self,
468
+ c10::optional<double> eps);
469
+ Tensor binary_cross_entropy_target_backward(
470
+ const Tensor& grad,
471
+ const Tensor& self,
472
+ const Tensor& target,
473
+ const c10::optional<Tensor>& weight,
474
+ int64_t reduction);
475
+ Tensor binary_cross_entropy_double_backward_target(
476
+ const Tensor& grad,
477
+ const Tensor& grad_output,
478
+ const Tensor& self,
479
+ const Tensor& target,
480
+ const c10::optional<Tensor>& weight,
481
+ int64_t reduction);
482
+ Tensor binary_cross_entropy_with_logits_backward(
483
+ const Tensor& grad,
484
+ const Tensor& input,
485
+ const Tensor& target,
486
+ const c10::optional<Tensor>& weight_opt,
487
+ const c10::optional<Tensor>& pos_weight_opt,
488
+ int64_t reduction);
489
+ at::Tensor binary_cross_entropy_with_logits_target_backward(
490
+ const at::Tensor& grad_output,
491
+ const at::Tensor& self,
492
+ const at::Tensor& target,
493
+ const c10::optional<at::Tensor>& weight,
494
+ const c10::optional<at::Tensor>& pos_weight,
495
+ int64_t reduction);
496
+ at::Tensor log_sigmoid_double_backward(
497
+ const at::Tensor& grad,
498
+ const at::Tensor& input);
499
+ at::Tensor softmax_double_backward(
500
+ const at::Tensor& grad,
501
+ const at::Tensor& grad_output,
502
+ int dim,
503
+ const at::Tensor& output);
504
+ at::Tensor binary_cross_entropy_double_backward(
505
+ const at::Tensor& grad_output,
506
+ const at::Tensor& grad,
507
+ const at::Tensor& input,
508
+ const at::Tensor& target,
509
+ const c10::optional<at::Tensor>& weight,
510
+ int64_t reduction);
511
+ at::Tensor binary_cross_entropy_double_backward_grad_output(
512
+ const at::Tensor& grad,
513
+ const at::Tensor& input,
514
+ const at::Tensor& target,
515
+ const c10::optional<at::Tensor>& weight,
516
+ int64_t reduction);
517
+ at::Tensor smooth_l1_loss_double_backward(
518
+ const at::Tensor& grad,
519
+ const at::Tensor& input,
520
+ const at::Tensor& target,
521
+ int64_t reduction,
522
+ double beta);
523
+ at::Tensor huber_loss_double_backward(
524
+ const at::Tensor& grad,
525
+ const at::Tensor& input,
526
+ const at::Tensor& target,
527
+ int64_t reduction,
528
+ double delta);
529
+ at::Tensor huber_loss_double_backward_grad_output(
530
+ const at::Tensor& grad,
531
+ const at::Tensor& grad_output,
532
+ const at::Tensor& input,
533
+ const at::Tensor& target,
534
+ int64_t reduction,
535
+ double delta);
536
+ at::Tensor mse_loss_double_backward(
537
+ const at::Tensor& grad,
538
+ const at::Tensor& input,
539
+ int64_t reduction);
540
+ at::Tensor soft_margin_loss_double_backward(
541
+ const at::Tensor& grad,
542
+ const at::Tensor& input,
543
+ const at::Tensor& target,
544
+ int64_t reduction);
545
+ at::Tensor soft_margin_loss_double_backward_grad_output(
546
+ const at::Tensor& grad,
547
+ const at::Tensor& grad_output,
548
+ const at::Tensor& input,
549
+ const at::Tensor& target,
550
+ int64_t reduction);
551
+ at::Tensor softplus_double_backward(
552
+ const at::Tensor& grad,
553
+ const at::Tensor& input,
554
+ const at::Scalar& beta,
555
+ const at::Scalar& threshold);
556
+ std::tuple<at::Tensor, at::Tensor> slogdet_jvp(
557
+ const at::Tensor& LU,
558
+ const at::Tensor& pivots,
559
+ const at::Tensor& dA,
560
+ const at::Tensor& sign,
561
+ const bool use_A_T);
562
+ at::Tensor slogdet_backward(
563
+ const at::Tensor& grad_sign,
564
+ const at::Tensor& grad_logabsdet,
565
+ const at::Tensor& A,
566
+ const at::Tensor& signdet,
567
+ const at::Tensor& LU,
568
+ const at::Tensor& pivots);
569
+ at::Tensor log1p_backward(const at::Tensor& grad, const at::Tensor& self);
570
+ at::Tensor sinc_backward(const at::Tensor& grad, const at::Tensor& self);
571
+ at::Tensor sparse_constructor_values_backward(
572
+ const at::Tensor& sparse_grad_out,
573
+ const at::Tensor& indices);
574
+ at::Tensor embedding_dense_double_backward_symint(
575
+ const at::Tensor& grad,
576
+ const at::Tensor& indices,
577
+ const c10::SymInt& padding_idx);
578
+ at::Tensor index_backward(
579
+ at::Tensor zeros_like_self,
580
+ const torch::List<c10::optional<Tensor>>& indices,
581
+ const at::Tensor& grad);
582
+ at::Tensor _cudnn_ctc_loss_backward(
583
+ const at::Tensor& grad_out,
584
+ const at::Tensor& loss,
585
+ const at::Tensor& raw_grad,
586
+ bool zero_infinity);
587
+ at::Tensor elu_double_backward(
588
+ const Tensor& grad,
589
+ const Tensor& grad_output,
590
+ const Scalar& alpha,
591
+ const Scalar& scale,
592
+ const Scalar& input_scale,
593
+ bool is_result,
594
+ const Tensor& self_or_result);
595
+
596
+ Tensor svd_backward(
597
+ const Tensor& gU,
598
+ const Tensor& gS,
599
+ const Tensor& gVh,
600
+ const Tensor& U,
601
+ const Tensor& S,
602
+ const Tensor& Vh);
603
+
604
+ std::tuple<Tensor, Tensor, Tensor> linalg_svd_jvp(
605
+ const Tensor& dA,
606
+ const Tensor& U,
607
+ const Tensor& S,
608
+ const Tensor& Vh,
609
+ const bool full_matrices);
610
+ Tensor slice_backward_wrapper(
611
+ const at::Tensor& grad,
612
+ const c10::SymIntArrayRef& input_sizes,
613
+ int64_t dim,
614
+ c10::optional<c10::SymInt> start,
615
+ c10::optional<c10::SymInt> end,
616
+ c10::SymInt step);
617
+ std::tuple<Tensor, Tensor> linalg_eig_jvp(
618
+ const Tensor& dA,
619
+ const Tensor& L,
620
+ const Tensor& V,
621
+ const bool is_hermitian);
622
+ Tensor linalg_eig_backward(
623
+ const Tensor& gL,
624
+ const Tensor& gV,
625
+ const Tensor& L,
626
+ const Tensor& V,
627
+ const bool is_hermitian,
628
+ const bool symeig_eigenvectors = true);
629
+ Tensor linalg_lstsq_jvp(
630
+ const Tensor& A,
631
+ const Tensor& B,
632
+ const Tensor& dA,
633
+ const Tensor& dB);
634
+ std::tuple<Tensor, Tensor> triangular_solve_backward(
635
+ const Tensor& grad_x,
636
+ const Tensor& grad_m,
637
+ const Tensor& b,
638
+ const Tensor& a,
639
+ const Tensor& x,
640
+ const bool upper,
641
+ const bool transpose,
642
+ const bool unitriangular,
643
+ std::array<bool, 2> output_mask);
644
+ Tensor triangular_solve_jvp(
645
+ const Tensor& X,
646
+ const Tensor& A,
647
+ const Tensor& dA,
648
+ const Tensor& dB,
649
+ const bool upper,
650
+ const bool transpose,
651
+ const bool unitriangular);
652
+ Tensor linalg_solve_triangular_forward_AD(
653
+ const Tensor& A_t,
654
+ const Tensor& B_t,
655
+ const Tensor& A,
656
+ const Tensor& X,
657
+ const bool upper,
658
+ const bool left,
659
+ const bool unitriangular);
660
+ std::tuple<Tensor, Tensor> linalg_solve_triangular_backward(
661
+ const Tensor& grad,
662
+ const Tensor& A,
663
+ const Tensor& X,
664
+ const bool upper,
665
+ const bool left,
666
+ const bool unitriangular,
667
+ std::array<bool, 2> output_mask);
668
+ std::tuple<Tensor, Tensor, Tensor> _trilinear_backward(
669
+ const Tensor& grad_out,
670
+ const c10::optional<Tensor>& i1,
671
+ const c10::optional<Tensor>& i2,
672
+ const c10::optional<Tensor>& i3,
673
+ IntArrayRef expand1,
674
+ IntArrayRef expand2,
675
+ IntArrayRef expand3,
676
+ IntArrayRef sumdim,
677
+ std::array<bool, 3> grad_mask);
678
+ std::tuple<Tensor, Tensor> linalg_qr_jvp(
679
+ const Tensor& dA,
680
+ const Tensor& Q,
681
+ const Tensor& R,
682
+ const c10::string_view mode);
683
+ Tensor linalg_qr_backward(
684
+ const Tensor& gQ,
685
+ const Tensor& gR,
686
+ const Tensor& Q,
687
+ const Tensor& R,
688
+ const c10::string_view mode);
689
+ Tensor linalg_matrix_exp_differential(
690
+ const Tensor& self,
691
+ const Tensor& grad,
692
+ bool adjoint);
693
+ std::tuple<Tensor, Tensor, Tensor> batchnorm_double_backward(
694
+ const Tensor& input,
695
+ const c10::optional<Tensor>& gamma,
696
+ const Tensor& ggI,
697
+ const Tensor& ggG,
698
+ const Tensor& ggB,
699
+ const Tensor& gO,
700
+ const c10::optional<Tensor>& running_mean,
701
+ const c10::optional<Tensor>& running_var,
702
+ bool training,
703
+ double eps,
704
+ const c10::optional<Tensor>& save_mean,
705
+ const c10::optional<Tensor>& save_invstd,
706
+ std::array<bool, 3> output_mask);
707
+ std::tuple<Tensor, Tensor> _euclidean_dist_backward(
708
+ const Tensor& grad,
709
+ const Tensor& x1,
710
+ const Tensor& x2,
711
+ const Tensor& res);
712
+ Tensor fft_backward(
713
+ const Tensor& self,
714
+ const Tensor& grad,
715
+ int64_t signal_ndim,
716
+ bool complex_input,
717
+ bool complex_output,
718
+ bool inverse,
719
+ IntArrayRef checked_signal_sizes,
720
+ int64_t normalization,
721
+ bool onesided,
722
+ IntArrayRef output_sizes);
723
+ Tensor fft_r2c_backward(
724
+ const Tensor& grad,
725
+ at::IntArrayRef dim,
726
+ int64_t normalization,
727
+ bool onesided,
728
+ const c10::SymInt& last_dim_size);
729
+ Tensor fft_c2r_backward(
730
+ const Tensor& grad,
731
+ IntArrayRef dim,
732
+ int64_t normalization);
733
+ Tensor constant_pad_nd_backward(const Tensor& grad, c10::SymIntArrayRef pad);
734
+ std::tuple<Tensor, Tensor> cholesky_solve_backward(
735
+ const Tensor& grad_x,
736
+ const Tensor& self,
737
+ const Tensor& input2,
738
+ const Tensor& result,
739
+ const bool upper,
740
+ std::array<bool, 2> output_mask);
741
+ Tensor cholesky_solve_jvp(
742
+ const Tensor& X,
743
+ const Tensor& U,
744
+ const Tensor& dU,
745
+ const Tensor& dB,
746
+ const bool upper);
747
+ std::tuple<Tensor, Tensor, Tensor>
748
+ infinitely_differentiable_native_group_norm_backward(
749
+ const Tensor& dY,
750
+ const Tensor& dmean,
751
+ const Tensor& drstd,
752
+ const Tensor& X,
753
+ const Tensor& mean,
754
+ const Tensor& rstd,
755
+ const c10::optional<Tensor>& gamma,
756
+ c10::SymInt N,
757
+ const c10::SymInt& C,
758
+ c10::SymInt HxW,
759
+ int64_t group,
760
+ double eps,
761
+ std::array<bool, 3> grad_input_mask);
762
+ Tensor gelu_double_backward(
763
+ const Tensor& ggI,
764
+ const Tensor& gO,
765
+ const Tensor& input,
766
+ c10::string_view approximate);
767
+ Tensor as_strided_backward(
768
+ Tensor grad,
769
+ const TensorGeometry& input_geometry,
770
+ c10::SymIntArrayRef sizes,
771
+ c10::SymIntArrayRef strides,
772
+ const optional<c10::SymInt>& storage_offset_);
773
+ Tensor as_strided_scatter_backward(
774
+ const Tensor& grad,
775
+ const TensorGeometry& input_geometry,
776
+ const TensorGeometry& src_geometry,
777
+ c10::SymIntArrayRef sizes,
778
+ c10::SymIntArrayRef strides,
779
+ optional<c10::SymInt> storage_offset);
780
+ std::tuple<Tensor, Tensor> atan2_backward(
781
+ const Tensor& grad,
782
+ const Tensor& self,
783
+ const Tensor& other,
784
+ std::array<bool, 2> output_mask);
785
+ Tensor amaxamin_jvp(
786
+ const Tensor& x,
787
+ const Tensor& dx,
788
+ const Tensor& result,
789
+ IntArrayRef dim,
790
+ bool keepdim);
791
+ std::tuple<Tensor, Tensor, Tensor> layer_norm_double_backward(
792
+ const Tensor& input,
793
+ const c10::optional<Tensor>& gamma,
794
+ const Tensor& ggI,
795
+ const Tensor& ggG,
796
+ const Tensor& ggB,
797
+ const Tensor& gO,
798
+ const Tensor& save_mean,
799
+ const Tensor& save_invstd,
800
+ c10::SymIntArrayRef normalized_shape,
801
+ std::array<bool, 3> output_mask);
802
+
803
+ std::tuple<Tensor, Tensor> householder_product_backward(
804
+ const Tensor& grad,
805
+ const Tensor& result,
806
+ const Tensor& input,
807
+ const Tensor& tau,
808
+ const bool flip_order = false);
809
+ Tensor householder_product_jvp(
810
+ const Tensor& dV,
811
+ const Tensor& dtau,
812
+ const Tensor& prod,
813
+ const Tensor& V,
814
+ const Tensor& tau);
815
+ std::tuple<Tensor, Tensor, Tensor> ormqr_backward(
816
+ const Tensor& grad,
817
+ const Tensor& result,
818
+ const Tensor& self,
819
+ const Tensor& tau,
820
+ const Tensor& other,
821
+ bool left,
822
+ bool transpose,
823
+ std::array<bool, 3> grad_output_mask);
824
+ std::tuple<Tensor, Tensor> polar_backward(
825
+ const Tensor& grad,
826
+ const Tensor& result);
827
+ Tensor i1_backward(
828
+ const Tensor& grad,
829
+ const Tensor& self,
830
+ const Tensor& result);
831
+ Tensor i1e_backward(
832
+ const Tensor& grad,
833
+ const Tensor& self,
834
+ const Tensor& result);
835
+ Tensor linalg_lu_solve_LU(
836
+ const Tensor& grad,
837
+ const Tensor& LU,
838
+ const Tensor& pivots,
839
+ const Tensor& X,
840
+ const bool left,
841
+ const bool adjoint);
842
+ Tensor linalg_lu_solve_jvp(
843
+ const Tensor& X,
844
+ const Tensor& LU,
845
+ const Tensor& pivots,
846
+ const Tensor& dLU,
847
+ const Tensor& dB,
848
+ const bool left,
849
+ const bool adjoint);
850
+ std::tuple<Tensor, Tensor> linalg_solve_backward(
851
+ const Tensor& gX,
852
+ const Tensor& X,
853
+ const Tensor& A,
854
+ const Tensor& LU,
855
+ const Tensor& pivots,
856
+ const bool left,
857
+ const bool B_requires_grad);
858
+ Tensor linalg_solve_jvp(
859
+ const Tensor& dA,
860
+ const Tensor& dB,
861
+ const Tensor& X,
862
+ const Tensor& LU,
863
+ const Tensor& pivots,
864
+ const bool left,
865
+ const bool use_A_T);
866
+ Tensor lu_unpack_backward(
867
+ const Tensor& L_grad,
868
+ const Tensor& U_grad,
869
+ const c10::SymInt& m,
870
+ const c10::SymInt& n);
871
+
872
+ Tensor linalg_det_backward(
873
+ const Tensor& grad,
874
+ const Tensor& det,
875
+ const Tensor& A,
876
+ const Tensor& LU,
877
+ const Tensor& pivots);
878
+ Tensor linalg_det_jvp(
879
+ const Tensor& dA,
880
+ const Tensor& det,
881
+ const Tensor& LU,
882
+ const Tensor& pivots,
883
+ const bool use_A_T);
884
+ std::tuple<Tensor, Tensor> linalg_lstsq_backward(
885
+ const Tensor& grad,
886
+ const Tensor& A,
887
+ const Tensor& B_,
888
+ const std::array<bool, 2>& grad_input_mask);
889
+ Tensor linalg_lu_backward(
890
+ const Tensor& L_grad,
891
+ const Tensor& U_grad,
892
+ const Tensor& P,
893
+ const Tensor& L,
894
+ const Tensor& U,
895
+ const bool pivot);
896
+
897
+ std::tuple<Tensor, Tensor> linalg_lu_jvp(
898
+ const Tensor& dA,
899
+ const Tensor& P,
900
+ const Tensor& L,
901
+ const Tensor& U,
902
+ const bool pivot);
903
+
904
+ Tensor lu_factor_ex_backward(
905
+ const Tensor& grad,
906
+ const Tensor& LU,
907
+ const Tensor& pivs,
908
+ const bool pivot);
909
+ Tensor lu_factor_ex_jvp(
910
+ const Tensor& dX,
911
+ const Tensor& LU,
912
+ const Tensor& pivs,
913
+ const bool pivot);
914
+
915
+ Tensor batch_norm_jvp(
916
+ const Tensor& input_p,
917
+ const Tensor& input_t,
918
+ const Tensor& weight_p,
919
+ const Tensor& weight_t,
920
+ const Tensor& bias_p,
921
+ const Tensor& bias_t,
922
+ const c10::optional<Tensor>& running_mean,
923
+ const c10::optional<Tensor>& running_var,
924
+ const Tensor& saved_mean,
925
+ const Tensor& saved_invstd,
926
+ bool train,
927
+ double eps);
928
+
929
+ Tensor layer_norm_jvp(
930
+ const Tensor& input_p,
931
+ const Tensor& input_t,
932
+ const Tensor& weight_p,
933
+ const Tensor& weight_t,
934
+ const Tensor& bias_p,
935
+ const Tensor& bias_t,
936
+ const Tensor& saved_mean,
937
+ const Tensor& saved_invstd,
938
+ c10::SymIntArrayRef normalized_shape);
939
+
940
+ Tensor group_norm_jvp(
941
+ const Tensor& input_p,
942
+ const Tensor& input_t,
943
+ const Tensor& weight_p,
944
+ const Tensor& weight_t,
945
+ const Tensor& bias_p,
946
+ const Tensor& bias_t,
947
+ const Tensor& saved_mean,
948
+ const Tensor& saved_invstd,
949
+ int64_t groups);
950
+ Tensor group_norm_mean_jvp(
951
+ const Tensor& input_t,
952
+ const Tensor& mean_p,
953
+ int64_t groups);
954
+ Tensor group_norm_invstd_jvp(
955
+ const Tensor& input_p,
956
+ const Tensor& input_t,
957
+ const Tensor& mean_p,
958
+ const Tensor& invstd_p,
959
+ int64_t groups);
960
+
961
+ Tensor convolution_jvp(
962
+ const Tensor& input_p,
963
+ const Tensor& input_t,
964
+ const Tensor& weight_p,
965
+ const Tensor& weight_t,
966
+ const Tensor& bias_p,
967
+ const Tensor& bias_t,
968
+ at::SymIntArrayRef stride,
969
+ at::SymIntArrayRef padding,
970
+ at::SymIntArrayRef dilation,
971
+ bool transposed,
972
+ at::SymIntArrayRef output_padding,
973
+ const c10::SymInt& groups);
974
+
975
+ Tensor _convolution_jvp(
976
+ const Tensor& input_p,
977
+ const Tensor& input_t,
978
+ const Tensor& weight_p,
979
+ const Tensor& weight_t,
980
+ const Tensor& bias_p,
981
+ const Tensor& bias_t,
982
+ at::SymIntArrayRef stride,
983
+ at::SymIntArrayRef padding,
984
+ at::SymIntArrayRef dilation,
985
+ bool transposed,
986
+ at::SymIntArrayRef output_padding,
987
+ const c10::SymInt& groups,
988
+ bool benchmark,
989
+ bool deterministic,
990
+ bool cudnn_enabled,
991
+ bool allow_tf32);
992
+
993
+ Tensor convolution_backward_jvp_grad_bias(
994
+ const Tensor& grad_out_t,
995
+ const Tensor& grad_bias);
996
+
997
+ Tensor cat_jvp(const at::ITensorListRef& tensors, int64_t dim);
998
+ Tensor block_diag_jvp(at::TensorList tensors);
999
+ Tensor stack_jvp(at::TensorList tensors, int64_t dim);
1000
+ Tensor cumprod_jvp(
1001
+ const Tensor& self_t,
1002
+ const Tensor& self_p,
1003
+ const Tensor& result,
1004
+ int dim);
1005
+ Tensor gather_with_keepdimed_indices(
1006
+ const Tensor& input,
1007
+ int64_t dim,
1008
+ const Tensor& indices,
1009
+ bool keepdim);
1010
+ Tensor evenly_read_jvp(
1011
+ const Tensor& fw_grad,
1012
+ const Tensor& input,
1013
+ const Tensor& value);
1014
+ Tensor warn_backwards(const Tensor& grad_output);
1015
+
1016
+ std::tuple<Tensor, Tensor> _cudnn_convolution_backward(
1017
+ const at::Tensor& self,
1018
+ const at::Tensor& grad_output,
1019
+ const at::Tensor& weight,
1020
+ at::SymIntArrayRef padding,
1021
+ at::SymIntArrayRef output_padding,
1022
+ at::SymIntArrayRef stride,
1023
+ at::SymIntArrayRef dilation,
1024
+ bool transposed,
1025
+ c10::SymInt groups,
1026
+ ::std::array<bool, 2> output_mask);
1027
+
1028
+ Tensor scatter_reduce_jvp(
1029
+ const Tensor& self_p,
1030
+ const Tensor& self_t,
1031
+ int dim,
1032
+ const Tensor& index,
1033
+ const Tensor& src_p,
1034
+ const Tensor& src_t,
1035
+ c10::string_view reduce,
1036
+ bool include_self,
1037
+ const Tensor& result);
1038
+
1039
+ std::tuple<Tensor, Tensor> scatter_reduce_backward(
1040
+ const Tensor& grad,
1041
+ const Tensor& self,
1042
+ int dim,
1043
+ const Tensor& index,
1044
+ const Tensor& src,
1045
+ c10::string_view reduce,
1046
+ bool include_self,
1047
+ const Tensor& result);
1048
+
1049
+ Tensor _to_copy_backward(
1050
+ const Tensor& grad,
1051
+ const c10::TensorOptions& self_options);
1052
+
1053
+ std::tuple<Tensor, Tensor> index_reduce_backward(
1054
+ const Tensor& grad,
1055
+ const Tensor& self,
1056
+ int dim,
1057
+ const Tensor& index,
1058
+ const Tensor& source,
1059
+ c10::string_view reduce,
1060
+ bool include_self,
1061
+ const Tensor& result);
1062
+
1063
+ Tensor take_backward(
1064
+ const Tensor& grad,
1065
+ const Tensor& self,
1066
+ const Tensor& indices);
1067
+
1068
+ Tensor to_sparse_backward(
1069
+ const Tensor& grad,
1070
+ const c10::Layout self_layout,
1071
+ const c10::OptionalArrayRef<c10::SymInt>& self_blocksize);
1072
+
1073
+ std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor>
1074
+ mkldnn_rnn_layer_differentiable_backward(
1075
+ const Tensor& input,
1076
+ const Tensor& weight0,
1077
+ const Tensor& weight1,
1078
+ const Tensor& weight2,
1079
+ const Tensor& weight3,
1080
+ const Tensor& hx_,
1081
+ const Tensor& cx_tmp,
1082
+ const Tensor& output,
1083
+ const Tensor& hy_,
1084
+ const Tensor& cy_,
1085
+ const c10::optional<Tensor>& grad_output_r_opt,
1086
+ const c10::optional<Tensor>& grad_hy_r_opt,
1087
+ const c10::optional<Tensor>& grad_cy_r_opt,
1088
+ bool reverse,
1089
+ int64_t mode,
1090
+ int64_t hidden_size,
1091
+ int64_t num_layers,
1092
+ bool has_biases,
1093
+ bool train,
1094
+ bool bidirectional,
1095
+ at::IntArrayRef batch_sizes,
1096
+ bool batch_first,
1097
+ const at::Tensor& workspace);
1098
+
1099
+ Tensor values_backward(const Tensor& grad, const Tensor& self);
1100
+
1101
+ } // namespace torch::autograd::generated::details
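
The declarations above are the hand-written backward/JVP helpers that the generated derivative code dispatches into. As an illustration only, here is a minimal standalone sketch of the kind of computation a helper such as atan2_backward performs; the function name and free-standing form are hypothetical, and the math is simply the standard partial derivatives of atan2(y, x).

#include <ATen/ATen.h>
#include <tuple>

// Standalone sketch (not the real implementation): gradients of atan2(self, other)
// w.r.t. both inputs, using d/dy atan2(y, x) = x / (x^2 + y^2) and
// d/dx atan2(y, x) = -y / (x^2 + y^2). The output_mask handling of the real
// helper is omitted.
std::tuple<at::Tensor, at::Tensor> atan2_backward_sketch(
    const at::Tensor& grad,
    const at::Tensor& self,
    const at::Tensor& other) {
  auto denom = self * self + other * other;
  return std::make_tuple(grad * other / denom, grad * -self / denom);
}
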
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/anomaly_mode.h ADDED
@@ -0,0 +1,71 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/Export.h>
4
+ #include <memory>
5
+ #include <string>
6
+
7
+ namespace torch::autograd {
8
+
9
+ // forward declaration of Node from function.h
10
+ struct Node;
11
+
12
+ struct TORCH_API AnomalyMode {
13
+ static bool is_enabled() {
14
+ return _enabled;
15
+ }
16
+ static bool should_check_nan() {
17
+ return _check_nan;
18
+ }
19
+ static void set_enabled(bool enabled, bool check_nan = true) {
20
+ _enabled = enabled;
21
+ _check_nan = check_nan;
22
+ }
23
+
24
+ private:
25
+ static bool _enabled;
26
+ static bool _check_nan;
27
+ };
28
+
29
+ /// A RAII guard that enables Anomaly Detection Mode.
30
+ ///
31
+ /// Anomaly detection mode is useful for debugging problems happening
32
+ /// in the backward, such as unexpectedly modified tensors or NaNs
33
+ /// occurring in the backward.
34
+ ///
35
+ /// The enabling of anomaly mode is global - as soon as there is one
36
+ /// such guard, it is enabled for all computation and threads. It also
37
+ /// comes with a significant performance penalty.
38
+ ///
39
+ /// Example:
40
+ /// @code
41
+ /// auto x = torch::tensor({1.}, torch::requires_grad());
42
+ /// {
43
+ /// torch::autograd::DetectAnomalyGuard detect_anomaly;
44
+ /// auto x = torch::tensor({5.0}, torch::requires_grad());
45
+ /// auto y = x * x;
46
+ /// auto z = y * y;
47
+ /// y += 1;
48
+ /// z.backward();
49
+ /// }
50
+ /// @endcode
51
+ class TORCH_API DetectAnomalyGuard {
52
+ public:
53
+ DetectAnomalyGuard(bool check_nan = true);
54
+ ~DetectAnomalyGuard();
55
+
56
+ private:
57
+ bool prev_check_nan_;
58
+ };
59
+
60
+ struct TORCH_API AnomalyMetadata {
61
+ virtual ~AnomalyMetadata();
62
+ virtual void store_stack();
63
+ virtual void print_stack(const std::string& current_node_name);
64
+ virtual void assign_parent(const std::shared_ptr<Node>& parent_node);
65
+
66
+ private:
67
+ std::string traceback_;
68
+ std::shared_ptr<Node> parent_;
69
+ };
70
+
71
+ } // namespace torch::autograd
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/autograd.h ADDED
@@ -0,0 +1,104 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/autograd/variable.h>
4
+
5
+ namespace torch::autograd {
6
+
7
+ /// Computes the sum of gradients of given tensors with respect to graph leaves.
8
+ ///
9
+ /// The graph is differentiated using the chain rule. If any of ``tensors``
10
+ /// are non-scalar (i.e. their data has more than one element) and require
11
+ /// gradient, then the Jacobian-vector product would be computed, in this case
12
+ /// the function additionally requires specifying `grad_tensors`. It should be a
13
+ /// sequence of matching length, that contains the "vector" in the
14
+ /// Jacobian-vector product, usually the gradient of the differentiated function
15
+ /// w.r.t. corresponding tensors
16
+ /// (`torch::Tensor()` is an acceptable value for all tensors that don't need
17
+ /// gradient tensors).
18
+ ///
19
+ /// This function accumulates gradients in the leaves - you might need to zero
20
+ /// them before calling it.
21
+ ///
22
+ /// \param tensors Tensors of which the derivative will be computed.
23
+ /// \param grad_tensors The "vector" in the Jacobian-vector product, usually
24
+ /// gradients
25
+ /// w.r.t. each element of corresponding tensors. `torch::Tensor()` values
26
+ /// can be specified for scalar Tensors or ones that don't require grad. If
27
+ /// a `torch::Tensor()` value would be acceptable for all grad_tensors, then
28
+ /// this argument is optional.
29
+ /// \param retain_graph If `false`, the graph used to compute the grad will be
30
+ /// freed.
31
+ /// Note that in nearly all cases setting this option to `true` is not
32
+ /// needed and often can be worked around in a much more efficient way.
33
+ /// Defaults to the value of `create_graph`.
34
+ /// \param create_graph If `true`, graph of the derivative will be constructed,
35
+ /// allowing
36
+ /// to compute higher order derivative products. Defaults to `false`.
37
+ /// \param inputs Inputs w.r.t. which the gradient will be accumulated into
38
+ /// `at::Tensor::grad`. All other Tensors will be ignored. If not provided,
39
+ /// the gradient is accumulated into all the leaf Tensors that were used to
40
+ /// compute param `tensors`.
41
+ // When inputs are provided and a given input is not a leaf,
42
+ // the current implementation will call its grad_fn (even though it is not
43
+ // strictly needed to get this gradients). It is an implementation detail
44
+ // on which the user should not rely. See
45
+ // https://github.com/pytorch/pytorch/pull/60521#issuecomment-867061780 for
46
+ // more details.
47
+ TORCH_API void backward(
48
+ const variable_list& tensors,
49
+ const variable_list& grad_tensors = {},
50
+ c10::optional<bool> retain_graph = c10::nullopt,
51
+ bool create_graph = false,
52
+ const variable_list& inputs = {});
53
+
54
+ /// Computes and returns the sum of gradients of outputs with respect to the
55
+ /// inputs.
56
+ ///
57
+ /// ``grad_outputs`` should be a sequence of length matching ``output``
58
+ /// containing the "vector" in Jacobian-vector product, usually the pre-computed
59
+ /// gradients w.r.t. each of the outputs. If an output doesn't require_grad,
60
+ /// then the gradient can be ``torch::Tensor()``).
61
+ ///
62
+ /// \param outputs outputs of the differentiated function.
63
+ /// \param inputs Inputs w.r.t. which the gradient will be
64
+ /// returned (and not accumulated into ``at::Tensor::grad``).
65
+ /// \param grad_outputs The "vector" in the Jacobian-vector product.
66
+ /// Usually gradients w.r.t. each output. `torch::Tensor()` values can be
67
+ /// specified for scalar Tensors or ones that don't require grad. If a
68
+ /// `torch::Tensor()` value would be acceptable for all grad_tensors, then
69
+ /// this argument is optional. Default: `{}`.
70
+ /// \param retain_graph If ``false``, the graph used to compute the grad
71
+ /// will be freed. Note that in nearly all cases setting this option to
72
+ /// ``true`` is not needed and often can be worked around in a much more
73
+ /// efficient way. Defaults to the value of ``create_graph``.
74
+ /// \param create_graph If ``true``, graph of the derivative will
75
+ /// be constructed, allowing to compute higher order derivative products.
76
+ /// Default: ``false``.
77
+ /// \param allow_unused If ``false``, specifying inputs that were not
78
+ /// used when computing outputs (and therefore their grad is always zero)
79
+ /// is an error. Defaults to ``false``.
80
+ TORCH_API variable_list grad(
81
+ const variable_list& outputs,
82
+ const variable_list& inputs,
83
+ const variable_list& grad_outputs = {},
84
+ c10::optional<bool> retain_graph = c10::nullopt,
85
+ bool create_graph = false,
86
+ bool allow_unused = false);
87
+
88
+ namespace forward_ad {
89
+
90
+ /// Creates a new dual level and returns its index. This level index should then
91
+ /// be used to call into the other functions below. This API supports entering a
92
+ /// new level before the previous one is exited. We call them nested forward AD
93
+ /// levels. These can be used to compute higher order derivatives.
94
+ TORCH_API uint64_t enter_dual_level();
95
+
96
+ /// Exits the given level. This will clear up all the gradients from this level
97
+ /// and all dual Tensors that had gradients for this level will become regular
98
+ /// Tensors again. This function can only be used to exit the innermost nesting
99
+ /// level and so exiting must happen in reverse order compared to the entering
100
+ /// that was done with the function above.
101
+ TORCH_API void exit_dual_level(uint64_t level);
102
+
103
+ } // namespace forward_ad
104
+ } // namespace torch::autograd
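
A hedged usage sketch of the two entry points declared above, via the C++ frontend; the tensor values are arbitrary and the comments state the expected analytic results, not guaranteed outputs.

#include <torch/torch.h>

void autograd_entry_points_sketch() {
  // Accumulate into x.grad(): for a scalar output, grad_tensors may be omitted.
  auto x = torch::randn({3}, torch::requires_grad());
  auto z = (x * x).sum();
  torch::autograd::backward({z});
  // x.grad() now holds 2 * x.

  // Return the gradients instead of accumulating them.
  auto x2 = torch::randn({3}, torch::requires_grad());
  auto z2 = (x2 * x2).sum();
  auto grads = torch::autograd::grad({z2}, {x2});
  // grads[0] is expected to equal 2 * x2.
}
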
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/cpp_hook.h ADDED
@@ -0,0 +1,29 @@
1
+ #pragma once
2
+ #include <torch/csrc/autograd/function_hook.h>
3
+ #include <functional>
4
+ #include <memory>
5
+
6
+ namespace torch::autograd {
7
+
8
+ using hooks_list =
9
+ std::vector<std::function<at::TensorBase(const at::TensorBase&)>>;
10
+
11
+ struct CppFunctionTensorPreHook : public FunctionPreHook {
12
+ CppFunctionTensorPreHook(std::shared_ptr<hooks_list> hooks, size_t value_idx);
13
+ variable_list operator()(const variable_list& values) override;
14
+
15
+ std::shared_ptr<hooks_list> hooks_;
16
+ size_t value_idx_;
17
+ };
18
+
19
+ struct CppFunctionSingleTensorPreHook : public FunctionPreHook {
20
+ CppFunctionSingleTensorPreHook(
21
+ std::function<at::TensorBase(const at::TensorBase&)> hook,
22
+ size_t value_idx);
23
+ variable_list operator()(const variable_list& values) override;
24
+
25
+ std::function<at::TensorBase(const at::TensorBase&)> hook_;
26
+ size_t value_idx_;
27
+ };
28
+
29
+ } // namespace torch::autograd
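
For context, a minimal sketch of the kind of callable that ends up in hooks_list above. The sketch assumes the usual user-facing entry point, Tensor::register_hook, which wraps the lambda and routes it through CppFunctionTensorPreHook during the backward pass.

#include <torch/torch.h>

void tensor_hook_sketch() {
  auto x = torch::randn({3}, torch::requires_grad());
  // The hook receives the incoming gradient and may return a replacement.
  x.register_hook([](torch::Tensor grad) { return grad * 2; });
  (x * x).sum().backward();
  // Without the hook x.grad() would be 2 * x; the hook doubles it to 4 * x.
}
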
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/engine.h ADDED
@@ -0,0 +1,288 @@
1
+ #pragma once
2
+
3
+ // Engine implements backpropagation from output variables and their gradients
4
+ // to "root" variables (variables created by the user with requires_grad=True).
5
+
6
+ #include <ATen/Tensor.h>
7
+ #include <ATen/ThreadLocalState.h>
8
+ #include <ATen/core/ivalue.h>
9
+ #include <torch/csrc/Export.h>
10
+ #include <torch/csrc/autograd/anomaly_mode.h>
11
+ #include <torch/csrc/autograd/function.h>
12
+ #include <torch/csrc/autograd/functions/basic_ops.h>
13
+ #include <torch/csrc/autograd/graph_task.h>
14
+ #include <torch/csrc/autograd/input_buffer.h>
15
+ #include <torch/csrc/autograd/saved_variable_hooks.h>
16
+ #include <torch/csrc/autograd/utils/warnings.h>
17
+
18
+ #include <c10/util/CallOnce.h>
19
+
20
+ #include <exception>
21
+ #include <functional>
22
+ #include <memory>
23
+ #include <queue>
24
+ #include <utility>
25
+ #include <vector>
26
+
27
+ namespace torch::autograd {
28
+ struct ReadyQueue;
29
+ }
30
+
31
+ namespace torch::autograd {
32
+
33
+ // Maximum reentrant backward depth before switching to a new thread
34
+ // This limit is based on the TSAN's deadlock detector, where it will
35
+ // fail if a program hold more than 65 locks in one thread at once.
36
+ // As we hold mutex in every of our custom C++ autograd Node, we would
37
+ // like to avoid TSAN complains on this when doing reentrant backwards
38
+ // For reference, see https://github.com/google/sanitizers/issues/950
39
+ static constexpr int MAX_DEPTH = 60;
40
+
41
+ void set_device(int device);
42
+ TORCH_API void validate_outputs(
43
+ const edge_list& edges,
44
+ variable_list& grads,
45
+ const std::function<std::string(const std::string&)>& format_error);
46
+
47
+ struct NodeTask {
48
+ std::weak_ptr<GraphTask> base_;
49
+ std::shared_ptr<Node> fn_;
50
+ // This buffer serves as an implicit "addition" node for all of the
51
+ // gradients flowing here. Once all the dependencies are finished, we
52
+ // use the contents of this buffer to run the function.
53
+ InputBuffer inputs_;
54
+ // When worker receives a task with isShutdownTask = true, it will immediately
55
+ // exit. The engine sends a shutdown task to every queue upon its destruction.
56
+ bool isShutdownTask_;
57
+
58
+ int getReentrantDepth() const;
59
+
60
+ NodeTask(
61
+ std::weak_ptr<GraphTask> base,
62
+ std::shared_ptr<Node> fn,
63
+ InputBuffer inputs,
64
+ bool isShutdownTask = false)
65
+ : base_(std::move(base)),
66
+ fn_(std::move(fn)),
67
+ inputs_(std::move(inputs)),
68
+ isShutdownTask_(isShutdownTask) {}
69
+ };
70
+
71
+ // Guard that sets and restores checkpoint_valid
72
+ class CheckpointValidGuard {
73
+ public:
74
+ explicit CheckpointValidGuard(
75
+ const std::shared_ptr<const GraphTask>& graph_task);
76
+ ~CheckpointValidGuard();
77
+
78
+ private:
79
+ bool prev_checkpoint_valid_state;
80
+ };
81
+
82
+ struct ReadyQueue {
83
+ private:
84
+ // Returns true when t2 should be (weakly) BEFORE t1 in the queue.
85
+ // Shutdown tasks are first and then empty NodeTasks are next.
86
+ struct CompareNodeTaskTime {
87
+ bool operator()(NodeTask const& t1, NodeTask const& t2) {
88
+ // NOLINTNEXTLINE(bugprone-branch-clone)
89
+ if (t2.isShutdownTask_) {
90
+ return true;
91
+ } else if (!t1.fn_ || t1.isShutdownTask_) {
92
+ return false;
93
+ } else if (!t2.fn_) {
94
+ return true;
95
+ } else if (t1.getReentrantDepth() == t2.getReentrantDepth()) {
96
+ return t1.fn_->sequence_nr() < t2.fn_->sequence_nr();
97
+ } else {
98
+ return t1.getReentrantDepth() < t2.getReentrantDepth();
99
+ }
100
+ }
101
+ };
102
+
103
+ // To notify threads waiting on the ReadyQueue of available tasks on the heap_
104
+ std::condition_variable not_empty_;
105
+ // To protect read and writes to heap_
106
+ mutable std::mutex mutex_;
107
+
108
+ std::priority_queue<NodeTask, std::vector<NodeTask>, CompareNodeTaskTime>
109
+ heap_;
110
+
111
+ public:
112
+ // incrementOutstandingTasks indicates whether or not we should increment
113
+ // 'outstanding_tasks_' for the associated GraphTask. This should mostly
114
+ // always be true and is only set false in certain cases (see docs for
115
+ // DistEngine.execute_graph_task_until_ready_queue_empty)
116
+ void push(NodeTask item, bool incrementOutstandingTasks = true);
117
+ void pushShutdownTask();
118
+ NodeTask pop();
119
+ bool empty() const;
120
+ size_t size() const;
121
+ };
122
+
123
+ // A single instance of this struct should be created through the whole process
124
+ // lifetime. The worker thread creation logic and Engine's destructor rely on
125
+ // this.
126
+ struct TORCH_API Engine {
127
+ /// Returns a reference to a static `Engine` instance.
128
+ static Engine& get_default_engine();
129
+
130
+ static Engine& get_base_engine();
131
+
132
+ // compiled_autograd needs to live in a different .so file so that it
133
+ // can have python symbols, so we add a layer of indirection
134
+ // see [Note: Compiled Autograd]
135
+ typedef variable_list (*compiled_autograd_fn)(
136
+ const std::shared_ptr<Node>& graph_root,
137
+ GraphTask& graph_task,
138
+ bool accumulate_grad,
139
+ const edge_list& outputs);
140
+ static void set_compiled_autograd(compiled_autograd_fn fn);
141
+
142
+ Engine(const Engine&) = delete;
143
+ Engine(Engine&&) = delete;
144
+ virtual ~Engine();
145
+
146
+ // Given a list of (Node, input number) pairs computes the value of the graph
147
+ // by following next_edge references.
148
+ virtual variable_list execute(
149
+ const edge_list& roots,
150
+ const variable_list& inputs,
151
+ bool keep_graph,
152
+ bool create_graph,
153
+ bool accumulate_grad,
154
+ const edge_list& outputs = {});
155
+
156
+ // Given a pre-populated GraphTask and GraphRoot, computes the backward pass
157
+ // for the graph.
158
+ //
159
+ // NB: This API should only be used by internal autograd specific
160
+ // machinery and shouldn't be exposed to users in anyway.
161
+ virtual c10::intrusive_ptr<at::ivalue::Future> execute_with_graph_task(
162
+ const std::shared_ptr<GraphTask>& graph_task,
163
+ std::shared_ptr<Node> graph_root,
164
+ InputBuffer&& input_buffer);
165
+
166
+ virtual std::unique_ptr<AnomalyMetadata> make_anomaly_metadata() {
167
+ return std::make_unique<AnomalyMetadata>();
168
+ }
169
+
170
+ virtual std::unique_ptr<SavedVariableHooks> get_default_saved_variable_hooks() {
171
+ return nullptr;
172
+ }
173
+
174
+ // We pass cpu_ready_queue to evaluate_function, so that it knows
175
+ // the correct ready queue to push to after a NodeTask is ready
176
+ void evaluate_function(
177
+ std::shared_ptr<GraphTask>& graph_task,
178
+ Node* func,
179
+ InputBuffer& inputs,
180
+ const std::shared_ptr<ReadyQueue>& cpu_ready_queue);
181
+
182
+ void initialize_device_threads_pool();
183
+ virtual void thread_on_exception(
184
+ std::shared_ptr<GraphTask> graph_task,
185
+ const std::shared_ptr<Node>& fn,
186
+ std::exception& e);
187
+
188
+ void queue_callback(std::function<void()> callback);
189
+
190
+ bool is_checkpoint_valid();
191
+
192
+ // Should be called after fork to notify that worker threads are gone
193
+ void release_workers();
194
+
195
+ // Must be called by subclass before destructing to avoid a data-race-on-vptr.
196
+ void stop();
197
+
198
+ // Initializes a device thread for the autograd engine.
199
+ virtual void thread_init(
200
+ int device,
201
+ const std::shared_ptr<ReadyQueue>& ready_queue,
202
+ bool should_increment = true);
203
+
204
+ protected:
205
+ Engine();
206
+ void compute_dependencies(Node* root, GraphTask& task, uint64_t min_topo_nr);
207
+
208
+ // initialize the thread local ready queue with the ready queue that is
209
+ // created elsewhere (i.e. thread_init, Engine::execute, etc), or create a new
210
+ // ready queue if ready_queue is not provided.
211
+ void init_local_ready_queue(
212
+ std::shared_ptr<ReadyQueue> ready_queue = nullptr);
213
+
214
+ std::shared_ptr<ReadyQueue> ready_queue(
215
+ std::shared_ptr<ReadyQueue> cpu_ready_queue,
216
+ at::Device device);
217
+ std::shared_ptr<ReadyQueue> ready_queue_by_index(
218
+ std::shared_ptr<ReadyQueue> cpu_ready_queue,
219
+ int device_index);
220
+ // start device threads (CUDA, XLA, etc.) in Engine,
221
+ // note that it does NOT start CPU thread.
222
+ void start_device_threads();
223
+ void increment_non_reentrant_thread_count();
224
+ void decrement_non_reentrant_thread_count();
225
+ virtual void thread_main(const std::shared_ptr<GraphTask>& task);
226
+ void reentrant_thread_init();
227
+ void add_thread_pool_task(const std::weak_ptr<GraphTask>& graph_task);
228
+
229
+ // Ensures device_ready_queues_ are initialized only once
230
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
231
+ c10::once_flag start_device_threads_flag_;
232
+ // Safe to read device_ready_queues_ without synchronization after
233
+ // initialization
234
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
235
+ std::vector<std::shared_ptr<ReadyQueue>> device_ready_queues_;
236
+
237
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
238
+ std::vector<std::function<void()>> final_callbacks_;
239
+ // To protect reads and writes to final_callbacks_
240
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
241
+ std::mutex post_callbacks_lock_;
242
+
243
+ // How many nested reentrant calls are allowed until a new thread is used
244
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
245
+ int max_recursion_depth_;
246
+
247
+ struct ThreadPoolShared {
248
+ // Data structures used by the threads for executing reentrant backwards
249
+ // tasks. See Note [Reentrant backwards]
250
+ // Number of available threads for processing new GraphTasks.
251
+ unsigned int num_workers_{0};
252
+ // The threads will wait on work_ to be notified of GraphTasks
253
+ std::condition_variable work_;
254
+ // To protect reads and writes to graphtask_queue_ and num_workers_
255
+ // and for synchronizing creating new threads when needed
256
+ std::mutex mutex_;
257
+ // Workers will process the GraphTasks added to this queue. A GraphTask is
258
+ // allocated inside Engine::execute and lives for the duration of execute
259
+ std::queue<std::weak_ptr<GraphTask>> graphtasks_queue_;
260
+
261
+ ThreadPoolShared() = default;
262
+ };
263
+
264
+ // Temporary workaround until shutting down threads is done
265
+ // We need shared ownership of all these objects because the threads are
266
+ // leaked when Engine shuts down, so there may be threads waiting on work_ for
267
+ // the graphtasks_queue_ to be nonempty.
268
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
269
+ std::shared_ptr<ThreadPoolShared> thread_pool_shared_;
270
+
271
+ private:
272
+ // Number of non-reentrant threads
273
+ std::atomic<uint32_t> non_reentrant_device_thread_count_;
274
+ // Destructor will wait for non-reentrant threads to finish
275
+ std::condition_variable non_reentrant_device_thread_condvar_;
276
+ std::mutex non_reentrant_device_thread_mutex_;
277
+ // stop() must be called before the destruction path goes down to the base
278
+ // class, in order to avoid a data-race-on-vptr. Use this boolean to guard
279
+ // whether stop() has already been called, so we can call this in every
280
+ // destructor of the class hierarchy.
281
+ bool stopped_{false};
282
+ };
283
+
284
+ // allow python_engine to override the default engine when it loads
285
+ using EngineStub = Engine& (*)();
286
+ TORCH_API void set_default_engine_stub(EngineStub stub);
287
+
288
+ } // namespace torch::autograd
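
The comparator semantics above ("returns true when t2 should be before t1") follow the usual std::priority_queue convention and are easy to get backwards. Below is a standalone analog, not engine code, with a made-up FakeTask type whose fields mirror the reentrant depth and sequence number used by CompareNodeTaskTime.

#include <cstdint>
#include <iostream>
#include <queue>
#include <vector>

struct FakeTask {
  int reentrant_depth;
  uint64_t sequence_nr;
};

// Mirrors the ordering for ordinary tasks: deeper reentrant depth pops first;
// within the same depth, the higher sequence number pops first.
struct CompareFakeTask {
  bool operator()(const FakeTask& t1, const FakeTask& t2) const {
    if (t1.reentrant_depth == t2.reentrant_depth)
      return t1.sequence_nr < t2.sequence_nr;
    return t1.reentrant_depth < t2.reentrant_depth;
  }
};

int main() {
  std::priority_queue<FakeTask, std::vector<FakeTask>, CompareFakeTask> q;
  q.push({0, 7});
  q.push({1, 3});
  q.push({0, 9});
  while (!q.empty()) {
    std::cout << q.top().reentrant_depth << " " << q.top().sequence_nr << "\n";
    q.pop();
  }
  // Prints: 1 3, then 0 9, then 0 7.
}
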
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/forward_grad.h ADDED
@@ -0,0 +1,210 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/Tensor.h>
4
+ #include <unordered_set>
5
+
6
+ namespace torch::autograd {
7
+
8
+ // [ Using ForwardGrad ]
9
+ // ForwardGrad needs to be a shared_ptr to satisfy constraints of its inner
10
+ // design. But this shared_ptr must be uniquely associated with the object that
11
+ // stores it (as of writing, either AutogradMeta or SavedVariable). This object
12
+ // is called the "owning object" in the discussions below. This owning object
13
+ // must call `ForwardGrad::clear()` when it is destroyed to ensure that the
14
+ // ForwardGrad is properly de-allocated.
15
+
16
+ struct ForwardGrad;
17
+
18
+ // This file contains two classes that are used to store forward AD gradients
19
+ // and ensure that they are scoped properly. Because forward AD runs
20
+ // concurrently with the evaluation of the function, we need a mechanism to
21
+ // separate different forward AD invocations and be able to compute the right
22
+ // gradients. We model such invocations as levels here. The particular scoping
23
+ // issue mentioned above has two main drivers:
24
+ // - Ensure that we can conveniently use forward AD within a high level API
25
+ // without
26
+ // leaking the forward AD states outside.
27
+ // - Ensure that we can keep the level that we expose to the user API simple
28
+ // (an integer
29
+ // that represents the nesting depth) while avoiding confusions when the
30
+ // level index is re-used.
31
+
32
+ // The important external APIs from this file are:
33
+ // - ForwardADLevel::get_next_idx() that can be used to enter a new level and
34
+ // get its index
35
+ // - ForwardADLevel::release_idx() that can be used to exit a given level.
36
+ // - ForwardGrad() can be used to store a given forward gradient that will
37
+ // handle the level
38
+ // tracking automatically.
39
+
40
+ // The basic implementation strategy is as follows:
41
+ // Every tensor has a ForwardGrad, maintaining a map from levels to tangents.
42
+ // ForwardGrad is responsible for registering itself to the appropriate
43
+ // ForwardADLevel when a new tangent is added to it via ForwardGrad::set_value
44
+ // and to un-register itself from this same level if that tangent is removed via
45
+ // ForwardGrad::reset. The ForwardADLevel is created when a new level is entered
46
+ // via ForwardADLevel::get_next_idx. A reference to the new ForwardADLevel is
47
+ // stored into a global (for the whole process) vector that ensure it can be
48
+ // accessed via ForwardADLevel::get_by_idx. This reference is deleted when the
49
+ // index is released by the user when calling ForwardADLevel::release_idx. When
50
+ // it is destructed, the ForwardADLevel is responsible for clearing all the
51
+ // tangents for its level stored in all the ForwardGrad that registered with it.
52
+ //
53
+ // This process-wide level design, compared to a thread local one, allows us to
54
+ // use very simple user facing handle for the level (an int) while enabling
55
+ // cross-thread forward AD. The only required synchronization for the user is
56
+ // when entering and exiting the levels. Some discussion on alternative design
57
+ // is in https://github.com/pytorch/pytorch/pull/49097#discussion_r543716453 and
58
+ // can be refined in the future.
59
+
60
+ // Correctness of concurrency:
61
+ // Each class uses its own lock when reading or modifying internal storages.
62
+ // This allows in particular to safely remove tangents from ForwardGrad when the
63
+ // ForwardADLevel is being exited. We ensure no deadlock by ensuring that a
64
+ // methods never calls into another class's method while the local class's lock
65
+ // is held except in one single case: calling from ForwardADLevel's destructor
66
+ // into ForwardGrad::reset with update_level=false.
67
+
68
+ // The lifetime of these objects is as follows:
69
+ // The ForwardADLevel can be in three states:
70
+ // - Initialized: where one of its reference is held by the global vector
71
+ // and there may be more
72
+ // references held by temporary variables in ForwardGrad's methods.
73
+ // - About to be destructed: where "release_idx" has been called and the
74
+ // only reason for the
75
+ // ForwardADLevel not to be destructed right away is that some methods in
76
+ // ForwardGrad have owning reference to it. This is done so that a
77
+ // ForwardADLevel can never be destructed when a ForwardGrad is
78
+ // registered with it and in the process of adding something to its
79
+ // internal state.
80
+ // - Being destructed: Here the ForwardADLevel is not referenced anymore
81
+ // and can be safely reset
82
+ // all of the ForwardGrad. Note that we can have more than one reset
83
+ // being called here (which is ok) but we are guaranteed that there is at
84
+ // least one.
85
+ // The ForwardGrad is simpler as there is no intermediary state and no special
86
+ // destructor for. The logic to unregister it from the different ForwardADLevel
87
+ // is done when the owning object (AutogradMeta or SavedVariable) is being
88
+ // destroyed.
89
+
90
+ // Other considered design:
91
+ // To avoid having the ForwardGrad::clear, we considered storing weak_ptr inside
92
+ // the ForwardADLevel. While this would work, it would mean that the set inside
93
+ // the ForwardADLevel would only grow unless we do an expensive linear scan to
94
+ // remove all the dangling weak pointers. Hence this approach was not used.
95
+
96
+ // Data structures in this file are optimized for this maximum number of levels.
97
+ // The number of levels corresponds to the degree of the gradient being
98
+ // computed using forward AD and we don't expect more than second order
99
+ // gradients to be common.
100
+ #define EXPECTED_MAX_LEVEL 2
101
+
102
+ struct TORCH_API ForwardADLevel {
103
+ ForwardADLevel(uint64_t idx) : idx_(idx) {}
104
+ ~ForwardADLevel();
105
+
106
+ static uint64_t get_next_idx();
107
+ static void release_idx(uint64_t idx);
108
+ static std::shared_ptr<ForwardADLevel> get_by_idx(uint64_t idx);
109
+ static std::shared_ptr<ForwardADLevel> try_get_by_idx(uint64_t idx);
110
+
111
+ void erase(const std::shared_ptr<ForwardGrad>& grad) {
112
+ std::lock_guard<std::mutex> lock(mutex_);
113
+ grads_.erase(grad);
114
+ }
115
+
116
+ void insert(const std::shared_ptr<ForwardGrad>& grad) {
117
+ std::lock_guard<std::mutex> lock(mutex_);
118
+ grads_.insert(grad);
119
+ }
120
+
121
+ private:
122
+ std::unordered_set<std::shared_ptr<ForwardGrad>> grads_;
123
+ std::mutex mutex_;
124
+ uint64_t idx_;
125
+ };
126
+
127
+ struct TORCH_API ForwardGrad : std::enable_shared_from_this<ForwardGrad> {
128
+ ForwardGrad() = default;
129
+
130
+ // This function must only be called when AutogradMeta or SavedVariable is
131
+ // being destructed as it ensures that:
132
+ // - The only (potential) other references to this ForwardGrad are the
133
+ // different level it is registered to
134
+ // - No other thread will try to call `set_value` or `value` ever from now
135
+ // on
136
+ // - Any of the ForwardADLevel that this ForwardGrad is registered with
137
+ // might
138
+ // call `reset` at any point during this function
139
+ void clear() {
140
+ c10::SmallVector<uint64_t, EXPECTED_MAX_LEVEL> levels_idx;
141
+
142
+ {
143
+ std::lock_guard<std::mutex> lock(mutex_);
144
+ for (auto& c : content_) {
145
+ levels_idx.push_back(c.first);
146
+ }
147
+ }
148
+
149
+ for (auto l_idx : levels_idx) {
150
+ // Use "try" version here as another thread might have deleted this
151
+ // level before we got here
152
+ // This is an owning reference as we want to keep the level alive
153
+ // until we successfully unregister ourselves
154
+ auto level = ForwardADLevel::try_get_by_idx(l_idx);
155
+ if (level) {
156
+ level->erase(shared_from_this());
157
+ }
158
+ }
159
+ }
160
+
161
+ void set_value(const at::Tensor& value, uint64_t level) {
162
+ // Owning reference to ensure the forward_level is not destroyed
163
+ // while we are updating our internal state
164
+ auto forward_level = ForwardADLevel::get_by_idx(level);
165
+ forward_level->insert(shared_from_this());
166
+
167
+ std::lock_guard<std::mutex> lock(mutex_);
168
+ content_.insert({level, value});
169
+ }
170
+
171
+ // This function removes the tangent for a given level from this ForwardGrad
172
+ // Use the update_level flag to disable notifying the level about this reset
173
+ // This flag is most notably used by the ForwardADLevel destructor.
174
+ void reset(uint64_t level, bool update_level = true) {
175
+ if (update_level) {
176
+ ForwardADLevel::get_by_idx(level)->erase(shared_from_this());
177
+ }
178
+
179
+ std::unique_lock<std::mutex> lock(mutex_);
180
+ const auto& it = content_.find(level);
181
+ TORCH_INTERNAL_ASSERT(
182
+ it != content_.end(), "Resetting a non-existent level.");
183
+ // Keep the Tensor alive until we have released the lock
184
+ // This is needed as we can be in a case where this function is called by
185
+ // ForwardADLevel destructor
186
+ auto t = (*it).second;
187
+ content_.erase(level);
188
+ lock.unlock();
189
+ }
190
+
191
+ const at::Tensor& value(uint64_t level) const;
192
+
193
+ bool contains(uint64_t level) {
194
+ std::lock_guard<std::mutex> lock(mutex_);
195
+ return content_.count(level) > 0;
196
+ }
197
+
198
+ bool empty() const {
199
+ return content_.empty();
200
+ }
201
+
202
+ static const at::Tensor& undef_grad();
203
+
204
+ private:
205
+ // TODO(albanD): replace this with a SmallVector
206
+ std::unordered_map<uint64_t, at::Tensor> content_;
207
+ mutable std::mutex mutex_;
208
+ };
209
+
210
+ } // namespace torch::autograd
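
A hedged sketch of the level lifecycle described above, combining the forward_ad entry points declared in autograd.h with the _make_dual/_unpack_dual ATen ops (assumed available in this build) to push a tangent through a computation.

#include <torch/torch.h>
#include <torch/csrc/autograd/autograd.h>

void forward_ad_level_sketch() {
  namespace fwd = torch::autograd::forward_ad;
  const auto level = fwd::enter_dual_level();  // registers a new ForwardADLevel

  auto primal = torch::randn({3});
  auto tangent = torch::ones({3});
  // Attaching a tangent stores it in the tensor's ForwardGrad for this level.
  auto dual = at::_make_dual(primal, tangent, static_cast<int64_t>(level));

  auto out = dual * dual;
  auto jvp = std::get<1>(at::_unpack_dual(out, static_cast<int64_t>(level)));
  // jvp is expected to equal 2 * primal (the directional derivative of x * x).

  fwd::exit_dual_level(level);  // clears all tangents registered for this level
}
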
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/accumulate_grad.h ADDED
@@ -0,0 +1,277 @@
1
+ #pragma once
2
+
3
+ #include <ATen/CachedTensorUtils.h>
4
+ #include <ATen/LegacyBatchedTensorImpl.h>
5
+ #include <ATen/TensorOperators.h>
6
+ #include <torch/csrc/Export.h>
7
+ #include <torch/csrc/autograd/function.h>
8
+ #include <torch/csrc/autograd/utils/grad_layout_contract.h>
9
+ #include <torch/csrc/autograd/variable.h>
10
+
11
+ #ifndef AT_PER_OPERATOR_HEADERS
12
+ #include <ATen/Functions.h>
13
+ #else
14
+ #include <ATen/ops/_sparse_coo_tensor_unsafe.h>
15
+ #endif
16
+
17
+ #include <mutex>
18
+
19
+ namespace torch {
20
+ namespace autograd {
21
+
22
+ #define CHECK_RESULT(RESULT, VAR) \
23
+ if (!(RESULT.is_sparse() || VAR.is_sparse() || RESULT.is_sparse_csr() || \
24
+ VAR.is_sparse_csr())) { \
25
+ if (!utils::obeys_layout_contract(RESULT, VAR)) { \
26
+ TORCH_WARN_ONCE( \
27
+ "grad and param do not obey the gradient layout contract. " \
28
+ "This is not an error, but may impair performance.\n" \
29
+ "grad.sizes() = ", \
30
+ RESULT.sizes(), \
31
+ ", strides() = ", \
32
+ RESULT.strides(), \
33
+ "\n", \
34
+ "param.sizes() = ", \
35
+ VAR.sizes(), \
36
+ ", strides() = ", \
37
+ VAR.strides()); \
38
+ } \
39
+ }
40
+
41
+ struct TORCH_API AccumulateGrad : public Node {
42
+ explicit AccumulateGrad(Variable variable_);
43
+
44
+ variable_list apply(variable_list&& grads) override;
45
+
46
+ std::vector<std::unique_ptr<FunctionPreHook>>& tensor_pre_hooks() noexcept
47
+ override {
48
+ // NB: Since the AccumulateGrad Node is only a weak ref from the Tensor,
49
+ // it can be destroyed even though the Tensor is still alive (contrary
50
+ // to all other Nodes). So we must lazily read the Tensor hooks here.
51
+ return impl::hooks(variable);
52
+ }
53
+
54
+ std::unique_ptr<PostAccumulateGradHook>& tensor_post_acc_grad_hooks() noexcept
55
+ override {
56
+ // NB: Since the AccumulateGrad Node is only a weak ref from the Tensor,
57
+ // it can be destroyed even though the Tensor is still alive (contrary
58
+ // to all other Nodes). So we must lazily read the Tensor hooks here.
59
+ return impl::post_acc_grad_hooks(variable);
60
+ }
61
+
62
+ // Given a variable with its current grad as variable_grad, accumulates
63
+ // new_grad into variable_grad if in place accumulation is possible.
64
+ // Otherwise, uses 'update_grad' to update the grad for the variable.
65
+
66
+ // "Gradient Layout Contract"
67
+ //
68
+ // AccumulateGrad tries to stash strided (non-sparse) grads with memory layout
69
+ // (strides) such that variables and grads interact efficiently in later
70
+ // optimizer kernels, and grads interact efficiently with c10d::Reducer.cpp.
71
+ //
72
+ // Specifically, AccumulateGrad tries to ensure the following
73
+ // (cf torch/csrc/autograd/utils/grad_layout_contract.h):
74
+ // (1) if variable.is_non_overlapping_and_dense(), the stashed grad's
75
+ // strides match variable.
76
+ // (2) else, stashed grad is rowmajor contiguous.
77
+ // If variable's grad does not exist (!variable_grad.defined())
78
+ // AccumulateGrad steals new_grad if it's stealable and obeys the contract
79
+ // already, otherwise it deep copies new_grad into an obedient clone.
80
+ //
81
+ // If variable's grad already exists (variable_grad.defined()), new_grad must
82
+ // be added to variable_grad. If we aren't setting up for double backward
83
+ // (!GradMode::is_enabled()), AccumulateGrad performs "variable_grad +=
84
+ // new_grad" in-place, which keeps variable_grad's layout. We assume (hope)
85
+ // variable_grad was created obeying (1) or (2) at some point in the past.
86
+ //
87
+ // If we are setting up for double backward, AccumulateGrad updates the grad
88
+ // out-of-place via "variable_grad + new_grad." TensorIterator operator+
89
+ // decides result's layout. Typically TensorIterator matches strides of the
90
+ // first arg, so we once again assume (hope) variable_grad was originally
91
+ // created obeying (1) or (2).
92
+ //
93
+ // AccumulateGrad does not enforce the contract with 100% certainty. Examples:
94
+ // - If a user manually permutes a param or its grad, then runs a fwd+bwd,
95
+ // variable_grad += new_grad keeps variable_grad's layout without
96
+ // rechecking the contract.
97
+ // - If TensorIterator changes its corner cases about operator+'s result
98
+ // (for example, giving more or less priority to channels_last inputs, see
99
+ // https://github.com/pytorch/pytorch/pull/37968) the result may not obey.
100
+ //
101
+ // Fortunately, if a given grad doesn't satisfy (1) or (2), the penalty is
102
+ // degraded performance in Reducer.cpp or optimizer kernels, not death by
103
+ // assert or silently bad numerics.
104
+
105
+ // variable: the variable whose grad we're accumulating.
106
+ // variable_grad: the current grad for the variable.
107
+ // new_grad: new grad we want to accumulate for the variable.
108
+ // num_expected_refs: the number of refs we expect to hold internally
109
+ // such that it is safe to avoid cloning the grad
110
+ // if use_count() of the grad is less than or equal
111
+ // to this value (in addition to post_hooks).
112
+ // update_grad: Function that is used to update grad for the variable.
113
+ // The argument to the function is a Tensor which
114
+ // is used to set a new value for the grad.
115
+ template <typename T>
116
+ static void accumulateGrad(
117
+ const Variable& variable,
118
+ at::Tensor& variable_grad,
119
+ const at::Tensor& new_grad,
120
+ size_t num_expected_refs,
121
+ const T& update_grad) {
122
+ if (!variable_grad.defined()) {
123
+ if (!GradMode::is_enabled() && !new_grad.is_sparse() &&
124
+ !new_grad.is_sparse_csr() &&
125
+ !(variable.is_sparse_csr() && new_grad.layout() == at::kStrided) &&
126
+ at::caching::adjusted_use_count(new_grad) <= num_expected_refs &&
127
+ (new_grad.is_mkldnn() ||
128
+ utils::obeys_layout_contract(new_grad, variable))) {
129
+ // we aren't setting up for double-backward
130
+ // not sparse
131
+ // no other user-visible tensor references new_grad
132
+ // new_grad obeys the "Gradient Layout Contract", there has a special
133
+ // case, For MKLDNN tensor, which is a opaque tensor, assuming it obeys
134
+ // layout_contract. Under these conditions, we can steal new_grad
135
+ // without a deep copy.
136
+ update_grad(new_grad.detach());
137
+ } else if (
138
+ !GradMode::is_enabled() && new_grad.is_sparse() &&
139
+ new_grad._indices().is_contiguous() &&
140
+ new_grad._values().is_contiguous() &&
141
+ // Use count for indices and values should always be <=1 since the
142
+ // SparseTensor should be the only one holding a reference to these.
143
+ new_grad._indices().use_count() <= 1 &&
144
+ new_grad._values().use_count() <= 1 &&
145
+ new_grad.use_count() <= num_expected_refs) {
146
+ // Can't detach sparse tensor (since metadata changes are not allowed
147
+ // after detach), so just create a new one for the grad which is a
148
+ // shallow copy. We need a shallow copy so that modifying the original
149
+ // grad tensor doesn't modify the grad we accumulate.
150
+ // We only skip clone if indices and values themselves are contiguous
151
+ // for backward compatibility reasons. Since without this optimization,
152
+ // earlier we would clone the entire SparseTensor which cloned indices
153
+ // and values.
154
+ // For details see https://github.com/pytorch/pytorch/issues/34375.
155
+
156
+ // No scenario where we expect this to be true currently
157
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
158
+ !at::caching::is_cached_tensor(new_grad._indices()) &&
159
+ !at::caching::is_cached_tensor(new_grad._values()) &&
160
+ !at::caching::is_cached_tensor(new_grad));
161
+
162
+ update_grad(at::_sparse_coo_tensor_unsafe(
163
+ new_grad._indices(),
164
+ new_grad._values(),
165
+ new_grad.sizes(),
166
+ new_grad.options()));
167
+ } else {
168
+ if (new_grad.is_sparse() || new_grad.is_sparse_csr() ||
169
+ new_grad.is_nested()) {
170
+ update_grad(new_grad.clone());
171
+ } else {
172
+ if (new_grad.is_mkldnn()) {
173
+ update_grad(new_grad.clone());
174
+ } else {
175
+ // Deep copies new_grad according to the "Gradient Layout Contract."
176
+ update_grad(utils::clone_obey_contract(new_grad, variable));
177
+ }
178
+ }
179
+ }
180
+ } else if (!GradMode::is_enabled()) {
181
+ // This case is not strictly necessary, but it makes the first-order only
182
+ // case slightly more efficient.
183
+ if (variable_grad.is_sparse() && !new_grad.is_sparse()) {
184
+ // If `variable_grad` is sparse and `new_grad` is not sparse, their
185
+ // sum is not sparse, and we must change the TensorImpl type of
186
+ // `variable_grad` for it to store the result. However, changing the
187
+ // TensorImpl type of a tensor requires changing the tensor itself, and
188
+ // thus in this case we have to change the grad tensor.
189
+ auto result = new_grad + variable_grad;
190
+ CHECK_RESULT(result, variable);
191
+ update_grad(std::move(result));
192
+ } else if (!at::inplaceIsVmapCompatible(variable_grad, new_grad)) {
193
+ // Ideally we'd perform an in-place operation to avoid changing
194
+ // the grad tensor. However, if that's impossible because the grads
195
+ // are vmap-incompatible (See NOTE: [vmap-incompatible in-place
196
+ // operations]), then we just add them out-of-place.
197
+ auto result = variable_grad + new_grad;
198
+ CHECK_RESULT(result, variable);
199
+ update_grad(std::move(result));
200
+ } else {
201
+ // In this case we can avoid changing the grad tensor. There are three
202
+ // scenarios when we'll hit this case:
203
+ //
204
+ // 1. `variable_grad` is sparse, and `new_grad` is sparse.
205
+ // 2. `variable_grad` is dense, and `new_grad` is sparse.
206
+ // 3. `variable_grad` is dense, and `new_grad` is dense.
207
+ // 4. `variable_grad` is mkldnn, and `new_grad` is mkldnn.
208
+ //
209
+ // In all of these four cases, `variable_grad += new_grad` is a
210
+ // valid operation which adds `new_grad` to `variable_grad` in
211
+ // place. `variable_grad` is thus still referring to the same tensor
212
+ // after the operation.
213
+ // Also DistributedDataParallel(DDP) package relies on grad being
214
+ // mutated in place for saving peak memory usage. DDP will still
215
+ // work correctly if it is mutated out of place here, but DDP will
216
+ // maintain one extra copy of grad tensors in buffer and thus
217
+ // increase peak memory usage.
218
+ variable_grad += new_grad;
219
+ CHECK_RESULT(variable_grad, variable);
220
+ // ^ We could enforce the contract more aggressively here by writing:
221
+ // if (variable_grad.is_sparse() || new_grad.is_sparse()) {
222
+ // variable_grad += new_grad;
223
+ // } else if (obeys_layout_contract(variable_grad, variable)) {
224
+ // variable_grad += new_grad;
225
+ // } else {
226
+ // result = at::empty_strided(variable.sizes(), variable.strides(),
227
+ // variable.options().memory_format(c10::nullopt));
228
+ // update_grad(at::native::add_out(result, variable_grad,
229
+ // new_grad, 1.0);
230
+ // }
231
+ // However, that accumulation is sometimes in place and sometimes not,
232
+ // which may break user code.
233
+ }
234
+ } else {
235
+ at::Tensor result;
236
+ if (variable_grad.is_sparse() && !new_grad.is_sparse()) {
237
+ // CPU backend throws an error on sparse + dense, so prefer dense +
238
+ // sparse here.
239
+ result = new_grad + variable_grad;
240
+ } else {
241
+ // Assumes operator+ result typically matches strides of first arg,
242
+ // and hopes variable_grad was originally created obeying layout
243
+ // contract.
244
+ result = variable_grad + new_grad;
245
+ }
246
+ CHECK_RESULT(result, variable);
247
+ update_grad(std::move(result));
248
+ // ^ We could enforce the contract more aggressively here by saying
249
+ // if (obeys_layout_contract(new_grad, variable)) {
250
+ // update_grad(new_grad + variable_grad);
251
+ // } else {
252
+ // update_grad(variable_grad + new_grad);
253
+ // }
254
+ // such that the stashed grad is likely to have the right strides if
255
+ // either variable_grad or new_grad already has the right strides.
256
+ // We could enforce the contract with certainty by saying
257
+ // auto result = variable_grad + new_grad (or vice versa), checking
258
+ // result's layout, and copying to an obedient clone if necessary before
259
+ // update_grad. The copy would require another gmem pass. We can't create
260
+ // empty result with the right layout then add_out into it with a single
261
+ // kernel, because GradMode is enabled in this branch, and add_out isn't
262
+ // differentiable. Maybe more trouble than it's worth.
263
+ }
264
+ }
265
+
266
+ void compiled_args(CompiledNodeArgs& args) override;
267
+ variable_list apply_with_saved(
268
+ const variable_list& inputs,
269
+ SwapSavedVariables& saved) override;
270
+
271
+ Variable variable;
272
+ };
273
+
274
+ #undef CHECK_RESULT
275
+
276
+ } // namespace autograd
277
+ } // namespace torch
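
A small hedged check of the layout contract described above, using an arbitrary channels-last parameter; per point (1), the stashed grad's strides are expected to match the parameter's strides.

#include <torch/torch.h>
#include <iostream>

void layout_contract_sketch() {
  // Non-overlapping and dense, but not rowmajor contiguous.
  auto w = torch::randn({2, 3, 4, 5})
               .contiguous(at::MemoryFormat::ChannelsLast)
               .requires_grad_();
  (w * 2).sum().backward();
  // AccumulateGrad should have stashed a grad whose strides match w.
  std::cout << std::boolalpha
            << (w.grad().strides() == w.strides()) << "\n";  // expected: true
}
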
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/basic_ops.h ADDED
@@ -0,0 +1,111 @@
+ #pragma once
+
+ #include <c10/util/irange.h>
+ #include <torch/csrc/Export.h>
+ #include <torch/csrc/autograd/function.h>
+ #include <torch/csrc/autograd/variable.h>
+
+ #include <memory>
+ #include <string>
+ #include <vector>
+
+ namespace torch {
+ namespace autograd {
+
+ struct TORCH_API Error : public Node {
+ Error(std::string msg, edge_list&& next_edges)
+ : Node(std::move(next_edges)), msg(std::move(msg)) {}
+
+ Error(std::string msg) : msg(std::move(msg)) {}
+
+ variable_list apply(variable_list&& inputs) override;
+
+ void compiled_args(CompiledNodeArgs& args) override;
+ variable_list apply_with_saved(
+ const variable_list& inputs,
+ SwapSavedVariables& saved) override;
+
+ std::string msg;
+ };
+
+ // We print grad_fn names in tensor printing. For functions with backward
+ // NYI, grad_fn=<Error> will be printed if we use Error, which is confusing. So
+ // special case with a new NotImplemented function here.
+ struct TORCH_API NotImplemented : public Error {
+ NotImplemented(const std::string& forward_fn, edge_list&& next_edges)
+ : Error(
+ "derivative for " + forward_fn + " is not implemented",
+ std::move(next_edges)) {}
+
+ NotImplemented(const std::string& forward_fn)
+ : Error("derivative for " + forward_fn + " is not implemented") {}
+ };
+
+ // Identity in forward, Error in backward. Used to implement
+ // @once_differentiable
+ struct TORCH_API DelayedError : public Node {
+ DelayedError(std::string msg, int64_t num_inputs) : msg(std::move(msg)) {
+ // NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores)
+ for (const auto i : c10::irange(num_inputs)) {
+ (void)i; // Suppress unused variable warning
+ add_input_metadata(Node::undefined_input());
+ }
+ }
+
+ variable_list apply(variable_list&& inputs) override;
+
+ std::string msg;
+ };
+
+ struct TORCH_API UndefinedGrad : public Node {
+ UndefinedGrad() {
+ add_input_metadata(Node::undefined_input());
+ }
+
+ variable_list apply(variable_list&& inputs) override;
+ };
+
+ struct TORCH_API UndefinedGradBackward : public Node {
+ UndefinedGradBackward(edge_list&& next_edges) : Node(std::move(next_edges)) {}
+
+ UndefinedGradBackward() = default;
+
+ variable_list apply(variable_list&& inputs) override;
+
+ void compiled_args(CompiledNodeArgs& args) override {}
+ variable_list apply_with_saved(
+ const variable_list& inputs,
+ SwapSavedVariables& saved) override {
+ return apply(variable_list(inputs));
+ }
+ };
+
+ struct TORCH_API GraphRoot : public Node {
+ GraphRoot(edge_list functions, variable_list inputs)
+ : Node(std::move(functions)), outputs(std::move(inputs)) {
+ // Ensures calls to stream() on a GraphRoot instance reflect current
+ // stream(s) on devices of root grad tensors at the time the instance is
+ // constructed.
+ for (const auto& t : outputs) {
+ add_input_metadata(t);
+ }
+ }
+
+ variable_list apply(variable_list&& inputs) override {
+ return outputs;
+ }
+
+ void compiled_args(CompiledNodeArgs& args) override;
+ variable_list apply_with_saved(
+ const variable_list& inputs,
+ SwapSavedVariables& saved) override;
+
+ variable_list outputs;
+ };
+
+ struct TORCH_API Identity : public Node {
+ variable_list apply(variable_list&& inputs) override;
+ };
+
+ } // namespace autograd
+ } // namespace torch
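For orientation, GraphRoot is the node that feeds the seed gradients into a backward pass; at the user level these are the grad_outputs passed to torch::autograd::grad. A small illustrative sketch (values are arbitrary):

#include <torch/torch.h>
#include <iostream>

int main() {
  auto x = torch::ones({2, 2}, torch::requires_grad());
  auto y = x * 3;
  // `seed` plays the role of the root gradient: during backward it is returned
  // by the GraphRoot node that seeds the graph rooted at y.
  auto seed = torch::full({2, 2}, 2.0);
  auto grads = torch::autograd::grad({y}, {x}, {seed});
  std::cout << grads[0] << '\n';  // 3 * seed, i.e. all entries equal 6
  return 0;
}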
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/comm.h ADDED
@@ -0,0 +1,47 @@
+ #pragma once
+
+ #include <torch/csrc/Export.h>
+ #include <torch/csrc/autograd/function.h>
+ #include <torch/csrc/autograd/variable.h>
+
+ #include <ATen/ATen.h>
+ #include <c10/cuda/CUDAStream.h>
+ #include <c10/util/Optional.h>
+
+ #include <cstddef>
+ #include <vector>
+
+ namespace torch {
+ namespace autograd {
+
+ struct TORCH_CUDA_CU_API Scatter : public Node {
+ explicit Scatter(
+ std::vector<at::Device> devices,
+ c10::optional<std::vector<int64_t>> chunk_sizes = c10::nullopt,
+ int64_t dim = 0,
+ c10::optional<std::vector<c10::optional<at::cuda::CUDAStream>>> streams =
+ c10::nullopt,
+ bool unsqueeze_scalars = false);
+ ~Scatter() override;
+
+ variable_list apply(variable_list&& inputs) override;
+
+ std::vector<at::Device> devices_;
+ c10::optional<std::vector<int64_t>> chunk_sizes_;
+ int64_t dim_;
+ c10::optional<std::vector<c10::optional<at::cuda::CUDAStream>>> streams_;
+ bool unsqueeze_scalars_;
+ };
+
+ struct TORCH_CUDA_CU_API Gather : public Node {
+ explicit Gather(const at::Device& destination_device, int64_t dim = 0);
+ ~Gather() override;
+
+ variable_list apply(variable_list&& inputs) override;
+
+ at::Device destination_device_;
+ int64_t dim_;
+ };
+
+ } // namespace autograd
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/pybind.h ADDED
@@ -0,0 +1,15 @@
+ #pragma once
+
+ #include <pybind11/pybind11.h>
+ #include <pybind11/stl.h>
+ #include <torch/csrc/python_headers.h>
+ #include <torch/csrc/utils/pybind.h>
+
+ #include <torch/csrc/autograd/python_cpp_function.h>
+ #include <torch/csrc/autograd/python_function.h>
+
+ namespace py = pybind11;
+
+ namespace pybind11 {
+ namespace detail {}
+ } // namespace pybind11
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/tensor.h ADDED
@@ -0,0 +1,186 @@
+ #pragma once
+
+ #include <torch/csrc/Export.h>
+ #include <torch/csrc/autograd/function.h>
+ #include <torch/csrc/autograd/variable.h>
+
+ #include <ATen/TensorGeometry.h>
+ #include <ATen/core/DeprecatedTypeProperties.h>
+ #include <c10/util/Optional.h>
+
+ #include <cstdint>
+ #include <memory>
+
+ namespace torch {
+ namespace autograd {
+
+ struct TORCH_API CopyBackwards : public Node {
+ variable_list apply(variable_list&& grads) override;
+ void compiled_args(CompiledNodeArgs& args) override;
+ variable_list apply_with_saved(
+ const variable_list& inputs,
+ SwapSavedVariables& saved) override;
+
+ at::TensorOptions src_options;
+ };
+
+ // Note [View + Inplace update for base tensor]
+ //
+ // This note covers a few important topics related to view + inplace handling.
+ // - It explains what the CopySlices Node is and why we need it.
+ // - It explains the considerations on what is saved for backward in
+ // CopySlices.
+ // - It explains why we sometimes need to change the exec_info of the current
+ // backward.
+ //
+ // What is CopySlices?
+ // ~~~~~~~~~~~~~~~~~~~
+ //
+ // We support autograd with inplace mutation; e.g., if you write x.mul_(2)
+ // the autograd will work as if you now had multiple Tensors under the hood and
+ // you did
+ // x = t.clone()
+ // x0 = x
+ // x1 = x0 * 2
+ // x = x1
+ // As you can see here, after this operation, x.grad_fn now points to x1.grad_fn
+ // (the MulBackward node) and this node points to x's original grad_fn (which is
+ // also x0.grad_fn). It is important to keep in mind that after the inplace,
+ // there is no Tensor object that represents the x0 state anymore. But the graph
+ // for it is still around in autograd (in case x was used before being modified
+ // inplace). See Example 1 in
+ // https://docs.google.com/drawings/d/1-T5DyYfChMX1ONQkY-zU-hj_ayQ2zmA5CBOKDWqvEhE
+ // We call this rebasing the history of the Tensor.
+ //
+ // Now, a difficult situation is what happens if x is a differentiable view
+ // of a base b.
+ // b = t.clone()
+ // x = b.select(0, 0)
+ // x *= 2
+ // With the same approach as above, this will become
+ // b = t.clone()
+ // x = b.select(0, 0)
+ // b0 = b
+ // x0 = x
+ // x1 = x0 * 2
+ // b1 = b0.select_scatter(x1, 0, 0)
+ // x2 = b1.select(0, 0)
+ // x = x2
+ // b = b1
+ // As you can see here, not only do we need to modify x's grad_fn, we also need
+ // to modify the one from b. We also need to ensure that the new grad_fn on x is
+ // linked to b's new grad_fn. The chain of select_scatter, multiplication and
+ // select is what CopySlices does, all wrapped into a single Node.
+ //
+ // See Example 1 in
+ // https://docs.google.com/drawings/d/1-T5DyYfChMX1ONQkY-zU-hj_ayQ2zmA5CBOKDWqvEhE
+ //
+ // What do we need to save in CopySlices to run backward?
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ //
+ // We need to perform grad_view = fn(grad_view), but out-of-place.
+ // view_fn_ is an optional function saved in DifferentiableViewMeta
+ // from the forward pass, so that we can recover the view when as_strided is
+ // not supported. It preserves the invariants:
+ // view = view_fn_(base)
+ // grad_view = view_fn_(grad_base)
+ //
+ // When as_strided is supported (e.g. strided CPU/CUDA Tensors), view_fn_
+ // is empty and we save TensorGeometry(view) instead.
+ // With the TensorGeometry information we can use the `as_strided` call, which
+ // is more efficient for recovering views in backward.
+ //
+ // For example:
+ // view_1 = view_op_1(base)
+ // view_2 = view_op_2(view_1)
+ // ...
+ // view_n = view_op_n(view_n-1)
+ // view_n = inplace_op(view_n)
+ //
+ // In the CPU/CUDA case, where we support an efficient as_strided
+ // implementation, grad_view_n can be calculated in a single step.
+ //
+ // grad_view_n = grad_base.as_strided(view_sizes, view_strides, view_offset);
+ //
+ // But in the XLA backend, where we don't have full support for as_strided,
+ // it has to save a chained lambda function view_fn_, to exactly
+ // replay how the view was done in forward.
+ //
+ // view_fn_ = view_op_n(...(view_op_2(view_op_1())))
+ // grad_view_n = view_fn_(grad_base)
+ //
+ // This chained view_fn_ works as long as forward view ops are implemented,
+ // e.g. XLA simulates view without a real Storage behind Tensor, but it's less
+ // efficient than the as_strided one so we should be careful to only use it when
+ // necessary.
+ //
+ // - For CPU/CUDA we save TensorGeometry of both base and view tensors;
+ // that's all we need to pass into as_strided.
+ // E.g. int[] sizes, int[] strides, and int storage_offset.
+ // - For XLA we use view_fn_, which captures all forward view op arguments
+ // by **value**.
+ // E.g. for at::narrow, int dim, int start, int length are saved.
+ //
+ // Theoretically we could also save the Tensor `view` in the CopySlices Node,
+ // but it's far more expensive than what we currently save.
+ // 1. We cannot afford keeping large tensors alive to recover views only.
+ // 2. There are inplace checks when Tensors are loaded back to make sure
+ // they haven't been changed (including size metadata).
+ // So saving metadata like TensorGeometry/view arguments is much better
+ // because it is the minimal information needed to recover views, and it
+ // allows the user to modify the original Tensor without preventing the
+ // backward pass from running.
+ //
+ // Why do we manually change exec_info in the apply?
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ //
+ // Using the same example as before,
+ // b = t.clone()
+ // x = b.select(0, 0)
+ // x *= y
+ //
+ // You can see the visualization at
+ // https://docs.google.com/drawings/d/1Bx-Hcz-zlIv7PabQqnPhUIVIs9F8WWi48svqMsAUMFs
+ // which contains the wrapped MulBackward Node and shows what it links to.
+ // Since a backward can happen between any subset of the inputs (t and y) and
+ // outputs (o, x, b), it is possible to get into a state where CopySlices's 0th
+ // next function (CloneBackward) needs gradient but MulBackward's 0th next
+ // function (SelectBackward) does not. This happens if you do autograd.grad
+ // between x and t for example.
+ // In such a case, we do need to mark SelectBackward as requiring gradient such
+ // that, during the execution of MulBackward, we will actually compute gradient
+ // for the 0th input.
+ //
+ // All the other next functions are always shared (this is asserted in the apply
+ // code) and so nothing needs to be done for them.
+
+ // See Note [View + Inplace update for view tensor] for what we do to the view
+ // tensor when an in-place operation happens.
+ struct TORCH_API CopySlices : public Node {
+ CopySlices(
+ const Variable& base_var,
+ at::TensorGeometry view_,
+ std::unique_ptr<ViewFunc> view_fn_,
+ std::shared_ptr<Node> fn_);
+
+ // common code between apply/apply_with_saved
+ template <typename T>
+ variable_list apply_impl(variable_list&& inputs, const T& call_fn);
+
+ variable_list apply(variable_list&& inputs) override;
+ void release_variables() override;
+ void compiled_args(CompiledNodeArgs& args) override;
+ variable_list apply_with_saved(
+ const variable_list& inputs,
+ SwapSavedVariables& saved) override;
+
+ at::TensorGeometry base;
+ // view and view_fn are redundant and view_fn will be used if available.
+ // See Note [View + Inplace update for base tensor] for details.
+ at::TensorGeometry view;
+ std::unique_ptr<ViewFunc> view_fn;
+ std::shared_ptr<Node> fn;
+ };
+
+ } // namespace autograd
+ } // namespace torch
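The base-tensor scenario in the note above can be reproduced at the user level with public LibTorch API; after the in-place multiply, the base b carries a CopySlices grad_fn and backward still reaches t. A minimal sketch:

#include <torch/torch.h>
#include <iostream>

int main() {
  auto t = torch::rand({3, 4}, torch::requires_grad());
  auto b = t.clone();       // differentiable base (non-leaf)
  auto x = b.select(0, 0);  // differentiable view of b
  x.mul_(2);                // in-place op rebases the history of both x and b
  b.sum().backward();       // gradient flows back to t through CopySlices
  std::cout << t.grad() << '\n';  // row 0 is 2, the rest is 1
  return 0;
}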
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/utils.h ADDED
@@ -0,0 +1,114 @@
+ #pragma once
+
+ #include <torch/csrc/Export.h>
+ #include <torch/csrc/autograd/InferenceMode.h>
+ #include <torch/csrc/autograd/autograd.h>
+ #include <torch/csrc/autograd/function.h>
+ #include <torch/csrc/autograd/variable.h>
+ #include <torch/csrc/utils/variadic.h>
+
+ #include <ATen/core/Tensor.h>
+
+ #include <functional>
+ #include <memory>
+ #include <vector>
+
+ namespace torch {
+ namespace autograd {
+
+ using function_constructor = std::function<std::shared_ptr<Node>(edge_list&&)>;
+
+ /**
+ * Wraps the tensor outputs in variables and creates the grad_fn and sets the
+ * grad_fn if necessary.
+ */
+ TORCH_API variable_list wrap_outputs(
+ const variable_list& inputs,
+ tensor_list&& outputs,
+ const function_constructor& ctr);
+
+ /// Checks that inputs contains exactly `args` items and that the first
+ /// `required_args`
+ /// items are not nullptr. If not specified, `required_args` defaults to `args`.
+ TORCH_API void check_input_variables(
+ const char* name,
+ const variable_list& inputs,
+ int args,
+ int required_args = -1,
+ bool allow_undefined = false);
+
+ struct ComputeRequiresGrad : IterArgs<ComputeRequiresGrad> {
+ bool out = false;
+ using IterArgs<ComputeRequiresGrad>::operator();
+ void operator()(const at::Tensor& tensor) {
+ const auto& var = static_cast<const Variable&>(tensor);
+ if (var.defined() && var.requires_grad()) {
+ out = true;
+ }
+ }
+ void operator()(const c10::optional<at::Tensor>& tensor) {
+ if (tensor.has_value()) {
+ (*this)(*tensor);
+ }
+ }
+ bool short_circuit() {
+ return out;
+ }
+ };
+
+ template <typename... Args>
+ inline bool compute_requires_grad(Args&&... args) {
+ if (!GradMode::is_enabled()) {
+ return false;
+ }
+ return ComputeRequiresGrad().apply(std::forward<Args>(args)...).out;
+ }
+
+ inline void set_history(
+ const at::Tensor& variable,
+ const std::shared_ptr<Node>& grad_fn) {
+ TORCH_CHECK(grad_fn != nullptr);
+ if (variable.defined()) {
+ // If the codegen triggers this, you most likely want to add your newly
+ // added function to the DONT_REQUIRE_DERIVATIVE list in
+ // tools/autograd/gen_variable_type.py
+ TORCH_INTERNAL_ASSERT(isDifferentiableType(variable.scalar_type()));
+ auto output_nr = grad_fn->add_input_metadata(variable);
+ impl::set_gradient_edge(variable, {grad_fn, output_nr});
+ } else {
+ grad_fn->add_input_metadata(Node::undefined_input());
+ }
+ }
+
+ inline void set_history(
+ const std::vector<Variable>& variables,
+ const std::shared_ptr<Node>& grad_fn) {
+ for (auto& variable : variables) {
+ set_history(variable, grad_fn);
+ }
+ }
+
+ inline bool isFwGradDefined(const c10::optional<at::Tensor>& t) {
+ return t.has_value() && t->defined() && t->_fw_grad(/*level */ 0).defined();
+ }
+
+ inline bool isFwGradDefinedTensorList(const at::ITensorListRef& variables) {
+ bool ret = false;
+ for (auto& variable : variables) {
+ ret |= isFwGradDefined(variable);
+ }
+ return ret;
+ }
+
+ inline bool isFwGradDefinedTensorList(
+ const c10::List<c10::optional<at::Tensor>>& li) {
+ bool ret = false;
+ for (auto i : c10::irange(li.size())) {
+ auto t = li.get(i);
+ ret |= (t.has_value() && isFwGradDefined(t.value()));
+ }
+ return ret;
+ }
+
+ } // namespace autograd
+ } // namespace torch
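As a rough public-API analogue of compute_requires_grad above (illustrative only, not the internal helper): gradient tracking is needed only when grad mode is enabled and at least one defined input requires grad.

#include <ATen/core/grad_mode.h>
#include <torch/torch.h>

#include <vector>

bool needs_grad(const std::vector<torch::Tensor>& inputs) {
  // Mirrors the GradMode::is_enabled() short-circuit above.
  if (!at::GradMode::is_enabled()) {
    return false;
  }
  for (const auto& t : inputs) {
    if (t.defined() && t.requires_grad()) {
      return true;
    }
  }
  return false;
}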
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/Functions.h ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/VariableType.h ADDED
@@ -0,0 +1,59 @@
+ #pragma once
+
+ // @generated from ../tools/autograd/templates/VariableType.h
+
+ #include <ATen/core/Tensor.h>
+ #include <ATen/Context.h>
+
+ #include <c10/util/intrusive_ptr.h>
+
+ #include <torch/csrc/Export.h>
+ #include <torch/csrc/autograd/autograd_not_implemented_fallback.h>
+
+ #include <cstdint> // for size_t
+ #include <functional> // for function
+ #include <memory> // for unique_ptr
+ #include <string>
+ #include <vector>
+
+ namespace at {
+ struct Quantizer;
+ };
+
+ namespace torch { namespace autograd {
+
+ using Variable = at::Tensor;
+ using at::Context;
+ using at::Device;
+ using at::Dimname;
+ using at::DimnameList;
+ using at::Generator;
+ using at::IntArrayRef;
+ using at::MemoryFormat;
+ using at::QScheme;
+ using at::Scalar;
+ using at::ScalarType;
+ using at::Storage;
+ using at::Tensor;
+ using at::TensorList;
+ using at::TensorOptions;
+ using at::Quantizer;
+ // This is a temporary typedef to enable Quantizer in the aten native function
+ // API; we'll remove it when we actually expose the Quantizer class to the
+ // frontend
+ using ConstQuantizerPtr = const c10::intrusive_ptr<Quantizer>&;
+ using c10::optional;
+
+ namespace VariableType {
+ TORCH_API std::vector<at::DeprecatedTypeProperties*> allCUDATypes();
+ TORCH_API std::vector<at::DeprecatedTypeProperties*> allXPUTypes();
+ TORCH_API std::vector<at::DeprecatedTypeProperties*> allCPUTypes();
+ TORCH_API std::vector<at::DeprecatedTypeProperties*> allPrivateUser1Types();
+
+ at::Tensor & unpack(Tensor & t, const char * name, int pos);
+ const at::Tensor & unpack(const Tensor & t, const char * name, int pos);
+ at::Tensor unpack_opt(const Tensor & t, const char * name, int pos);
+ std::vector<at::Tensor> unpack(const at::ITensorListRef& tl, const char *name, int pos);
+ };
+
+ }} // namespace torch::autograd
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/ViewFuncs.h ADDED
@@ -0,0 +1,953 @@
1
+ #pragma once
2
+
3
+ // @generated from ../tools/autograd/templates/ViewFuncs.h
4
+
5
+ #include <torch/library.h>
6
+ #include <torch/csrc/autograd/variable.h>
7
+ #include <c10/core/SymIntArrayRef.h>
8
+
9
+ #ifndef AT_PER_OPERATOR_HEADERS
10
+ #include <ATen/Operators.h>
11
+ #else
12
+ #include <ATen/ops/_conj_ops.h>
13
+ #include <ATen/ops/_indices_ops.h>
14
+ #include <ATen/ops/_neg_view_ops.h>
15
+ #include <ATen/ops/_nested_get_values_ops.h>
16
+ #include <ATen/ops/_nested_view_from_buffer_ops.h>
17
+ #include <ATen/ops/_nested_view_from_jagged_ops.h>
18
+ #include <ATen/ops/_reshape_alias_ops.h>
19
+ #include <ATen/ops/_test_autograd_multiple_dispatch_view_ops.h>
20
+ #include <ATen/ops/_values_ops.h>
21
+ #include <ATen/ops/alias_ops.h>
22
+ #include <ATen/ops/as_strided_ops.h>
23
+ #include <ATen/ops/ccol_indices_ops.h>
24
+ #include <ATen/ops/chunk_ops.h>
25
+ #include <ATen/ops/col_indices_ops.h>
26
+ #include <ATen/ops/crow_indices_ops.h>
27
+ #include <ATen/ops/diagonal_ops.h>
28
+ #include <ATen/ops/expand_ops.h>
29
+ #include <ATen/ops/indices_ops.h>
30
+ #include <ATen/ops/narrow_ops.h>
31
+ #include <ATen/ops/permute_ops.h>
32
+ #include <ATen/ops/row_indices_ops.h>
33
+ #include <ATen/ops/select_ops.h>
34
+ #include <ATen/ops/slice_ops.h>
35
+ #include <ATen/ops/slice_inverse_ops.h>
36
+ #include <ATen/ops/split_ops.h>
37
+ #include <ATen/ops/split_with_sizes_ops.h>
38
+ #include <ATen/ops/squeeze_ops.h>
39
+ #include <ATen/ops/squeeze_ops.h>
40
+ #include <ATen/ops/squeeze_ops.h>
41
+ #include <ATen/ops/t_ops.h>
42
+ #include <ATen/ops/transpose_ops.h>
43
+ #include <ATen/ops/unbind_ops.h>
44
+ #include <ATen/ops/unfold_ops.h>
45
+ #include <ATen/ops/unsqueeze_ops.h>
46
+ #include <ATen/ops/values_ops.h>
47
+ #include <ATen/ops/view_ops.h>
48
+ #include <ATen/ops/view_ops.h>
49
+ #include <ATen/ops/view_as_complex_ops.h>
50
+ #include <ATen/ops/view_as_real_ops.h>
51
+ #endif
52
+
53
+ namespace torch::autograd::generated {
54
+
55
+ using at::Scalar;
56
+ using at::Tensor;
57
+ using at::IntArrayRef;
58
+ using at::ArrayRef;
59
+ using at::Type;
60
+ using at::ScalarType;
61
+ using c10::optional;
62
+ using c10::fmap;
63
+
64
+ #define _CONJ_VIEW_FUNC_AVAILABLE
65
+ struct _ConjViewFunc : public torch::autograd::ViewFunc {
66
+ _ConjViewFunc()
67
+ {};
68
+ virtual ~_ConjViewFunc() override {};
69
+ virtual std::vector<c10::SymInt> get_symints() const override;
70
+ virtual size_t num_symints() const override;
71
+ virtual std::vector<at::Tensor> get_tensors() const override;
72
+ virtual size_t num_tensors() const override;
73
+ virtual at::Tensor operator()(const at::Tensor&) const override;
74
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
75
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
76
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
77
+
78
+ protected:
79
+ virtual void set_symints(std::vector<c10::SymInt>) override;
80
+ virtual void set_tensors(std::vector<at::Tensor>) override;
81
+
82
+ private:
83
+
84
+ };
85
+
86
+ #define _INDICES_VIEW_FUNC_AVAILABLE
87
+ struct _IndicesViewFunc : public torch::autograd::ViewFunc {
88
+ _IndicesViewFunc()
89
+ {};
90
+ virtual ~_IndicesViewFunc() override {};
91
+ virtual std::vector<c10::SymInt> get_symints() const override;
92
+ virtual size_t num_symints() const override;
93
+ virtual std::vector<at::Tensor> get_tensors() const override;
94
+ virtual size_t num_tensors() const override;
95
+ virtual at::Tensor operator()(const at::Tensor&) const override;
96
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
97
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
98
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
99
+
100
+ protected:
101
+ virtual void set_symints(std::vector<c10::SymInt>) override;
102
+ virtual void set_tensors(std::vector<at::Tensor>) override;
103
+
104
+ private:
105
+
106
+ };
107
+
108
+ #define _NEG_VIEW_VIEW_FUNC_AVAILABLE
109
+ struct _NegViewViewFunc : public torch::autograd::ViewFunc {
110
+ _NegViewViewFunc()
111
+ {};
112
+ virtual ~_NegViewViewFunc() override {};
113
+ virtual std::vector<c10::SymInt> get_symints() const override;
114
+ virtual size_t num_symints() const override;
115
+ virtual std::vector<at::Tensor> get_tensors() const override;
116
+ virtual size_t num_tensors() const override;
117
+ virtual at::Tensor operator()(const at::Tensor&) const override;
118
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
119
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
120
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
121
+
122
+ protected:
123
+ virtual void set_symints(std::vector<c10::SymInt>) override;
124
+ virtual void set_tensors(std::vector<at::Tensor>) override;
125
+
126
+ private:
127
+
128
+ };
129
+
130
+ #define _NESTED_GET_VALUES_VIEW_FUNC_AVAILABLE
131
+ struct _NestedGetValuesViewFunc : public torch::autograd::ViewFunc {
132
+ _NestedGetValuesViewFunc()
133
+ {};
134
+ virtual ~_NestedGetValuesViewFunc() override {};
135
+ virtual std::vector<c10::SymInt> get_symints() const override;
136
+ virtual size_t num_symints() const override;
137
+ virtual std::vector<at::Tensor> get_tensors() const override;
138
+ virtual size_t num_tensors() const override;
139
+ virtual at::Tensor operator()(const at::Tensor&) const override;
140
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
141
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
142
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
143
+
144
+ protected:
145
+ virtual void set_symints(std::vector<c10::SymInt>) override;
146
+ virtual void set_tensors(std::vector<at::Tensor>) override;
147
+
148
+ private:
149
+
150
+ };
151
+
152
+ #define _NESTED_VIEW_FROM_BUFFER_VIEW_FUNC_AVAILABLE
153
+ struct _NestedViewFromBufferViewFunc : public torch::autograd::ViewFunc {
154
+ _NestedViewFromBufferViewFunc(const at::Tensor & nested_size, const at::Tensor & nested_strides, const at::Tensor & offsets) : nested_size(nested_size), nested_strides(nested_strides), offsets(offsets)
155
+ {};
156
+ virtual ~_NestedViewFromBufferViewFunc() override {};
157
+ virtual std::vector<c10::SymInt> get_symints() const override;
158
+ virtual size_t num_symints() const override;
159
+ virtual std::vector<at::Tensor> get_tensors() const override;
160
+ virtual size_t num_tensors() const override;
161
+ virtual at::Tensor operator()(const at::Tensor&) const override;
162
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
163
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
164
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
165
+
166
+ protected:
167
+ virtual void set_symints(std::vector<c10::SymInt>) override;
168
+ virtual void set_tensors(std::vector<at::Tensor>) override;
169
+
170
+ private:
171
+ at::Tensor nested_size;
172
+ at::Tensor nested_strides;
173
+ at::Tensor offsets;
174
+ };
175
+
176
+ #define _NESTED_VIEW_FROM_JAGGED_VIEW_FUNC_AVAILABLE
177
+ struct _NestedViewFromJaggedViewFunc : public torch::autograd::ViewFunc {
178
+ _NestedViewFromJaggedViewFunc(const at::Tensor & offsets, const at::Tensor & dummy, const c10::optional<at::Tensor> & lengths, int64_t ragged_idx) : offsets(offsets), dummy(dummy), lengths(lengths), ragged_idx(ragged_idx)
179
+ {};
180
+ virtual ~_NestedViewFromJaggedViewFunc() override {};
181
+ virtual std::vector<c10::SymInt> get_symints() const override;
182
+ virtual size_t num_symints() const override;
183
+ virtual std::vector<at::Tensor> get_tensors() const override;
184
+ virtual size_t num_tensors() const override;
185
+ virtual at::Tensor operator()(const at::Tensor&) const override;
186
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
187
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
188
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
189
+
190
+ protected:
191
+ virtual void set_symints(std::vector<c10::SymInt>) override;
192
+ virtual void set_tensors(std::vector<at::Tensor>) override;
193
+
194
+ private:
195
+ at::Tensor offsets;
196
+ at::Tensor dummy;
197
+ c10::optional<at::Tensor> lengths;
198
+ int64_t ragged_idx;
199
+ };
200
+
201
+ #define _RESHAPE_ALIAS_VIEW_FUNC_AVAILABLE
202
+ struct _ReshapeAliasViewFunc : public torch::autograd::ViewFunc {
203
+ _ReshapeAliasViewFunc(c10::SymIntArrayRef size, c10::SymIntArrayRef stride) : size(size.vec()), stride(stride.vec())
204
+ {};
205
+ virtual ~_ReshapeAliasViewFunc() override {};
206
+ virtual std::vector<c10::SymInt> get_symints() const override;
207
+ virtual size_t num_symints() const override;
208
+ virtual std::vector<at::Tensor> get_tensors() const override;
209
+ virtual size_t num_tensors() const override;
210
+ virtual at::Tensor operator()(const at::Tensor&) const override;
211
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
212
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
213
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
214
+
215
+ protected:
216
+ virtual void set_symints(std::vector<c10::SymInt>) override;
217
+ virtual void set_tensors(std::vector<at::Tensor>) override;
218
+
219
+ private:
220
+ ::std::vector<c10::SymInt> size;
221
+ ::std::vector<c10::SymInt> stride;
222
+ };
223
+
224
+ #define _TEST_AUTOGRAD_MULTIPLE_DISPATCH_VIEW_VIEW_FUNC_AVAILABLE
225
+ struct _TestAutogradMultipleDispatchViewViewFunc : public torch::autograd::ViewFunc {
226
+ _TestAutogradMultipleDispatchViewViewFunc()
227
+ {};
228
+ virtual ~_TestAutogradMultipleDispatchViewViewFunc() override {};
229
+ virtual std::vector<c10::SymInt> get_symints() const override;
230
+ virtual size_t num_symints() const override;
231
+ virtual std::vector<at::Tensor> get_tensors() const override;
232
+ virtual size_t num_tensors() const override;
233
+ virtual at::Tensor operator()(const at::Tensor&) const override;
234
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
235
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
236
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
237
+
238
+ protected:
239
+ virtual void set_symints(std::vector<c10::SymInt>) override;
240
+ virtual void set_tensors(std::vector<at::Tensor>) override;
241
+
242
+ private:
243
+
244
+ };
245
+
246
+ #define _VALUES_VIEW_FUNC_AVAILABLE
247
+ struct _ValuesViewFunc : public torch::autograd::ViewFunc {
248
+ _ValuesViewFunc()
249
+ {};
250
+ virtual ~_ValuesViewFunc() override {};
251
+ virtual std::vector<c10::SymInt> get_symints() const override;
252
+ virtual size_t num_symints() const override;
253
+ virtual std::vector<at::Tensor> get_tensors() const override;
254
+ virtual size_t num_tensors() const override;
255
+ virtual at::Tensor operator()(const at::Tensor&) const override;
256
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
257
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
258
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
259
+
260
+ protected:
261
+ virtual void set_symints(std::vector<c10::SymInt>) override;
262
+ virtual void set_tensors(std::vector<at::Tensor>) override;
263
+
264
+ private:
265
+
266
+ };
267
+
268
+ #define ALIAS_VIEW_FUNC_AVAILABLE
269
+ struct AliasViewFunc : public torch::autograd::ViewFunc {
270
+ AliasViewFunc()
271
+ {};
272
+ virtual ~AliasViewFunc() override {};
273
+ virtual std::vector<c10::SymInt> get_symints() const override;
274
+ virtual size_t num_symints() const override;
275
+ virtual std::vector<at::Tensor> get_tensors() const override;
276
+ virtual size_t num_tensors() const override;
277
+ virtual at::Tensor operator()(const at::Tensor&) const override;
278
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
279
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
280
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
281
+
282
+ protected:
283
+ virtual void set_symints(std::vector<c10::SymInt>) override;
284
+ virtual void set_tensors(std::vector<at::Tensor>) override;
285
+
286
+ private:
287
+
288
+ };
289
+
290
+ #define AS_STRIDED_VIEW_FUNC_AVAILABLE
291
+ struct AsStridedViewFunc : public torch::autograd::ViewFunc {
292
+ AsStridedViewFunc(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset) : size(size.vec()), stride(stride.vec()), storage_offset(storage_offset)
293
+ {};
294
+ virtual ~AsStridedViewFunc() override {};
295
+ virtual std::vector<c10::SymInt> get_symints() const override;
296
+ virtual size_t num_symints() const override;
297
+ virtual std::vector<at::Tensor> get_tensors() const override;
298
+ virtual size_t num_tensors() const override;
299
+ virtual at::Tensor operator()(const at::Tensor&) const override;
300
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
301
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
302
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
303
+
304
+ protected:
305
+ virtual void set_symints(std::vector<c10::SymInt>) override;
306
+ virtual void set_tensors(std::vector<at::Tensor>) override;
307
+
308
+ private:
309
+ ::std::vector<c10::SymInt> size;
310
+ ::std::vector<c10::SymInt> stride;
311
+ c10::optional<c10::SymInt> storage_offset;
312
+ };
313
+
314
+ #define CCOL_INDICES_VIEW_FUNC_AVAILABLE
315
+ struct CcolIndicesViewFunc : public torch::autograd::ViewFunc {
316
+ CcolIndicesViewFunc()
317
+ {};
318
+ virtual ~CcolIndicesViewFunc() override {};
319
+ virtual std::vector<c10::SymInt> get_symints() const override;
320
+ virtual size_t num_symints() const override;
321
+ virtual std::vector<at::Tensor> get_tensors() const override;
322
+ virtual size_t num_tensors() const override;
323
+ virtual at::Tensor operator()(const at::Tensor&) const override;
324
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
325
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
326
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
327
+
328
+ protected:
329
+ virtual void set_symints(std::vector<c10::SymInt>) override;
330
+ virtual void set_tensors(std::vector<at::Tensor>) override;
331
+
332
+ private:
333
+
334
+ };
335
+
336
+ #define CHUNK_VIEW_FUNC_AVAILABLE
337
+ struct ChunkViewFunc : public torch::autograd::ViewFunc {
338
+ ChunkViewFunc(int64_t chunks, int64_t dim, int64_t view_idx) : chunks(chunks), dim(dim), view_idx(view_idx)
339
+ {};
340
+ virtual ~ChunkViewFunc() override {};
341
+ virtual std::vector<c10::SymInt> get_symints() const override;
342
+ virtual size_t num_symints() const override;
343
+ virtual std::vector<at::Tensor> get_tensors() const override;
344
+ virtual size_t num_tensors() const override;
345
+ virtual at::Tensor operator()(const at::Tensor&) const override;
346
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
347
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
348
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
349
+
350
+ protected:
351
+ virtual void set_symints(std::vector<c10::SymInt>) override;
352
+ virtual void set_tensors(std::vector<at::Tensor>) override;
353
+
354
+ private:
355
+ int64_t chunks;
356
+ int64_t dim;
357
+ int64_t view_idx;
358
+ };
359
+
360
+ #define COL_INDICES_VIEW_FUNC_AVAILABLE
361
+ struct ColIndicesViewFunc : public torch::autograd::ViewFunc {
362
+ ColIndicesViewFunc()
363
+ {};
364
+ virtual ~ColIndicesViewFunc() override {};
365
+ virtual std::vector<c10::SymInt> get_symints() const override;
366
+ virtual size_t num_symints() const override;
367
+ virtual std::vector<at::Tensor> get_tensors() const override;
368
+ virtual size_t num_tensors() const override;
369
+ virtual at::Tensor operator()(const at::Tensor&) const override;
370
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
371
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
372
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
373
+
374
+ protected:
375
+ virtual void set_symints(std::vector<c10::SymInt>) override;
376
+ virtual void set_tensors(std::vector<at::Tensor>) override;
377
+
378
+ private:
379
+
380
+ };
381
+
382
+ #define CROW_INDICES_VIEW_FUNC_AVAILABLE
383
+ struct CrowIndicesViewFunc : public torch::autograd::ViewFunc {
384
+ CrowIndicesViewFunc()
385
+ {};
386
+ virtual ~CrowIndicesViewFunc() override {};
387
+ virtual std::vector<c10::SymInt> get_symints() const override;
388
+ virtual size_t num_symints() const override;
389
+ virtual std::vector<at::Tensor> get_tensors() const override;
390
+ virtual size_t num_tensors() const override;
391
+ virtual at::Tensor operator()(const at::Tensor&) const override;
392
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
393
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
394
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
395
+
396
+ protected:
397
+ virtual void set_symints(std::vector<c10::SymInt>) override;
398
+ virtual void set_tensors(std::vector<at::Tensor>) override;
399
+
400
+ private:
401
+
402
+ };
403
+
404
+ #define DIAGONAL_VIEW_FUNC_AVAILABLE
405
+ struct DiagonalViewFunc : public torch::autograd::ViewFunc {
406
+ DiagonalViewFunc(int64_t offset, int64_t dim1, int64_t dim2) : offset(offset), dim1(dim1), dim2(dim2)
407
+ {};
408
+ virtual ~DiagonalViewFunc() override {};
409
+ virtual std::vector<c10::SymInt> get_symints() const override;
410
+ virtual size_t num_symints() const override;
411
+ virtual std::vector<at::Tensor> get_tensors() const override;
412
+ virtual size_t num_tensors() const override;
413
+ virtual at::Tensor operator()(const at::Tensor&) const override;
414
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
415
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
416
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
417
+
418
+ protected:
419
+ virtual void set_symints(std::vector<c10::SymInt>) override;
420
+ virtual void set_tensors(std::vector<at::Tensor>) override;
421
+
422
+ private:
423
+ int64_t offset;
424
+ int64_t dim1;
425
+ int64_t dim2;
426
+ };
427
+
428
+ #define EXPAND_VIEW_FUNC_AVAILABLE
429
+ struct ExpandViewFunc : public torch::autograd::ViewFunc {
430
+ ExpandViewFunc(c10::SymIntArrayRef size, bool implicit) : size(size.vec()), implicit(implicit)
431
+ {};
432
+ virtual ~ExpandViewFunc() override {};
433
+ virtual std::vector<c10::SymInt> get_symints() const override;
434
+ virtual size_t num_symints() const override;
435
+ virtual std::vector<at::Tensor> get_tensors() const override;
436
+ virtual size_t num_tensors() const override;
437
+ virtual at::Tensor operator()(const at::Tensor&) const override;
438
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
439
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
440
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
441
+
442
+ protected:
443
+ virtual void set_symints(std::vector<c10::SymInt>) override;
444
+ virtual void set_tensors(std::vector<at::Tensor>) override;
445
+
446
+ private:
447
+ ::std::vector<c10::SymInt> size;
448
+ bool implicit;
449
+ };
450
+
451
+ #define INDICES_VIEW_FUNC_AVAILABLE
452
+ struct IndicesViewFunc : public torch::autograd::ViewFunc {
453
+ IndicesViewFunc()
454
+ {};
455
+ virtual ~IndicesViewFunc() override {};
456
+ virtual std::vector<c10::SymInt> get_symints() const override;
457
+ virtual size_t num_symints() const override;
458
+ virtual std::vector<at::Tensor> get_tensors() const override;
459
+ virtual size_t num_tensors() const override;
460
+ virtual at::Tensor operator()(const at::Tensor&) const override;
461
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
462
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
463
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
464
+
465
+ protected:
466
+ virtual void set_symints(std::vector<c10::SymInt>) override;
467
+ virtual void set_tensors(std::vector<at::Tensor>) override;
468
+
469
+ private:
470
+
471
+ };
472
+
473
+ #define NARROW_VIEW_FUNC_AVAILABLE
474
+ struct NarrowViewFunc : public torch::autograd::ViewFunc {
475
+ NarrowViewFunc(int64_t dim, c10::SymInt start, c10::SymInt length) : dim(dim), start(start), length(length)
476
+ {};
477
+ virtual ~NarrowViewFunc() override {};
478
+ virtual std::vector<c10::SymInt> get_symints() const override;
479
+ virtual size_t num_symints() const override;
480
+ virtual std::vector<at::Tensor> get_tensors() const override;
481
+ virtual size_t num_tensors() const override;
482
+ virtual at::Tensor operator()(const at::Tensor&) const override;
483
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
484
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
485
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
486
+
487
+ protected:
488
+ virtual void set_symints(std::vector<c10::SymInt>) override;
489
+ virtual void set_tensors(std::vector<at::Tensor>) override;
490
+
491
+ private:
492
+ int64_t dim;
493
+ c10::SymInt start;
494
+ c10::SymInt length;
495
+ };
496
+
497
+ #define PERMUTE_VIEW_FUNC_AVAILABLE
498
+ struct PermuteViewFunc : public torch::autograd::ViewFunc {
499
+ PermuteViewFunc(at::IntArrayRef dims) : dims(dims.vec())
500
+ {};
501
+ virtual ~PermuteViewFunc() override {};
502
+ virtual std::vector<c10::SymInt> get_symints() const override;
503
+ virtual size_t num_symints() const override;
504
+ virtual std::vector<at::Tensor> get_tensors() const override;
505
+ virtual size_t num_tensors() const override;
506
+ virtual at::Tensor operator()(const at::Tensor&) const override;
507
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
508
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
509
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
510
+
511
+ protected:
512
+ virtual void set_symints(std::vector<c10::SymInt>) override;
513
+ virtual void set_tensors(std::vector<at::Tensor>) override;
514
+
515
+ private:
516
+ ::std::vector<int64_t> dims;
517
+ };
518
+
519
+ #define ROW_INDICES_VIEW_FUNC_AVAILABLE
520
+ struct RowIndicesViewFunc : public torch::autograd::ViewFunc {
521
+ RowIndicesViewFunc()
522
+ {};
523
+ virtual ~RowIndicesViewFunc() override {};
524
+ virtual std::vector<c10::SymInt> get_symints() const override;
525
+ virtual size_t num_symints() const override;
526
+ virtual std::vector<at::Tensor> get_tensors() const override;
527
+ virtual size_t num_tensors() const override;
528
+ virtual at::Tensor operator()(const at::Tensor&) const override;
529
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
530
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
531
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
532
+
533
+ protected:
534
+ virtual void set_symints(std::vector<c10::SymInt>) override;
535
+ virtual void set_tensors(std::vector<at::Tensor>) override;
536
+
537
+ private:
538
+
539
+ };
540
+
541
+ #define SELECT_INT_VIEW_FUNC_AVAILABLE
542
+ struct SelectIntViewFunc : public torch::autograd::ViewFunc {
543
+ SelectIntViewFunc(int64_t dim, c10::SymInt index) : dim(dim), index(index)
544
+ {};
545
+ virtual ~SelectIntViewFunc() override {};
546
+ virtual std::vector<c10::SymInt> get_symints() const override;
547
+ virtual size_t num_symints() const override;
548
+ virtual std::vector<at::Tensor> get_tensors() const override;
549
+ virtual size_t num_tensors() const override;
550
+ virtual at::Tensor operator()(const at::Tensor&) const override;
551
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
552
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
553
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
554
+
555
+ protected:
556
+ virtual void set_symints(std::vector<c10::SymInt>) override;
557
+ virtual void set_tensors(std::vector<at::Tensor>) override;
558
+
559
+ private:
560
+ int64_t dim;
561
+ c10::SymInt index;
562
+ };
563
+
564
+ #define SLICE_TENSOR_VIEW_FUNC_AVAILABLE
565
+ struct SliceTensorViewFunc : public torch::autograd::ViewFunc {
566
+ SliceTensorViewFunc(int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step) : dim(dim), start(start), end(end), step(step)
567
+ {};
568
+ virtual ~SliceTensorViewFunc() override {};
569
+ virtual std::vector<c10::SymInt> get_symints() const override;
570
+ virtual size_t num_symints() const override;
571
+ virtual std::vector<at::Tensor> get_tensors() const override;
572
+ virtual size_t num_tensors() const override;
573
+ virtual at::Tensor operator()(const at::Tensor&) const override;
574
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
575
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
576
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
577
+
578
+ protected:
579
+ virtual void set_symints(std::vector<c10::SymInt>) override;
580
+ virtual void set_tensors(std::vector<at::Tensor>) override;
581
+
582
+ private:
583
+ int64_t dim;
584
+ c10::optional<c10::SymInt> start;
585
+ c10::optional<c10::SymInt> end;
586
+ c10::SymInt step;
587
+ };
588
+
589
+ #define SLICE_INVERSE_VIEW_FUNC_AVAILABLE
590
+ struct SliceInverseViewFunc : public torch::autograd::ViewFunc {
591
+ SliceInverseViewFunc(const at::Tensor & src, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step) : src(src), dim(dim), start(start), end(end), step(step)
592
+ {};
593
+ virtual ~SliceInverseViewFunc() override {};
594
+ virtual std::vector<c10::SymInt> get_symints() const override;
595
+ virtual size_t num_symints() const override;
596
+ virtual std::vector<at::Tensor> get_tensors() const override;
597
+ virtual size_t num_tensors() const override;
598
+ virtual at::Tensor operator()(const at::Tensor&) const override;
599
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
600
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
601
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
602
+
603
+ protected:
604
+ virtual void set_symints(std::vector<c10::SymInt>) override;
605
+ virtual void set_tensors(std::vector<at::Tensor>) override;
606
+
607
+ private:
608
+ at::Tensor src;
609
+ int64_t dim;
610
+ c10::optional<c10::SymInt> start;
611
+ c10::optional<c10::SymInt> end;
612
+ c10::SymInt step;
613
+ };
614
+
615
+ #define SPLIT_TENSOR_VIEW_FUNC_AVAILABLE
616
+ struct SplitTensorViewFunc : public torch::autograd::ViewFunc {
617
+ SplitTensorViewFunc(c10::SymInt split_size, int64_t dim, int64_t view_idx) : split_size(split_size), dim(dim), view_idx(view_idx)
618
+ {};
619
+ virtual ~SplitTensorViewFunc() override {};
620
+ virtual std::vector<c10::SymInt> get_symints() const override;
621
+ virtual size_t num_symints() const override;
622
+ virtual std::vector<at::Tensor> get_tensors() const override;
623
+ virtual size_t num_tensors() const override;
624
+ virtual at::Tensor operator()(const at::Tensor&) const override;
625
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
626
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
627
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
628
+
629
+ protected:
630
+ virtual void set_symints(std::vector<c10::SymInt>) override;
631
+ virtual void set_tensors(std::vector<at::Tensor>) override;
632
+
633
+ private:
634
+ c10::SymInt split_size;
635
+ int64_t dim;
636
+ int64_t view_idx;
637
+ };
638
+
639
+ #define SPLIT_WITH_SIZES_VIEW_FUNC_AVAILABLE
640
+ struct SplitWithSizesViewFunc : public torch::autograd::ViewFunc {
641
+ SplitWithSizesViewFunc(c10::SymIntArrayRef split_sizes, int64_t dim, int64_t view_idx) : split_sizes(split_sizes.vec()), dim(dim), view_idx(view_idx)
642
+ {};
643
+ virtual ~SplitWithSizesViewFunc() override {};
644
+ virtual std::vector<c10::SymInt> get_symints() const override;
645
+ virtual size_t num_symints() const override;
646
+ virtual std::vector<at::Tensor> get_tensors() const override;
647
+ virtual size_t num_tensors() const override;
648
+ virtual at::Tensor operator()(const at::Tensor&) const override;
649
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
650
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
651
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
652
+
653
+ protected:
654
+ virtual void set_symints(std::vector<c10::SymInt>) override;
655
+ virtual void set_tensors(std::vector<at::Tensor>) override;
656
+
657
+ private:
658
+ ::std::vector<c10::SymInt> split_sizes;
659
+ int64_t dim;
660
+ int64_t view_idx;
661
+ };
662
+
663
+ #define SQUEEZE_VIEW_FUNC_AVAILABLE
664
+ struct SqueezeViewFunc : public torch::autograd::ViewFunc {
665
+ SqueezeViewFunc()
666
+ {};
667
+ virtual ~SqueezeViewFunc() override {};
668
+ virtual std::vector<c10::SymInt> get_symints() const override;
669
+ virtual size_t num_symints() const override;
670
+ virtual std::vector<at::Tensor> get_tensors() const override;
671
+ virtual size_t num_tensors() const override;
672
+ virtual at::Tensor operator()(const at::Tensor&) const override;
673
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
674
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
675
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
676
+
677
+ protected:
678
+ virtual void set_symints(std::vector<c10::SymInt>) override;
679
+ virtual void set_tensors(std::vector<at::Tensor>) override;
680
+
681
+ private:
682
+
683
+ };
684
+
685
+ #define SQUEEZE_DIM_VIEW_FUNC_AVAILABLE
686
+ struct SqueezeDimViewFunc : public torch::autograd::ViewFunc {
687
+ SqueezeDimViewFunc(int64_t dim) : dim(dim)
688
+ {};
689
+ virtual ~SqueezeDimViewFunc() override {};
690
+ virtual std::vector<c10::SymInt> get_symints() const override;
691
+ virtual size_t num_symints() const override;
692
+ virtual std::vector<at::Tensor> get_tensors() const override;
693
+ virtual size_t num_tensors() const override;
694
+ virtual at::Tensor operator()(const at::Tensor&) const override;
695
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
696
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
697
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
698
+
699
+ protected:
700
+ virtual void set_symints(std::vector<c10::SymInt>) override;
701
+ virtual void set_tensors(std::vector<at::Tensor>) override;
702
+
703
+ private:
704
+ int64_t dim;
705
+ };
706
+
707
+ #define SQUEEZE_DIMS_VIEW_FUNC_AVAILABLE
708
+ struct SqueezeDimsViewFunc : public torch::autograd::ViewFunc {
709
+ SqueezeDimsViewFunc(at::IntArrayRef dim) : dim(dim.vec())
710
+ {};
711
+ virtual ~SqueezeDimsViewFunc() override {};
712
+ virtual std::vector<c10::SymInt> get_symints() const override;
713
+ virtual size_t num_symints() const override;
714
+ virtual std::vector<at::Tensor> get_tensors() const override;
715
+ virtual size_t num_tensors() const override;
716
+ virtual at::Tensor operator()(const at::Tensor&) const override;
717
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
718
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
719
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
720
+
721
+ protected:
722
+ virtual void set_symints(std::vector<c10::SymInt>) override;
723
+ virtual void set_tensors(std::vector<at::Tensor>) override;
724
+
725
+ private:
726
+ ::std::vector<int64_t> dim;
727
+ };
728
+
729
+ #define T_VIEW_FUNC_AVAILABLE
730
+ struct TViewFunc : public torch::autograd::ViewFunc {
731
+ TViewFunc()
732
+ {};
733
+ virtual ~TViewFunc() override {};
734
+ virtual std::vector<c10::SymInt> get_symints() const override;
735
+ virtual size_t num_symints() const override;
736
+ virtual std::vector<at::Tensor> get_tensors() const override;
737
+ virtual size_t num_tensors() const override;
738
+ virtual at::Tensor operator()(const at::Tensor&) const override;
739
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
740
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
741
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
742
+
743
+ protected:
744
+ virtual void set_symints(std::vector<c10::SymInt>) override;
745
+ virtual void set_tensors(std::vector<at::Tensor>) override;
746
+
747
+ private:
748
+
749
+ };
750
+
751
+ #define TRANSPOSE_INT_VIEW_FUNC_AVAILABLE
752
+ struct TransposeIntViewFunc : public torch::autograd::ViewFunc {
753
+ TransposeIntViewFunc(int64_t dim0, int64_t dim1) : dim0(dim0), dim1(dim1)
754
+ {};
755
+ virtual ~TransposeIntViewFunc() override {};
756
+ virtual std::vector<c10::SymInt> get_symints() const override;
757
+ virtual size_t num_symints() const override;
758
+ virtual std::vector<at::Tensor> get_tensors() const override;
759
+ virtual size_t num_tensors() const override;
760
+ virtual at::Tensor operator()(const at::Tensor&) const override;
761
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
762
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
763
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
764
+
765
+ protected:
766
+ virtual void set_symints(std::vector<c10::SymInt>) override;
767
+ virtual void set_tensors(std::vector<at::Tensor>) override;
768
+
769
+ private:
770
+ int64_t dim0;
771
+ int64_t dim1;
772
+ };
773
+
774
+ #define UNBIND_INT_VIEW_FUNC_AVAILABLE
775
+ struct UnbindIntViewFunc : public torch::autograd::ViewFunc {
776
+ UnbindIntViewFunc(int64_t dim, int64_t view_idx) : dim(dim), view_idx(view_idx)
777
+ {};
778
+ virtual ~UnbindIntViewFunc() override {};
779
+ virtual std::vector<c10::SymInt> get_symints() const override;
780
+ virtual size_t num_symints() const override;
781
+ virtual std::vector<at::Tensor> get_tensors() const override;
782
+ virtual size_t num_tensors() const override;
783
+ virtual at::Tensor operator()(const at::Tensor&) const override;
784
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
785
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
786
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
787
+
788
+ protected:
789
+ virtual void set_symints(std::vector<c10::SymInt>) override;
790
+ virtual void set_tensors(std::vector<at::Tensor>) override;
791
+
792
+ private:
793
+ int64_t dim;
794
+ int64_t view_idx;
795
+ };
796
+
797
+ #define UNFOLD_VIEW_FUNC_AVAILABLE
798
+ struct UnfoldViewFunc : public torch::autograd::ViewFunc {
799
+ UnfoldViewFunc(int64_t dimension, int64_t size, int64_t step) : dimension(dimension), size(size), step(step)
800
+ {};
801
+ virtual ~UnfoldViewFunc() override {};
802
+ virtual std::vector<c10::SymInt> get_symints() const override;
803
+ virtual size_t num_symints() const override;
804
+ virtual std::vector<at::Tensor> get_tensors() const override;
805
+ virtual size_t num_tensors() const override;
806
+ virtual at::Tensor operator()(const at::Tensor&) const override;
807
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
808
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
809
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
810
+
811
+ protected:
812
+ virtual void set_symints(std::vector<c10::SymInt>) override;
813
+ virtual void set_tensors(std::vector<at::Tensor>) override;
814
+
815
+ private:
816
+ int64_t dimension;
817
+ int64_t size;
818
+ int64_t step;
819
+ };
820
+
821
+ #define UNSQUEEZE_VIEW_FUNC_AVAILABLE
822
+ struct UnsqueezeViewFunc : public torch::autograd::ViewFunc {
823
+ UnsqueezeViewFunc(int64_t dim) : dim(dim)
824
+ {};
825
+ virtual ~UnsqueezeViewFunc() override {};
826
+ virtual std::vector<c10::SymInt> get_symints() const override;
827
+ virtual size_t num_symints() const override;
828
+ virtual std::vector<at::Tensor> get_tensors() const override;
829
+ virtual size_t num_tensors() const override;
830
+ virtual at::Tensor operator()(const at::Tensor&) const override;
831
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
832
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
833
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
834
+
835
+ protected:
836
+ virtual void set_symints(std::vector<c10::SymInt>) override;
837
+ virtual void set_tensors(std::vector<at::Tensor>) override;
838
+
839
+ private:
840
+ int64_t dim;
841
+ };
842
+
843
+ #define VALUES_VIEW_FUNC_AVAILABLE
844
+ struct ValuesViewFunc : public torch::autograd::ViewFunc {
845
+ ValuesViewFunc()
846
+ {};
847
+ virtual ~ValuesViewFunc() override {};
848
+ virtual std::vector<c10::SymInt> get_symints() const override;
849
+ virtual size_t num_symints() const override;
850
+ virtual std::vector<at::Tensor> get_tensors() const override;
851
+ virtual size_t num_tensors() const override;
852
+ virtual at::Tensor operator()(const at::Tensor&) const override;
853
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
854
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
855
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
856
+
857
+ protected:
858
+ virtual void set_symints(std::vector<c10::SymInt>) override;
859
+ virtual void set_tensors(std::vector<at::Tensor>) override;
860
+
861
+ private:
862
+
863
+ };
864
+
865
+ #define VIEW_VIEW_FUNC_AVAILABLE
866
+ struct ViewViewFunc : public torch::autograd::ViewFunc {
867
+ ViewViewFunc(c10::SymIntArrayRef size) : size(size.vec())
868
+ {};
869
+ virtual ~ViewViewFunc() override {};
870
+ virtual std::vector<c10::SymInt> get_symints() const override;
871
+ virtual size_t num_symints() const override;
872
+ virtual std::vector<at::Tensor> get_tensors() const override;
873
+ virtual size_t num_tensors() const override;
874
+ virtual at::Tensor operator()(const at::Tensor&) const override;
875
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
876
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
877
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
878
+
879
+ protected:
880
+ virtual void set_symints(std::vector<c10::SymInt>) override;
881
+ virtual void set_tensors(std::vector<at::Tensor>) override;
882
+
883
+ private:
884
+ ::std::vector<c10::SymInt> size;
885
+ };
886
+
887
+ #define VIEW_DTYPE_VIEW_FUNC_AVAILABLE
888
+ struct ViewDtypeViewFunc : public torch::autograd::ViewFunc {
889
+ ViewDtypeViewFunc(at::ScalarType dtype) : dtype(dtype)
890
+ {};
891
+ virtual ~ViewDtypeViewFunc() override {};
892
+ virtual std::vector<c10::SymInt> get_symints() const override;
893
+ virtual size_t num_symints() const override;
894
+ virtual std::vector<at::Tensor> get_tensors() const override;
895
+ virtual size_t num_tensors() const override;
896
+ virtual at::Tensor operator()(const at::Tensor&) const override;
897
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
898
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
899
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
900
+
901
+ protected:
902
+ virtual void set_symints(std::vector<c10::SymInt>) override;
903
+ virtual void set_tensors(std::vector<at::Tensor>) override;
904
+
905
+ private:
906
+ at::ScalarType dtype;
907
+ };
908
+
909
+ #define VIEW_AS_COMPLEX_VIEW_FUNC_AVAILABLE
910
+ struct ViewAsComplexViewFunc : public torch::autograd::ViewFunc {
911
+ ViewAsComplexViewFunc()
912
+ {};
913
+ virtual ~ViewAsComplexViewFunc() override {};
914
+ virtual std::vector<c10::SymInt> get_symints() const override;
915
+ virtual size_t num_symints() const override;
916
+ virtual std::vector<at::Tensor> get_tensors() const override;
917
+ virtual size_t num_tensors() const override;
918
+ virtual at::Tensor operator()(const at::Tensor&) const override;
919
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
920
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
921
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
922
+
923
+ protected:
924
+ virtual void set_symints(std::vector<c10::SymInt>) override;
925
+ virtual void set_tensors(std::vector<at::Tensor>) override;
926
+
927
+ private:
928
+
929
+ };
930
+
931
+ #define VIEW_AS_REAL_VIEW_FUNC_AVAILABLE
932
+ struct ViewAsRealViewFunc : public torch::autograd::ViewFunc {
933
+ ViewAsRealViewFunc()
934
+ {};
935
+ virtual ~ViewAsRealViewFunc() override {};
936
+ virtual std::vector<c10::SymInt> get_symints() const override;
937
+ virtual size_t num_symints() const override;
938
+ virtual std::vector<at::Tensor> get_tensors() const override;
939
+ virtual size_t num_tensors() const override;
940
+ virtual at::Tensor operator()(const at::Tensor&) const override;
941
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
942
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
943
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
944
+
945
+ protected:
946
+ virtual void set_symints(std::vector<c10::SymInt>) override;
947
+ virtual void set_tensors(std::vector<at::Tensor>) override;
948
+
949
+ private:
950
+
951
+ };
952
+
953
+ } // namespace torch::autograd::generated
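Each of these generated structs captures the non-tensor arguments of one view op so the view can be replayed or rebuilt later. A minimal usage sketch, assuming the header above is on the include path and that operator() reapplies the underlying op (here Tensor::unfold):

    #include <ATen/ATen.h>
    #include <torch/csrc/autograd/generated/ViewFuncs.h>

    void replay_unfold() {
      at::Tensor base = at::arange(6);
      // Saved view arguments: dimension=0, size=2, step=1.
      torch::autograd::generated::UnfoldViewFunc fn(/*dimension=*/0, /*size=*/2, /*step=*/1);
      at::Tensor view = fn(base);       // presumably equivalent to base.unfold(0, 2, 1)
      auto saved = fn.get_symints();    // whatever SymInt state this func stores
      auto copy = fn.clone_and_set();   // clone, optionally substituting new state
    }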
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/python_functions.h ADDED
@@ -0,0 +1,25 @@
1
+ #pragma once
2
+
3
+ #include <Python.h>
4
+
5
+ // @generated from ../tools/autograd/templates/python_functions.h
6
+
7
+ // Python bindings for automatically generated autograd functions
8
+
9
+ namespace torch { namespace autograd { namespace generated {
10
+
11
+ void initialize_autogenerated_functions_0(PyObject* module);
12
+ void initialize_autogenerated_functions_1(PyObject* module);
13
+ void initialize_autogenerated_functions_2(PyObject* module);
14
+ void initialize_autogenerated_functions_3(PyObject* module);
15
+ void initialize_autogenerated_functions_4(PyObject* module);
16
+
17
+ inline void initialize_autogenerated_functions(PyObject* module) {
18
+ initialize_autogenerated_functions_0(module);
19
+ initialize_autogenerated_functions_1(module);
20
+ initialize_autogenerated_functions_2(module);
21
+ initialize_autogenerated_functions_3(module);
22
+ initialize_autogenerated_functions_4(module);
23
+ }
24
+
25
+ }}} // namespace torch::autograd::generated
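A sketch of calling the aggregator while initializing a CPython extension module; the module definition below is illustrative, not part of the header:

    #include <Python.h>
    #include <torch/csrc/autograd/generated/python_functions.h>

    static struct PyModuleDef functions_module = {
        PyModuleDef_HEAD_INIT, "example_autograd_functions", nullptr, -1, nullptr};

    PyMODINIT_FUNC PyInit_example_autograd_functions() {
      PyObject* module = PyModule_Create(&functions_module);
      if (module == nullptr) {
        return nullptr;
      }
      // Registers all generated autograd function bindings on the module, shard by shard.
      torch::autograd::generated::initialize_autogenerated_functions(module);
      return module;
    }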
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/python_return_types.h ADDED
@@ -0,0 +1,98 @@
1
+ #pragma once
2
+
3
+ namespace torch {
4
+ namespace autograd {
5
+ namespace generated {
6
+
7
+ PyTypeObject* get__fake_quantize_per_tensor_affine_cachemask_tensor_qparams_structseq();
8
+ PyTypeObject* get__fused_moving_avg_obs_fq_helper_structseq();
9
+ PyTypeObject* get__linalg_det_structseq();
10
+ PyTypeObject* get__linalg_det_out_structseq();
11
+ PyTypeObject* get__linalg_eigh_structseq();
12
+ PyTypeObject* get__linalg_eigh_out_structseq();
13
+ PyTypeObject* get__linalg_slogdet_structseq();
14
+ PyTypeObject* get__linalg_slogdet_out_structseq();
15
+ PyTypeObject* get__linalg_solve_ex_structseq();
16
+ PyTypeObject* get__linalg_solve_ex_out_structseq();
17
+ PyTypeObject* get__linalg_svd_structseq();
18
+ PyTypeObject* get__linalg_svd_out_structseq();
19
+ PyTypeObject* get__lu_with_info_structseq();
20
+ PyTypeObject* get__scaled_dot_product_cudnn_attention_structseq();
21
+ PyTypeObject* get__scaled_dot_product_efficient_attention_structseq();
22
+ PyTypeObject* get__scaled_dot_product_flash_attention_structseq();
23
+ PyTypeObject* get__scaled_dot_product_flash_attention_for_cpu_structseq();
24
+ PyTypeObject* get__unpack_dual_structseq();
25
+ PyTypeObject* get_aminmax_structseq();
26
+ PyTypeObject* get_aminmax_out_structseq();
27
+ PyTypeObject* get_cummax_structseq();
28
+ PyTypeObject* get_cummax_out_structseq();
29
+ PyTypeObject* get_cummin_structseq();
30
+ PyTypeObject* get_cummin_out_structseq();
31
+ PyTypeObject* get_frexp_structseq();
32
+ PyTypeObject* get_frexp_out_structseq();
33
+ PyTypeObject* get_geqrf_out_structseq();
34
+ PyTypeObject* get_geqrf_structseq();
35
+ PyTypeObject* get_histogram_out_structseq();
36
+ PyTypeObject* get_histogram_structseq();
37
+ PyTypeObject* get_histogramdd_structseq();
38
+ PyTypeObject* get_kthvalue_structseq();
39
+ PyTypeObject* get_kthvalue_out_structseq();
40
+ PyTypeObject* get_linalg_cholesky_ex_structseq();
41
+ PyTypeObject* get_linalg_cholesky_ex_out_structseq();
42
+ PyTypeObject* get_linalg_eig_structseq();
43
+ PyTypeObject* get_linalg_eig_out_structseq();
44
+ PyTypeObject* get_linalg_eigh_structseq();
45
+ PyTypeObject* get_linalg_eigh_out_structseq();
46
+ PyTypeObject* get_linalg_inv_ex_structseq();
47
+ PyTypeObject* get_linalg_inv_ex_out_structseq();
48
+ PyTypeObject* get_linalg_ldl_factor_structseq();
49
+ PyTypeObject* get_linalg_ldl_factor_out_structseq();
50
+ PyTypeObject* get_linalg_ldl_factor_ex_structseq();
51
+ PyTypeObject* get_linalg_ldl_factor_ex_out_structseq();
52
+ PyTypeObject* get_linalg_lstsq_structseq();
53
+ PyTypeObject* get_linalg_lstsq_out_structseq();
54
+ PyTypeObject* get_linalg_lu_structseq();
55
+ PyTypeObject* get_linalg_lu_out_structseq();
56
+ PyTypeObject* get_linalg_lu_factor_structseq();
57
+ PyTypeObject* get_linalg_lu_factor_out_structseq();
58
+ PyTypeObject* get_linalg_lu_factor_ex_structseq();
59
+ PyTypeObject* get_linalg_lu_factor_ex_out_structseq();
60
+ PyTypeObject* get_linalg_qr_structseq();
61
+ PyTypeObject* get_linalg_qr_out_structseq();
62
+ PyTypeObject* get_linalg_slogdet_structseq();
63
+ PyTypeObject* get_linalg_slogdet_out_structseq();
64
+ PyTypeObject* get_linalg_solve_ex_structseq();
65
+ PyTypeObject* get_linalg_solve_ex_out_structseq();
66
+ PyTypeObject* get_linalg_svd_structseq();
67
+ PyTypeObject* get_linalg_svd_out_structseq();
68
+ PyTypeObject* get_lu_unpack_structseq();
69
+ PyTypeObject* get_lu_unpack_out_structseq();
70
+ PyTypeObject* get_max_structseq();
71
+ PyTypeObject* get_max_out_structseq();
72
+ PyTypeObject* get_median_structseq();
73
+ PyTypeObject* get_median_out_structseq();
74
+ PyTypeObject* get_min_structseq();
75
+ PyTypeObject* get_min_out_structseq();
76
+ PyTypeObject* get_mode_structseq();
77
+ PyTypeObject* get_mode_out_structseq();
78
+ PyTypeObject* get_nanmedian_structseq();
79
+ PyTypeObject* get_nanmedian_out_structseq();
80
+ PyTypeObject* get_qr_out_structseq();
81
+ PyTypeObject* get_qr_structseq();
82
+ PyTypeObject* get_slogdet_structseq();
83
+ PyTypeObject* get_slogdet_out_structseq();
84
+ PyTypeObject* get_sort_out_structseq();
85
+ PyTypeObject* get_sort_structseq();
86
+ PyTypeObject* get_svd_out_structseq();
87
+ PyTypeObject* get_svd_structseq();
88
+ PyTypeObject* get_topk_out_structseq();
89
+ PyTypeObject* get_topk_structseq();
90
+ PyTypeObject* get_triangular_solve_out_structseq();
91
+ PyTypeObject* get_triangular_solve_structseq();
92
+
93
+ }
94
+
95
+ void initReturnTypes(PyObject* module);
96
+
97
+ } // namespace autograd
98
+ } // namespace torch
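Each getter returns the PyTypeObject of a structseq (named-tuple-like) return type. A sketch of constructing one instance with the CPython structseq API; the (values, indices) field order for sort is assumed here purely for illustration:

    #include <Python.h>
    #include <torch/csrc/autograd/generated/python_return_types.h>

    // Builds a sort-style result from two existing Python objects.
    PyObject* make_sort_result(PyObject* values, PyObject* indices) {
      PyTypeObject* type = torch::autograd::generated::get_sort_structseq();
      PyObject* result = PyStructSequence_New(type);
      if (result == nullptr) {
        return nullptr;
      }
      Py_INCREF(values);
      Py_INCREF(indices);
      PyStructSequence_SET_ITEM(result, 0, values);   // steals the reference
      PyStructSequence_SET_ITEM(result, 1, indices);  // steals the reference
      return result;
    }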
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/variable_factories.h ADDED
@@ -0,0 +1,736 @@
1
+ #pragma once
2
+
3
+ // @generated from ../tools/autograd/templates/variable_factories.h
4
+
5
+ #include <ATen/core/Tensor.h>
6
+ #include <ATen/TracerMode.h>
7
+ #include <ATen/core/grad_mode.h>
8
+ #include <c10/util/ArrayRef.h>
9
+ #include <c10/core/MemoryFormat.h>
10
+ #include <torch/csrc/api/include/torch/detail/TensorDataContainer.h>
11
+ #include <torch/csrc/autograd/variable.h>
12
+
13
+ #ifndef AT_PER_OPERATOR_HEADERS
14
+ #include <ATen/Functions.h>
15
+ #else
16
+ #include <ATen/ops/from_blob.h>
17
+ #include <ATen/ops/_make_dep_token.h>
18
+ #include <ATen/ops/_cudnn_init_dropout_state.h>
19
+ #include <ATen/ops/arange.h>
20
+ #include <ATen/ops/arange.h>
21
+ #include <ATen/ops/arange.h>
22
+ #include <ATen/ops/bartlett_window.h>
23
+ #include <ATen/ops/bartlett_window.h>
24
+ #include <ATen/ops/blackman_window.h>
25
+ #include <ATen/ops/blackman_window.h>
26
+ #include <ATen/ops/empty.h>
27
+ #include <ATen/ops/empty.h>
28
+ #include <ATen/ops/empty_permuted.h>
29
+ #include <ATen/ops/_empty_affine_quantized.h>
30
+ #include <ATen/ops/_empty_per_channel_affine_quantized.h>
31
+ #include <ATen/ops/empty_quantized.h>
32
+ #include <ATen/ops/empty_like.h>
33
+ #include <ATen/ops/empty_strided.h>
34
+ #include <ATen/ops/eye.h>
35
+ #include <ATen/ops/eye.h>
36
+ #include <ATen/ops/full.h>
37
+ #include <ATen/ops/full.h>
38
+ #include <ATen/ops/full_like.h>
39
+ #include <ATen/ops/from_file.h>
40
+ #include <ATen/ops/hann_window.h>
41
+ #include <ATen/ops/hann_window.h>
42
+ #include <ATen/ops/hamming_window.h>
43
+ #include <ATen/ops/hamming_window.h>
44
+ #include <ATen/ops/hamming_window.h>
45
+ #include <ATen/ops/hamming_window.h>
46
+ #include <ATen/ops/kaiser_window.h>
47
+ #include <ATen/ops/kaiser_window.h>
48
+ #include <ATen/ops/kaiser_window.h>
49
+ #include <ATen/ops/linspace.h>
50
+ #include <ATen/ops/linspace.h>
51
+ #include <ATen/ops/linspace.h>
52
+ #include <ATen/ops/linspace.h>
53
+ #include <ATen/ops/logspace.h>
54
+ #include <ATen/ops/logspace.h>
55
+ #include <ATen/ops/logspace.h>
56
+ #include <ATen/ops/logspace.h>
57
+ #include <ATen/ops/ones.h>
58
+ #include <ATen/ops/ones.h>
59
+ #include <ATen/ops/ones_like.h>
60
+ #include <ATen/ops/scalar_tensor.h>
61
+ #include <ATen/ops/rand.h>
62
+ #include <ATen/ops/rand.h>
63
+ #include <ATen/ops/rand.h>
64
+ #include <ATen/ops/rand.h>
65
+ #include <ATen/ops/rand_like.h>
66
+ #include <ATen/ops/randint.h>
67
+ #include <ATen/ops/randint.h>
68
+ #include <ATen/ops/randint.h>
69
+ #include <ATen/ops/randint.h>
70
+ #include <ATen/ops/randint_like.h>
71
+ #include <ATen/ops/randint_like.h>
72
+ #include <ATen/ops/randn.h>
73
+ #include <ATen/ops/randn.h>
74
+ #include <ATen/ops/randn.h>
75
+ #include <ATen/ops/randn.h>
76
+ #include <ATen/ops/randn_like.h>
77
+ #include <ATen/ops/randperm.h>
78
+ #include <ATen/ops/randperm.h>
79
+ #include <ATen/ops/range.h>
80
+ #include <ATen/ops/range.h>
81
+ #include <ATen/ops/zeros.h>
82
+ #include <ATen/ops/_efficientzerotensor.h>
83
+ #include <ATen/ops/zeros.h>
84
+ #include <ATen/ops/zeros_like.h>
85
+ #include <ATen/ops/sparse_compressed_tensor.h>
86
+ #include <ATen/ops/sparse_csr_tensor.h>
87
+ #include <ATen/ops/sparse_csc_tensor.h>
88
+ #include <ATen/ops/sparse_bsr_tensor.h>
89
+ #include <ATen/ops/sparse_bsc_tensor.h>
90
+ #include <ATen/ops/sparse_compressed_tensor.h>
91
+ #include <ATen/ops/sparse_csr_tensor.h>
92
+ #include <ATen/ops/sparse_csc_tensor.h>
93
+ #include <ATen/ops/sparse_bsr_tensor.h>
94
+ #include <ATen/ops/sparse_bsc_tensor.h>
95
+ #include <ATen/ops/_sparse_compressed_tensor_unsafe.h>
96
+ #include <ATen/ops/_sparse_csr_tensor_unsafe.h>
97
+ #include <ATen/ops/_sparse_csc_tensor_unsafe.h>
98
+ #include <ATen/ops/_sparse_bsr_tensor_unsafe.h>
99
+ #include <ATen/ops/_sparse_bsc_tensor_unsafe.h>
100
+ #include <ATen/ops/sparse_coo_tensor.h>
101
+ #include <ATen/ops/sparse_coo_tensor.h>
102
+ #include <ATen/ops/sparse_coo_tensor.h>
103
+ #include <ATen/ops/_sparse_coo_tensor_unsafe.h>
104
+ #include <ATen/ops/_sparse_coo_tensor_with_dims.h>
105
+ #include <ATen/ops/_sparse_coo_tensor_with_dims_and_tensors.h>
106
+ #include <ATen/ops/_to_copy.h>
107
+ #include <ATen/ops/tril_indices.h>
108
+ #include <ATen/ops/triu_indices.h>
109
+ #include <ATen/ops/normal.h>
110
+ #include <ATen/ops/fft_fftfreq.h>
111
+ #include <ATen/ops/fft_rfftfreq.h>
112
+ #endif
113
+
114
+ #include <functional>
115
+ #include <initializer_list>
116
+ #include <utility>
117
+
118
+ namespace torch {
119
+
120
+ /// NOTE: Currently `torch::tensor(...)` doesn't support mixed data types
121
+ /// (i.e. `torch::tensor({{bool, 2.0}})` doesn't work). We might be able to
122
+ /// support it in the future by iterating over all sub-lists to find
123
+ /// the largest data type that can represent all of the elements, or by using
124
+ /// variadic templates.
125
+ ///
126
+ /// NOTE: C++ `torch::tensor` with a floating-point type or an `at::ArrayRef` / `std::vector` /
127
+ /// (nested) braced-init-list of floating-point types always produces a tensor of dtype
128
+ /// `torch::get_default_dtype()`, matching Python `torch.tensor` behavior.
129
+ ///
130
+ /// NOTE: C++ `torch::tensor` with an integer type or an `at::ArrayRef` / `std::vector` /
131
+ /// (nested) braced-init-list of integer types always produces a tensor of dtype `at::kLong`
132
+ /// (aka. int64_t), matching Python `torch.tensor` behavior.
133
+ ///
134
+ /// NOTE: The following dtypes are not supported by `torch::tensor` currently:
135
+ /// - `unsigned int`
136
+ /// - `unsigned long int`
137
+ /// - `unsigned long long int`
138
+ /// - `long long int`
139
+ inline at::Tensor tensor(detail::TensorDataContainer tensor_data_container, const at::TensorOptions& options = {}) {
140
+ return autograd::make_variable(
141
+ // note: we remove the requires_grad setting from the TensorOptions because
142
+ // it is ignored anyways (and we actually have an assertion that it isn't set
143
+ // which would fail otherwise). We handle requires_grad explicitly here
144
+ // instead of passing it through to the kernel.
145
+ tensor_data_container.convert_to_tensor(options.requires_grad(c10::nullopt)),
146
+ options.requires_grad());
147
+ }
148
+
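// Usage sketch for the dtype rules noted above (values are illustrative):
//
//   auto a = torch::tensor({1, 2, 3});        // integer literals -> at::kLong
//   auto b = torch::tensor({1.0, 2.0, 3.0});  // floating-point literals -> torch::get_default_dtype()
//   auto c = torch::tensor({1.0, 2.0},
//                          torch::dtype(torch::kFloat64).requires_grad(true));
//   // requires_grad is stripped from the options and applied by make_variable above.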
149
+ /// A generic deleter function.
150
+ using Deleter = std::function<void(void*)>;
151
+ using at::MemoryFormat;
152
+
153
+ /// Exposes the given `data` as a `Tensor` without taking ownership of the
154
+ /// original data. `sizes` should specify the shape of the tensor, `strides` the
155
+ /// stride in each dimension. The `deleter` function (a
156
+ /// `std::function<void(void*)>`) will be called on the `data` when the Tensor
157
+ /// data would normally be deallocated. The `TensorOptions` specify additional
158
+ /// configuration options for the returned tensor, such as what type to
159
+ /// interpret the `data` as.
160
+ inline at::Tensor from_blob(
161
+ void* data,
162
+ at::IntArrayRef sizes,
163
+ at::IntArrayRef strides,
164
+ const Deleter& deleter,
165
+ const at::TensorOptions& options = at::TensorOptions()) {
166
+ at::Tensor tensor = ([&]() {
167
+ at::AutoDispatchBelowAutograd guard; // TODO: remove
168
+ at::tracer::impl::NoTracerDispatchMode tracer_guard;
169
+ return at::from_blob(data, sizes, strides, deleter, options.requires_grad(c10::nullopt));
170
+ })();
171
+ return autograd::make_variable(tensor, options.requires_grad());
172
+ }
173
+
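// Usage sketch for the overload above: wrap caller-owned memory without copying,
// and free it when the tensor's data would normally be deallocated (values illustrative):
//
//   float* data = new float[6];
//   auto t = torch::from_blob(
//       data, /*sizes=*/{2, 3}, /*strides=*/{3, 1},
//       [](void* p) { delete[] static_cast<float*>(p); },
//       torch::dtype(torch::kFloat));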
174
+ /// Exposes the given `data` as a `Tensor` without taking ownership of the
175
+ /// original data. `sizes` should specify the shape of the tensor, `strides` the
176
+ /// stride in each dimension. The `TensorOptions`
177
+ /// specify additional configuration options for the returned tensor, such as
178
+ /// what type to interpret the `data` as.
179
+ inline at::Tensor from_blob(
180
+ void* data,
181
+ at::IntArrayRef sizes,
182
+ at::IntArrayRef strides,
183
+ const at::TensorOptions& options = at::TensorOptions()) {
184
+ at::Tensor tensor = ([&]() {
185
+ at::AutoDispatchBelowAutograd guard; // TODO: remove
186
+ at::tracer::impl::NoTracerDispatchMode tracer_guard;
187
+ return at::from_blob(data, sizes, strides, options.requires_grad(c10::nullopt));
188
+ })();
189
+ return autograd::make_variable(tensor, options.requires_grad());
190
+ }
191
+
192
+ /// Exposes the given `data` as a `Tensor` without taking ownership of the
193
+ /// original data. `sizes` should specify the shape of the tensor. The `deleter`
194
+ /// (a `std::function<void(void*)>`) function will be called on the `data` when
195
+ /// the Tensor data would normally be deallocated. The `TensorOptions` specify
196
+ /// additional configuration options for the returned tensor, such as what type
197
+ /// to interpret the `data` as.
198
+ inline at::Tensor from_blob(
199
+ void* data,
200
+ at::IntArrayRef sizes,
201
+ const Deleter& deleter,
202
+ const at::TensorOptions& options = at::TensorOptions()) {
203
+ at::Tensor tensor = ([&]() {
204
+ at::AutoDispatchBelowAutograd guard; // TODO: remove
205
+ at::tracer::impl::NoTracerDispatchMode tracer_guard;
206
+ return at::from_blob(data, sizes, deleter, options.requires_grad(c10::nullopt));
207
+ })();
208
+ return autograd::make_variable(tensor, options.requires_grad());
209
+ }
210
+
211
+ /// Exposes the given `data` as a `Tensor` without taking ownership of the
212
+ /// original data. `sizes` should specify the shape of the tensor. The
213
+ /// `TensorOptions` specify additional configuration options for the returned
214
+ /// tensor, such as what type to interpret the `data` as.
215
+ inline at::Tensor from_blob(
216
+ void* data,
217
+ at::IntArrayRef sizes,
218
+ const at::TensorOptions& options = at::TensorOptions()) {
219
+ at::Tensor tensor = ([&]() {
220
+ at::AutoDispatchBelowAutograd guard; // TODO: remove
221
+ at::tracer::impl::NoTracerDispatchMode tracer_guard;
222
+ return at::from_blob(data, sizes, options.requires_grad(c10::nullopt));
223
+ })();
224
+ return autograd::make_variable(tensor, options.requires_grad());
225
+ }
226
+
227
+ inline at::Tensor _make_dep_token(at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
228
+ at::AutoDispatchBelowADInplaceOrView guard;
229
+ return autograd::make_variable(at::_make_dep_token(at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
230
+ }
231
+ inline at::Tensor _cudnn_init_dropout_state(double dropout, bool train, int64_t dropout_seed, at::TensorOptions options) {
232
+ at::AutoDispatchBelowADInplaceOrView guard;
233
+ return autograd::make_variable(at::_cudnn_init_dropout_state(dropout, train, dropout_seed, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
234
+ }
235
+ inline at::Tensor arange(const at::Scalar & end, at::TensorOptions options = {}) {
236
+ at::AutoDispatchBelowADInplaceOrView guard;
237
+ return autograd::make_variable(at::arange(end, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
238
+ }
239
+ inline at::Tensor arange(const at::Scalar & start, const at::Scalar & end, at::TensorOptions options = {}) {
240
+ at::AutoDispatchBelowADInplaceOrView guard;
241
+ return autograd::make_variable(at::arange(start, end, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
242
+ }
243
+ inline at::Tensor arange(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, at::TensorOptions options = {}) {
244
+ at::AutoDispatchBelowADInplaceOrView guard;
245
+ return autograd::make_variable(at::arange(start, end, step, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
246
+ }
247
+ inline at::Tensor bartlett_window(int64_t window_length, at::TensorOptions options = {}) {
248
+ at::AutoDispatchBelowADInplaceOrView guard;
249
+ return autograd::make_variable(at::bartlett_window(window_length, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
250
+ }
251
+ inline at::Tensor bartlett_window(int64_t window_length, bool periodic, at::TensorOptions options = {}) {
252
+ at::AutoDispatchBelowADInplaceOrView guard;
253
+ return autograd::make_variable(at::bartlett_window(window_length, periodic, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
254
+ }
255
+ inline at::Tensor blackman_window(int64_t window_length, at::TensorOptions options = {}) {
256
+ at::AutoDispatchBelowADInplaceOrView guard;
257
+ return autograd::make_variable(at::blackman_window(window_length, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
258
+ }
259
+ inline at::Tensor blackman_window(int64_t window_length, bool periodic, at::TensorOptions options = {}) {
260
+ at::AutoDispatchBelowADInplaceOrView guard;
261
+ return autograd::make_variable(at::blackman_window(window_length, periodic, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
262
+ }
263
+ inline at::Tensor empty(at::IntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
264
+ at::AutoDispatchBelowADInplaceOrView guard;
265
+ return autograd::make_variable(at::empty(size, names, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
266
+ }
267
+ inline at::Tensor empty(at::IntArrayRef size, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
268
+ at::AutoDispatchBelowADInplaceOrView guard;
269
+ return autograd::make_variable(at::empty(size, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
270
+ }
271
+ inline at::Tensor empty_symint(c10::SymIntArrayRef size, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
272
+ at::AutoDispatchBelowADInplaceOrView guard;
273
+ return autograd::make_variable(at::empty_symint(size, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
274
+ }
275
+ inline at::Tensor empty_permuted(at::IntArrayRef size, at::IntArrayRef physical_layout, at::TensorOptions options = {}) {
276
+ at::AutoDispatchBelowADInplaceOrView guard;
277
+ return autograd::make_variable(at::empty_permuted(size, physical_layout, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
278
+ }
279
+ inline at::Tensor empty_permuted_symint(c10::SymIntArrayRef size, at::IntArrayRef physical_layout, at::TensorOptions options = {}) {
280
+ at::AutoDispatchBelowADInplaceOrView guard;
281
+ return autograd::make_variable(at::empty_permuted_symint(size, physical_layout, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
282
+ }
283
+ inline at::Tensor _empty_affine_quantized(at::IntArrayRef size, at::TensorOptions options = {}, double scale = 1, int64_t zero_point = 0, c10::optional<at::MemoryFormat> memory_format = MemoryFormat::Contiguous) {
284
+ at::AutoDispatchBelowADInplaceOrView guard;
285
+ return autograd::make_variable(at::_empty_affine_quantized(size, at::TensorOptions(options).requires_grad(c10::nullopt), scale, zero_point, memory_format), /*requires_grad=*/options.requires_grad());
286
+ }
287
+ inline at::Tensor _empty_affine_quantized_symint(c10::SymIntArrayRef size, at::TensorOptions options = {}, double scale = 1, int64_t zero_point = 0, c10::optional<at::MemoryFormat> memory_format = MemoryFormat::Contiguous) {
288
+ at::AutoDispatchBelowADInplaceOrView guard;
289
+ return autograd::make_variable(at::_empty_affine_quantized_symint(size, at::TensorOptions(options).requires_grad(c10::nullopt), scale, zero_point, memory_format), /*requires_grad=*/options.requires_grad());
290
+ }
291
+ inline at::Tensor _empty_per_channel_affine_quantized(at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = MemoryFormat::Contiguous) {
292
+ at::AutoDispatchBelowADInplaceOrView guard;
293
+ return autograd::make_variable(at::_empty_per_channel_affine_quantized(size, scales, zero_points, axis, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
294
+ }
295
+ inline at::Tensor _empty_per_channel_affine_quantized_symint(c10::SymIntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = MemoryFormat::Contiguous) {
296
+ at::AutoDispatchBelowADInplaceOrView guard;
297
+ return autograd::make_variable(at::_empty_per_channel_affine_quantized_symint(size, scales, zero_points, axis, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
298
+ }
299
+ inline at::Tensor empty_quantized(at::IntArrayRef size, const at::Tensor & qtensor, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
300
+ at::AutoDispatchBelowADInplaceOrView guard;
301
+ return autograd::make_variable(at::empty_quantized(size, qtensor, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
302
+ }
303
+ inline at::Tensor empty_like(const at::Tensor & self, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
304
+ at::AutoDispatchBelowADInplaceOrView guard;
305
+ return autograd::make_variable(at::empty_like(self, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
306
+ }
307
+ inline at::Tensor empty_strided(at::IntArrayRef size, at::IntArrayRef stride, at::TensorOptions options = {}) {
308
+ at::AutoDispatchBelowADInplaceOrView guard;
309
+ return autograd::make_variable(at::empty_strided(size, stride, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
310
+ }
311
+ inline at::Tensor empty_strided_symint(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::TensorOptions options = {}) {
312
+ at::AutoDispatchBelowADInplaceOrView guard;
313
+ return autograd::make_variable(at::empty_strided_symint(size, stride, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
314
+ }
315
+ inline at::Tensor eye(int64_t n, at::TensorOptions options = {}) {
316
+ at::AutoDispatchBelowADInplaceOrView guard;
317
+ return autograd::make_variable(at::eye(n, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
318
+ }
319
+ inline at::Tensor eye_symint(c10::SymInt n, at::TensorOptions options = {}) {
320
+ at::AutoDispatchBelowADInplaceOrView guard;
321
+ return autograd::make_variable(at::eye_symint(n, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
322
+ }
323
+ inline at::Tensor eye(int64_t n, int64_t m, at::TensorOptions options = {}) {
324
+ at::AutoDispatchBelowADInplaceOrView guard;
325
+ return autograd::make_variable(at::eye(n, m, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
326
+ }
327
+ inline at::Tensor eye_symint(c10::SymInt n, c10::SymInt m, at::TensorOptions options = {}) {
328
+ at::AutoDispatchBelowADInplaceOrView guard;
329
+ return autograd::make_variable(at::eye_symint(n, m, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
330
+ }
331
+ inline at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::DimnameList> names, at::TensorOptions options = {}) {
332
+ at::AutoDispatchBelowADInplaceOrView guard;
333
+ return autograd::make_variable(at::full(size, fill_value, names, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
334
+ }
335
+ inline at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options = {}) {
336
+ at::AutoDispatchBelowADInplaceOrView guard;
337
+ return autograd::make_variable(at::full(size, fill_value, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
338
+ }
339
+ inline at::Tensor full_symint(c10::SymIntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options = {}) {
340
+ at::AutoDispatchBelowADInplaceOrView guard;
341
+ return autograd::make_variable(at::full_symint(size, fill_value, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
342
+ }
343
+ inline at::Tensor full_like(const at::Tensor & self, const at::Scalar & fill_value, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
344
+ at::AutoDispatchBelowADInplaceOrView guard;
345
+ return autograd::make_variable(at::full_like(self, fill_value, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
346
+ }
347
+ inline at::Tensor from_file(c10::string_view filename, c10::optional<bool> shared = c10::nullopt, c10::optional<int64_t> size = 0, at::TensorOptions options = {}) {
348
+ at::AutoDispatchBelowADInplaceOrView guard;
349
+ return autograd::make_variable(at::from_file(filename, shared, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
350
+ }
351
+ inline at::Tensor hann_window(int64_t window_length, at::TensorOptions options = {}) {
352
+ at::AutoDispatchBelowADInplaceOrView guard;
353
+ return autograd::make_variable(at::hann_window(window_length, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
354
+ }
355
+ inline at::Tensor hann_window(int64_t window_length, bool periodic, at::TensorOptions options = {}) {
356
+ at::AutoDispatchBelowADInplaceOrView guard;
357
+ return autograd::make_variable(at::hann_window(window_length, periodic, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
358
+ }
359
+ inline at::Tensor hamming_window(int64_t window_length, at::TensorOptions options = {}) {
360
+ at::AutoDispatchBelowADInplaceOrView guard;
361
+ return autograd::make_variable(at::hamming_window(window_length, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
362
+ }
363
+ inline at::Tensor hamming_window(int64_t window_length, bool periodic, at::TensorOptions options = {}) {
364
+ at::AutoDispatchBelowADInplaceOrView guard;
365
+ return autograd::make_variable(at::hamming_window(window_length, periodic, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
366
+ }
367
+ inline at::Tensor hamming_window(int64_t window_length, bool periodic, double alpha, at::TensorOptions options = {}) {
368
+ at::AutoDispatchBelowADInplaceOrView guard;
369
+ return autograd::make_variable(at::hamming_window(window_length, periodic, alpha, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
370
+ }
371
+ inline at::Tensor hamming_window(int64_t window_length, bool periodic, double alpha, double beta, at::TensorOptions options = {}) {
372
+ at::AutoDispatchBelowADInplaceOrView guard;
373
+ return autograd::make_variable(at::hamming_window(window_length, periodic, alpha, beta, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
374
+ }
375
+ inline at::Tensor kaiser_window(int64_t window_length, at::TensorOptions options = {}) {
376
+ at::AutoDispatchBelowADInplaceOrView guard;
377
+ return autograd::make_variable(at::kaiser_window(window_length, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
378
+ }
379
+ inline at::Tensor kaiser_window(int64_t window_length, bool periodic, at::TensorOptions options = {}) {
380
+ at::AutoDispatchBelowADInplaceOrView guard;
381
+ return autograd::make_variable(at::kaiser_window(window_length, periodic, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
382
+ }
383
+ inline at::Tensor kaiser_window(int64_t window_length, bool periodic, double beta, at::TensorOptions options = {}) {
384
+ at::AutoDispatchBelowADInplaceOrView guard;
385
+ return autograd::make_variable(at::kaiser_window(window_length, periodic, beta, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
386
+ }
387
+ inline at::Tensor linspace(const at::Scalar & start, const at::Scalar & end, int64_t steps, at::TensorOptions options = {}) {
388
+ at::AutoDispatchBelowADInplaceOrView guard;
389
+ return autograd::make_variable(at::linspace(start, end, steps, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
390
+ }
391
+ inline at::Tensor linspace(const at::Tensor & start, const at::Tensor & end, int64_t steps, at::TensorOptions options = {}) {
392
+ at::AutoDispatchBelowADInplaceOrView guard;
393
+ return autograd::make_variable(at::linspace(start, end, steps, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
394
+ }
395
+ inline at::Tensor linspace(const at::Tensor & start, const at::Scalar & end, int64_t steps, at::TensorOptions options = {}) {
396
+ at::AutoDispatchBelowADInplaceOrView guard;
397
+ return autograd::make_variable(at::linspace(start, end, steps, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
398
+ }
399
+ inline at::Tensor linspace(const at::Scalar & start, const at::Tensor & end, int64_t steps, at::TensorOptions options = {}) {
400
+ at::AutoDispatchBelowADInplaceOrView guard;
401
+ return autograd::make_variable(at::linspace(start, end, steps, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
402
+ }
403
+ inline at::Tensor logspace(const at::Scalar & start, const at::Scalar & end, int64_t steps, double base = 10.0, at::TensorOptions options = {}) {
404
+ at::AutoDispatchBelowADInplaceOrView guard;
405
+ return autograd::make_variable(at::logspace(start, end, steps, base, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
406
+ }
407
+ inline at::Tensor logspace(const at::Tensor & start, const at::Tensor & end, int64_t steps, double base = 10.0, at::TensorOptions options = {}) {
408
+ at::AutoDispatchBelowADInplaceOrView guard;
409
+ return autograd::make_variable(at::logspace(start, end, steps, base, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
410
+ }
411
+ inline at::Tensor logspace(const at::Tensor & start, const at::Scalar & end, int64_t steps, double base = 10.0, at::TensorOptions options = {}) {
412
+ at::AutoDispatchBelowADInplaceOrView guard;
413
+ return autograd::make_variable(at::logspace(start, end, steps, base, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
414
+ }
415
+ inline at::Tensor logspace(const at::Scalar & start, const at::Tensor & end, int64_t steps, double base = 10.0, at::TensorOptions options = {}) {
416
+ at::AutoDispatchBelowADInplaceOrView guard;
417
+ return autograd::make_variable(at::logspace(start, end, steps, base, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
418
+ }
419
+ inline at::Tensor ones(at::IntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options = {}) {
420
+ at::AutoDispatchBelowADInplaceOrView guard;
421
+ return autograd::make_variable(at::ones(size, names, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
422
+ }
423
+ inline at::Tensor ones(at::IntArrayRef size, at::TensorOptions options = {}) {
424
+ at::AutoDispatchBelowADInplaceOrView guard;
425
+ return autograd::make_variable(at::ones(size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
426
+ }
427
+ inline at::Tensor ones_symint(c10::SymIntArrayRef size, at::TensorOptions options = {}) {
428
+ at::AutoDispatchBelowADInplaceOrView guard;
429
+ return autograd::make_variable(at::ones_symint(size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
430
+ }
431
+ inline at::Tensor ones_like(const at::Tensor & self, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
432
+ at::AutoDispatchBelowADInplaceOrView guard;
433
+ return autograd::make_variable(at::ones_like(self, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
434
+ }
435
+ inline at::Tensor scalar_tensor(const at::Scalar & s, at::TensorOptions options = {}) {
436
+ at::AutoDispatchBelowADInplaceOrView guard;
437
+ return autograd::make_variable(at::scalar_tensor(s, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
438
+ }
439
+ inline at::Tensor rand(at::IntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options = {}) {
440
+ at::AutoDispatchBelowADInplaceOrView guard;
441
+ return autograd::make_variable(at::rand(size, names, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
442
+ }
443
+ inline at::Tensor rand_symint(c10::SymIntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options = {}) {
444
+ at::AutoDispatchBelowADInplaceOrView guard;
445
+ return autograd::make_variable(at::rand_symint(size, names, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
446
+ }
447
+ inline at::Tensor rand(at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::TensorOptions options = {}) {
448
+ at::AutoDispatchBelowADInplaceOrView guard;
449
+ return autograd::make_variable(at::rand(size, generator, names, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
450
+ }
451
+ inline at::Tensor rand_symint(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::TensorOptions options = {}) {
452
+ at::AutoDispatchBelowADInplaceOrView guard;
453
+ return autograd::make_variable(at::rand_symint(size, generator, names, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
454
+ }
455
+ inline at::Tensor rand(at::IntArrayRef size, at::TensorOptions options = {}) {
456
+ at::AutoDispatchBelowADInplaceOrView guard;
457
+ return autograd::make_variable(at::rand(size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
458
+ }
459
+ inline at::Tensor rand_symint(c10::SymIntArrayRef size, at::TensorOptions options = {}) {
460
+ at::AutoDispatchBelowADInplaceOrView guard;
461
+ return autograd::make_variable(at::rand_symint(size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
462
+ }
463
+ inline at::Tensor rand(at::IntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options = {}) {
464
+ at::AutoDispatchBelowADInplaceOrView guard;
465
+ return autograd::make_variable(at::rand(size, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
466
+ }
467
+ inline at::Tensor rand_symint(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options = {}) {
468
+ at::AutoDispatchBelowADInplaceOrView guard;
469
+ return autograd::make_variable(at::rand_symint(size, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
470
+ }
471
+ inline at::Tensor rand_like(const at::Tensor & self, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
472
+ at::AutoDispatchBelowADInplaceOrView guard;
473
+ return autograd::make_variable(at::rand_like(self, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
474
+ }
475
+ inline at::Tensor randint(int64_t high, at::IntArrayRef size, at::TensorOptions options = at::kLong) {
476
+ at::AutoDispatchBelowADInplaceOrView guard;
477
+ return autograd::make_variable(at::randint(high, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
478
+ }
479
+ inline at::Tensor randint_symint(c10::SymInt high, c10::SymIntArrayRef size, at::TensorOptions options = at::kLong) {
480
+ at::AutoDispatchBelowADInplaceOrView guard;
481
+ return autograd::make_variable(at::randint_symint(high, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
482
+ }
483
+ inline at::Tensor randint(int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options = at::kLong) {
484
+ at::AutoDispatchBelowADInplaceOrView guard;
485
+ return autograd::make_variable(at::randint(high, size, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
486
+ }
487
+ inline at::Tensor randint_symint(c10::SymInt high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options = at::kLong) {
488
+ at::AutoDispatchBelowADInplaceOrView guard;
489
+ return autograd::make_variable(at::randint_symint(high, size, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
490
+ }
491
+ inline at::Tensor randint(int64_t low, int64_t high, at::IntArrayRef size, at::TensorOptions options = at::kLong) {
492
+ at::AutoDispatchBelowADInplaceOrView guard;
493
+ return autograd::make_variable(at::randint(low, high, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
494
+ }
495
+ inline at::Tensor randint_symint(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, at::TensorOptions options = at::kLong) {
496
+ at::AutoDispatchBelowADInplaceOrView guard;
497
+ return autograd::make_variable(at::randint_symint(low, high, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
498
+ }
499
+ inline at::Tensor randint(int64_t low, int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options = at::kLong) {
500
+ at::AutoDispatchBelowADInplaceOrView guard;
501
+ return autograd::make_variable(at::randint(low, high, size, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
502
+ }
503
+ inline at::Tensor randint_symint(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options = at::kLong) {
504
+ at::AutoDispatchBelowADInplaceOrView guard;
505
+ return autograd::make_variable(at::randint_symint(low, high, size, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
506
+ }
507
+ inline at::Tensor randint_like(const at::Tensor & self, int64_t high, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
508
+ at::AutoDispatchBelowADInplaceOrView guard;
509
+ return autograd::make_variable(at::randint_like(self, high, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
510
+ }
511
+ inline at::Tensor randint_like_symint(const at::Tensor & self, c10::SymInt high, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
512
+ at::AutoDispatchBelowADInplaceOrView guard;
513
+ return autograd::make_variable(at::randint_like_symint(self, high, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
514
+ }
515
+ inline at::Tensor randint_like(const at::Tensor & self, int64_t low, int64_t high, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
516
+ at::AutoDispatchBelowADInplaceOrView guard;
517
+ return autograd::make_variable(at::randint_like(self, low, high, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
518
+ }
519
+ inline at::Tensor randint_like_symint(const at::Tensor & self, c10::SymInt low, c10::SymInt high, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
520
+ at::AutoDispatchBelowADInplaceOrView guard;
521
+ return autograd::make_variable(at::randint_like_symint(self, low, high, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
522
+ }
523
+ inline at::Tensor randn(at::IntArrayRef size, at::TensorOptions options = {}) {
524
+ at::AutoDispatchBelowADInplaceOrView guard;
525
+ return autograd::make_variable(at::randn(size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
526
+ }
527
+ inline at::Tensor randn_symint(c10::SymIntArrayRef size, at::TensorOptions options = {}) {
528
+ at::AutoDispatchBelowADInplaceOrView guard;
529
+ return autograd::make_variable(at::randn_symint(size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
530
+ }
531
+ inline at::Tensor randn(at::IntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options = {}) {
532
+ at::AutoDispatchBelowADInplaceOrView guard;
533
+ return autograd::make_variable(at::randn(size, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
534
+ }
535
+ inline at::Tensor randn_symint(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options = {}) {
536
+ at::AutoDispatchBelowADInplaceOrView guard;
537
+ return autograd::make_variable(at::randn_symint(size, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
538
+ }
539
+ inline at::Tensor randn(at::IntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options = {}) {
540
+ at::AutoDispatchBelowADInplaceOrView guard;
541
+ return autograd::make_variable(at::randn(size, names, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
542
+ }
543
+ inline at::Tensor randn_symint(c10::SymIntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options = {}) {
544
+ at::AutoDispatchBelowADInplaceOrView guard;
545
+ return autograd::make_variable(at::randn_symint(size, names, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
546
+ }
547
+ inline at::Tensor randn(at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::TensorOptions options = {}) {
548
+ at::AutoDispatchBelowADInplaceOrView guard;
549
+ return autograd::make_variable(at::randn(size, generator, names, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
550
+ }
551
+ inline at::Tensor randn_symint(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::TensorOptions options = {}) {
552
+ at::AutoDispatchBelowADInplaceOrView guard;
553
+ return autograd::make_variable(at::randn_symint(size, generator, names, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
554
+ }
555
+ inline at::Tensor randn_like(const at::Tensor & self, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
556
+ at::AutoDispatchBelowADInplaceOrView guard;
557
+ return autograd::make_variable(at::randn_like(self, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
558
+ }
559
+ inline at::Tensor randperm(int64_t n, at::TensorOptions options = at::kLong) {
560
+ at::AutoDispatchBelowADInplaceOrView guard;
561
+ return autograd::make_variable(at::randperm(n, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
562
+ }
563
+ inline at::Tensor randperm_symint(c10::SymInt n, at::TensorOptions options = at::kLong) {
564
+ at::AutoDispatchBelowADInplaceOrView guard;
565
+ return autograd::make_variable(at::randperm_symint(n, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
566
+ }
567
+ inline at::Tensor randperm(int64_t n, c10::optional<at::Generator> generator, at::TensorOptions options = at::kLong) {
568
+ at::AutoDispatchBelowADInplaceOrView guard;
569
+ return autograd::make_variable(at::randperm(n, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
570
+ }
571
+ inline at::Tensor randperm_symint(c10::SymInt n, c10::optional<at::Generator> generator, at::TensorOptions options = at::kLong) {
572
+ at::AutoDispatchBelowADInplaceOrView guard;
573
+ return autograd::make_variable(at::randperm_symint(n, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
574
+ }
575
+ inline at::Tensor range(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step = 1, at::TensorOptions options = {}) {
576
+ at::AutoDispatchBelowADInplaceOrView guard;
577
+ return autograd::make_variable(at::range(start, end, step, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
578
+ }
579
+ inline at::Tensor range(const at::Scalar & start, const at::Scalar & end, at::TensorOptions options = {}) {
580
+ at::AutoDispatchBelowADInplaceOrView guard;
581
+ return autograd::make_variable(at::range(start, end, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
582
+ }
583
+ inline at::Tensor zeros(at::IntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options = {}) {
584
+ at::AutoDispatchBelowADInplaceOrView guard;
585
+ return autograd::make_variable(at::zeros(size, names, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
586
+ }
587
+ inline at::Tensor _efficientzerotensor(at::IntArrayRef size, at::TensorOptions options = {}) {
588
+ at::AutoDispatchBelowADInplaceOrView guard;
589
+ return autograd::make_variable(at::_efficientzerotensor(size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
590
+ }
591
+ inline at::Tensor _efficientzerotensor_symint(c10::SymIntArrayRef size, at::TensorOptions options = {}) {
592
+ at::AutoDispatchBelowADInplaceOrView guard;
593
+ return autograd::make_variable(at::_efficientzerotensor_symint(size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
594
+ }
595
+ inline at::Tensor zeros(at::IntArrayRef size, at::TensorOptions options = {}) {
596
+ at::AutoDispatchBelowADInplaceOrView guard;
597
+ return autograd::make_variable(at::zeros(size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
598
+ }
599
+ inline at::Tensor zeros_symint(c10::SymIntArrayRef size, at::TensorOptions options = {}) {
600
+ at::AutoDispatchBelowADInplaceOrView guard;
601
+ return autograd::make_variable(at::zeros_symint(size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
602
+ }
603
+ inline at::Tensor zeros_like(const at::Tensor & self, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
604
+ at::AutoDispatchBelowADInplaceOrView guard;
605
+ return autograd::make_variable(at::zeros_like(self, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
606
+ }
607
+ inline at::Tensor sparse_compressed_tensor(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
608
+ at::AutoDispatchBelowADInplaceOrView guard;
609
+ return autograd::make_variable(at::sparse_compressed_tensor(compressed_indices, plain_indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
610
+ }
611
+ inline at::Tensor sparse_compressed_tensor_symint(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::SymIntArrayRef size, at::TensorOptions options) {
612
+ at::AutoDispatchBelowADInplaceOrView guard;
613
+ return autograd::make_variable(at::sparse_compressed_tensor_symint(compressed_indices, plain_indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
614
+ }
615
+ inline at::Tensor sparse_csr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
616
+ at::AutoDispatchBelowADInplaceOrView guard;
617
+ return autograd::make_variable(at::sparse_csr_tensor(crow_indices, col_indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
618
+ }
619
+ inline at::Tensor sparse_csc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
620
+ at::AutoDispatchBelowADInplaceOrView guard;
621
+ return autograd::make_variable(at::sparse_csc_tensor(ccol_indices, row_indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
622
+ }
623
+ inline at::Tensor sparse_bsr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
624
+ at::AutoDispatchBelowADInplaceOrView guard;
625
+ return autograd::make_variable(at::sparse_bsr_tensor(crow_indices, col_indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
626
+ }
627
+ inline at::Tensor sparse_bsc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
628
+ at::AutoDispatchBelowADInplaceOrView guard;
629
+ return autograd::make_variable(at::sparse_bsc_tensor(ccol_indices, row_indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
630
+ }
631
+ inline at::Tensor sparse_compressed_tensor(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::TensorOptions options) {
632
+ at::AutoDispatchBelowADInplaceOrView guard;
633
+ return autograd::make_variable(at::sparse_compressed_tensor(compressed_indices, plain_indices, values, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
634
+ }
635
+ inline at::Tensor sparse_csr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::TensorOptions options) {
636
+ at::AutoDispatchBelowADInplaceOrView guard;
637
+ return autograd::make_variable(at::sparse_csr_tensor(crow_indices, col_indices, values, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
638
+ }
639
+ inline at::Tensor sparse_csc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::TensorOptions options) {
640
+ at::AutoDispatchBelowADInplaceOrView guard;
641
+ return autograd::make_variable(at::sparse_csc_tensor(ccol_indices, row_indices, values, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
642
+ }
643
+ inline at::Tensor sparse_bsr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::TensorOptions options) {
644
+ at::AutoDispatchBelowADInplaceOrView guard;
645
+ return autograd::make_variable(at::sparse_bsr_tensor(crow_indices, col_indices, values, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
646
+ }
647
+ inline at::Tensor sparse_bsc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::TensorOptions options) {
648
+ at::AutoDispatchBelowADInplaceOrView guard;
649
+ return autograd::make_variable(at::sparse_bsc_tensor(ccol_indices, row_indices, values, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
650
+ }
651
+ inline at::Tensor _sparse_compressed_tensor_unsafe(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options = {}) {
652
+ at::AutoDispatchBelowADInplaceOrView guard;
653
+ return autograd::make_variable(at::_sparse_compressed_tensor_unsafe(compressed_indices, plain_indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
654
+ }
655
+ inline at::Tensor _sparse_compressed_tensor_unsafe_symint(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::SymIntArrayRef size, at::TensorOptions options = {}) {
656
+ at::AutoDispatchBelowADInplaceOrView guard;
657
+ return autograd::make_variable(at::_sparse_compressed_tensor_unsafe_symint(compressed_indices, plain_indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
658
+ }
659
+ inline at::Tensor _sparse_csr_tensor_unsafe(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options = {}) {
660
+ at::AutoDispatchBelowADInplaceOrView guard;
661
+ return autograd::make_variable(at::_sparse_csr_tensor_unsafe(crow_indices, col_indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
662
+ }
663
+ inline at::Tensor _sparse_csc_tensor_unsafe(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options = {}) {
664
+ at::AutoDispatchBelowADInplaceOrView guard;
665
+ return autograd::make_variable(at::_sparse_csc_tensor_unsafe(ccol_indices, row_indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
666
+ }
667
+ inline at::Tensor _sparse_bsr_tensor_unsafe(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options = {}) {
668
+ at::AutoDispatchBelowADInplaceOrView guard;
669
+ return autograd::make_variable(at::_sparse_bsr_tensor_unsafe(crow_indices, col_indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
670
+ }
671
+ inline at::Tensor _sparse_bsc_tensor_unsafe(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options = {}) {
672
+ at::AutoDispatchBelowADInplaceOrView guard;
673
+ return autograd::make_variable(at::_sparse_bsc_tensor_unsafe(ccol_indices, row_indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
674
+ }
675
+ inline at::Tensor sparse_coo_tensor(at::IntArrayRef size, at::TensorOptions options) {
676
+ at::AutoDispatchBelowADInplaceOrView guard;
677
+ return autograd::make_variable(at::sparse_coo_tensor(size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
678
+ }
679
+ inline at::Tensor sparse_coo_tensor(const at::Tensor & indices, const at::Tensor & values, at::TensorOptions options = {}, c10::optional<bool> is_coalesced = c10::nullopt) {
680
+ at::AutoDispatchBelowADInplaceOrView guard;
681
+ return autograd::make_variable(at::sparse_coo_tensor(indices, values, at::TensorOptions(options).requires_grad(c10::nullopt), is_coalesced), /*requires_grad=*/options.requires_grad());
682
+ }
683
+ inline at::Tensor sparse_coo_tensor(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options = {}, c10::optional<bool> is_coalesced = c10::nullopt) {
684
+ at::AutoDispatchBelowADInplaceOrView guard;
685
+ return autograd::make_variable(at::sparse_coo_tensor(indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt), is_coalesced), /*requires_grad=*/options.requires_grad());
686
+ }
687
+ inline at::Tensor _sparse_coo_tensor_unsafe(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options = {}, c10::optional<bool> is_coalesced = c10::nullopt) {
688
+ at::AutoDispatchBelowADInplaceOrView guard;
689
+ return autograd::make_variable(at::_sparse_coo_tensor_unsafe(indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt), is_coalesced), /*requires_grad=*/options.requires_grad());
690
+ }
691
+ inline at::Tensor _sparse_coo_tensor_unsafe_symint(const at::Tensor & indices, const at::Tensor & values, c10::SymIntArrayRef size, at::TensorOptions options = {}, c10::optional<bool> is_coalesced = c10::nullopt) {
692
+ at::AutoDispatchBelowADInplaceOrView guard;
693
+ return autograd::make_variable(at::_sparse_coo_tensor_unsafe_symint(indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt), is_coalesced), /*requires_grad=*/options.requires_grad());
694
+ }
695
+ inline at::Tensor _sparse_coo_tensor_with_dims(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, at::TensorOptions options) {
696
+ at::AutoDispatchBelowADInplaceOrView guard;
697
+ return autograd::make_variable(at::_sparse_coo_tensor_with_dims(sparse_dim, dense_dim, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
698
+ }
699
+ inline at::Tensor _sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values, at::TensorOptions options, c10::optional<bool> is_coalesced = c10::nullopt) {
700
+ at::AutoDispatchBelowADInplaceOrView guard;
701
+ return autograd::make_variable(at::_sparse_coo_tensor_with_dims_and_tensors(sparse_dim, dense_dim, size, indices, values, at::TensorOptions(options).requires_grad(c10::nullopt), is_coalesced), /*requires_grad=*/options.requires_grad());
702
+ }
703
+ inline at::Tensor _sparse_coo_tensor_with_dims_and_tensors_symint(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, at::TensorOptions options, c10::optional<bool> is_coalesced = c10::nullopt) {
704
+ at::AutoDispatchBelowADInplaceOrView guard;
705
+ return autograd::make_variable(at::_sparse_coo_tensor_with_dims_and_tensors_symint(sparse_dim, dense_dim, size, indices, values, at::TensorOptions(options).requires_grad(c10::nullopt), is_coalesced), /*requires_grad=*/options.requires_grad());
706
+ }
707
+ inline at::Tensor _to_copy(const at::Tensor & self, at::TensorOptions options = {}, bool non_blocking = false, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
708
+ at::AutoDispatchBelowADInplaceOrView guard;
709
+ return autograd::make_variable(at::_to_copy(self, at::TensorOptions(options).requires_grad(c10::nullopt), non_blocking, memory_format), /*requires_grad=*/options.requires_grad());
710
+ }
711
+ inline at::Tensor tril_indices(int64_t row, int64_t col, int64_t offset = 0, at::TensorOptions options = at::kLong) {
712
+ at::AutoDispatchBelowADInplaceOrView guard;
713
+ return autograd::make_variable(at::tril_indices(row, col, offset, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
714
+ }
715
+ inline at::Tensor triu_indices(int64_t row, int64_t col, int64_t offset = 0, at::TensorOptions options = at::kLong) {
716
+ at::AutoDispatchBelowADInplaceOrView guard;
717
+ return autograd::make_variable(at::triu_indices(row, col, offset, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
718
+ }
719
+ inline at::Tensor normal(double mean, double std, at::IntArrayRef size, c10::optional<at::Generator> generator = c10::nullopt, at::TensorOptions options = {}) {
720
+ at::AutoDispatchBelowADInplaceOrView guard;
721
+ return autograd::make_variable(at::normal(mean, std, size, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
722
+ }
723
+ inline at::Tensor normal_symint(double mean, double std, c10::SymIntArrayRef size, c10::optional<at::Generator> generator = c10::nullopt, at::TensorOptions options = {}) {
724
+ at::AutoDispatchBelowADInplaceOrView guard;
725
+ return autograd::make_variable(at::normal_symint(mean, std, size, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
726
+ }
727
+ inline at::Tensor fft_fftfreq(int64_t n, double d = 1.0, at::TensorOptions options = {}) {
728
+ at::AutoDispatchBelowADInplaceOrView guard;
729
+ return autograd::make_variable(at::fft_fftfreq(n, d, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
730
+ }
731
+ inline at::Tensor fft_rfftfreq(int64_t n, double d = 1.0, at::TensorOptions options = {}) {
732
+ at::AutoDispatchBelowADInplaceOrView guard;
733
+ return autograd::make_variable(at::fft_rfftfreq(n, d, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
734
+ }
735
+
736
+ } // namespace torch
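
Note: every factory wrapper in this header follows the same pattern: enter AutoDispatchBelowADInplaceOrView, call the underlying at:: factory with requires_grad stripped out of the TensorOptions, then re-wrap the result via autograd::make_variable with the requested requires_grad flag. A minimal sketch of how this surfaces through the public C++ frontend (shapes and values below are placeholders):

    #include <iostream>
    #include <torch/torch.h>

    int main() {
      // requires_grad is applied by the wrapper after the factory call,
      // so `w` becomes an autograd leaf.
      auto w = torch::zeros({2, 3}, torch::dtype(torch::kFloat).requires_grad(true));

      // Index-producing factories default to kLong, matching the
      // `at::TensorOptions options = at::kLong` defaults above.
      auto idx = torch::randperm(6);

      auto loss = (w * 2).sum();
      loss.backward();                // populates w.grad()
      std::cout << w.grad() << '\n';  // 2x3 tensor filled with 2s
      return 0;
    }
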
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/graph_task.h ADDED
@@ -0,0 +1,226 @@
1
+ #pragma once
2
+ #include <ATen/ThreadLocalState.h>
3
+ #include <ATen/core/Tensor.h>
4
+ #include <c10/util/ThreadLocal.h>
5
+ #include <torch/csrc/autograd/input_buffer.h>
6
+ #include <torch/csrc/autograd/utils/warnings.h>
7
+ #include <vector>
8
+
9
+ namespace torch::autograd {
10
+
11
+ using edge_list = std::vector<Edge>;
12
+ struct ReadyQueue;
13
+
14
+ static constexpr int NO_DEVICE = -2;
15
+ static constexpr int CPU_DEVICE = -1;
16
+
17
+ // GraphTask holds metadata needed for a single execution of backward()
18
+ struct GraphTask : std::enable_shared_from_this<GraphTask> {
19
+ std::atomic<uint64_t> outstanding_tasks_{0};
20
+ // Indicates if an error occurred while executing any task. When this is
21
+ // true, it signals all threads to stop executing.
22
+ std::atomic_bool has_error_{false};
23
+ std::atomic_bool future_completed_{false};
24
+ // It is safe to read keep_graph_ without synchronization
25
+ bool keep_graph_;
26
+
27
+ // To protect reads/writes to not_ready_, dependencies_, captured_vars_,
28
+ // has_error_, future_result_, cpu_ready_queue_, and leaf_streams.
29
+ std::mutex mutex_;
30
+ std::unordered_map<Node*, InputBuffer> not_ready_;
31
+ std::unordered_map<Node*, int> dependencies_;
32
+
33
+ // Records the nodes that are in the graph
34
+ std::unordered_set<Node*> nodes_in_graph_;
35
+ c10::SmallVector<Node*, 4> graph_roots_;
36
+ // Note [Exec info]
37
+ // Exec info is created for each GraphTask, which allows filtering paths on
38
+ // the graph that are not needed. Its semantics are slightly involved. If it's
39
+ // empty, it means the task is run in a "default" mode, which means that all
40
+ // next_edges we encounter should get executed. If it's not empty, only
41
+ // functions that have an entry and this entry has needed == True should be
42
+ // executed. exec_info is only empty when the graph is executed via
43
+ // .backward() and the inputs parameter is not passed. Otherwise, when
44
+ // executed through .grad(), or when inputs arg is specified for .backward(),
45
+ // exec_info will be non-empty.
46
+ //
47
+ struct ExecInfo {
48
+ struct Capture {
49
+ Capture(const Capture&) = delete;
50
+ Capture(Capture&&) = default;
51
+
52
+ Capture(int input_idx, int output_idx)
53
+ : input_idx_(input_idx), output_idx_(output_idx) {}
54
+ int input_idx_; // within Node inputs
55
+ int output_idx_; // within the output vector of a GraphTask
56
+
57
+ // This hook will be executed after a grad is captured. The captured
58
+ // grad will be replaced by the return value of the hook.
59
+ struct GradCaptureHook {
60
+ virtual ~GradCaptureHook() = default;
61
+ virtual at::Tensor operator()(const at::Tensor& grad) = 0;
62
+ };
63
+ // NOTE [Deprecated capture hooks]
64
+ //
65
+ // The current status of capture hooks is that we continue to support
66
+ // the single usage of it by distributed in the dist_engine. If anyone
67
+ // else needs to use it for other purposes, they should file an issue.
68
+ //
69
+ // Capture hooks were originally created because there did not exist
70
+ // any way to register pre/post hooks to grad_fn in a way such that it
71
+ // would still be executed even if that is the grad_fn of a Tensor
72
+ // passed as input= of .grad. As far as I know, only dist_engine uses
73
+ // this hook.
74
+ //
75
+ // However, there are other alternatives today like tensor hooks that can
76
+ // replace the usage that originally motivated its creation. Also,
77
+ // Capture hooks are an outlier in terms of the types of hook that
78
+ // autograd offers in how it is registered and behaves, e.g. it is a hook
79
+ // registered not to the graph, but to a particular graph_task! This makes
80
+ // it a burden to maintain.
81
+ //
82
+ // It would be very nice to clean up/do a migration from pre/post
83
+ // hooks used in distributed to use tensor hooks, but for now we just
84
+ // mark this method as deprecated to prevent additional usage.
85
+ //
86
+ // If you still think you really need to capture hooks, please file an
87
+ // issue (and tag autograd).
88
+ const std::vector<std::unique_ptr<GradCaptureHook>>&
89
+ DO_NOT_USE_DEPRECATED_get_capture_hooks() const {
90
+ return hooks_;
91
+ }
92
+ // See NOTE [deprecated capture hooks]
93
+ void DO_NOT_USE_DEPRECATED_register_capture_hook(
94
+ std::unique_ptr<GradCaptureHook> hook) {
95
+ hooks_.push_back(std::move(hook));
96
+ }
97
+
98
+ private:
99
+ // The hooks will be called one by one in the order as they were added.
100
+ // The input grad of a hook will be the output of its preceding hook. The
101
+ // first hook will take the captured grad as the input. The output of the
102
+ // last hook will replace the captured grad.
103
+ std::vector<std::unique_ptr<GradCaptureHook>> hooks_;
104
+ };
105
+
106
+ bool should_execute() const {
107
+ return needed_ || captures_;
108
+ }
109
+
110
+ bool needed_ = false;
111
+ std::unique_ptr<std::vector<Capture>> captures_;
112
+ };
113
+ // exec_info_ is safe to read without synchronization
114
+ std::unordered_map<Node*, ExecInfo> exec_info_;
115
+ // Captured variables are the grads captured for return to the user. After
116
+ // execution of the GraphTask is completed, the captured_vars_ are moved
117
+ // out of the GraphTask and are no longer valid.
118
+ std::vector<Variable> captured_vars_;
119
+
120
+ // Note: this field is not ready to be used until the proper
121
+ // `thread_locals_.set_grad_mode()` call in the constructor.
122
+ at::ThreadLocalState thread_locals_ = at::ThreadLocalState();
123
+
124
+ std::unordered_set<c10::Stream> leaf_streams;
125
+
126
+ // Per-device current streams of the execute() that called this GraphTask.
127
+ // These will be synced with leaf_streams in exec_post_processing.
128
+ std::vector<c10::optional<c10::Stream>> caller_current_streams_;
129
+
130
+ // Collects caller_current_streams_ for the accelerator device.
131
+ void stash_current_streams();
132
+
133
+ void init_to_execute(
134
+ Node& graph_root,
135
+ const edge_list& outputs,
136
+ bool accumulate_grad,
137
+ uint64_t min_topo_nr);
138
+
139
+ // The value of worker_device in the thread that created this task.
140
+ // See Note [Reentrant backwards]
141
+ // Safe to read owner_ and reentrant_depth_ without synchronization
142
+ int owner_;
143
+ // The number of parent graph tasks for this graph task
144
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
145
+ const int reentrant_depth_;
146
+
147
+ bool can_checkpoint() const {
148
+ return exec_info_.empty();
149
+ }
150
+
151
+ // check if the GraphTask is completed or not
152
+ bool completed();
153
+ // mark the graph task as completed and trigger post processing
154
+ void mark_as_completed_and_run_post_processing();
155
+
156
+ // Set an appropriate exception on this graph_task which was encountered while
157
+ // running the provided function.
158
+ void set_exception(std::exception_ptr eptr, const std::shared_ptr<Node>& fn);
159
+
160
+ // Set an appropriate exception on this graph_task which was encountered while
161
+ // running the provided function. But doesn't signal completion on
162
+ // 'future_result_' right away. The user needs to explicitly mark
163
+ // 'future_result_' completed with an appropriate exception.
164
+ void set_exception_without_signal(const std::shared_ptr<Node>& fn);
165
+
166
+ // Whether or not to stop execution for this GraphTask when an error is
167
+ // encountered. When set to true, this would cause Engine::execute() to throw
168
+ // an exception as soon as the autograd engine receives an exception.
169
+ bool exit_on_error_;
170
+
171
+ // CPU threads are dedicated to processing CPU work for the backward they
172
+ // invoked. So any given graph task maintains its own cpu_ready_queue_ where
173
+ // you should send work for it to be done. We memoize the cpu_ready_queue_ per
174
+ // GraphTask so that we know which ready queue we should push to if we are on
175
+ // a device thread (i.e. GPU) but the next NodeTask should be run on CPU.
176
+ std::shared_ptr<ReadyQueue> cpu_ready_queue_;
177
+
178
+ // Future representing the completion of the graph task. Notified when all
179
+ // tasks are done.
180
+ c10::intrusive_ptr<at::ivalue::Future> future_result_;
181
+
182
+ // Final callbacks installed during execution of this GraphTask
183
+ std::vector<std::function<void()>> final_callbacks_;
184
+ // To protect reads and writes to final_callbacks_. Intentionally no reusing
185
+ // mutex_ as the two are protecting different data structures.
186
+ std::mutex final_callbacks_lock_;
187
+
188
+ utils::DelayWarningHandler warning_handler_;
189
+
190
+ uint64_t id_;
191
+
192
+ GraphTask(
193
+ bool keep_graph,
194
+ bool grad_mode,
195
+ int reentrant_depth,
196
+ std::shared_ptr<ReadyQueue> cpu_ready_queue,
197
+ c10::SmallVector<Node*, 4> graph_roots,
198
+ bool exit_on_error = false);
199
+
200
+ private:
201
+ // run GraphTask post processing
202
+ void exec_post_processing();
203
+ };
204
+
205
+ // The guard that sets and restores current_graph_task.
206
+ class GraphTaskGuard {
207
+ public:
208
+ explicit GraphTaskGuard(std::shared_ptr<GraphTask> graph_task);
209
+ ~GraphTaskGuard();
210
+
211
+ void restore_current_graph_task();
212
+
213
+ private:
214
+ std::shared_ptr<GraphTask> last_graph_task_;
215
+ };
216
+
217
+ TORCH_API const std::unordered_map<Node*, GraphTask::ExecInfo>*
218
+ get_current_graph_task_exec_info();
219
+ TORCH_API const std::unordered_set<Node*>*
220
+ get_current_graph_task_nodes_in_graph();
221
+ TORCH_API bool get_current_graph_task_keep_graph();
222
+ TORCH_API std::vector<Node*> get_current_graph_task_execution_order();
223
+ TORCH_API int get_current_graph_task_id();
224
+ void add_node_to_current_graph_task_exec_info(Node* fn);
225
+
226
+ } // namespace torch::autograd
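
Note: the filtering rule from Note [Exec info] reduces to one predicate: with an empty exec_info_ every node encountered during the backward pass runs; otherwise a node runs only if its entry is marked needed_ or has captures. A standalone sketch of that decision using toy stand-ins (not the real engine types):

    #include <memory>
    #include <unordered_map>
    #include <vector>

    struct Node {};  // stand-in for torch::autograd::Node

    struct ExecInfo {
      bool needed = false;
      std::unique_ptr<std::vector<int>> captures;  // stand-in for the Capture list
      bool should_execute() const { return needed || captures != nullptr; }
    };

    // Empty map => "default" mode (plain .backward() with no inputs=): run everything.
    bool node_should_run(const std::unordered_map<Node*, ExecInfo>& exec_info, Node* fn) {
      if (exec_info.empty()) return true;
      auto it = exec_info.find(fn);
      return it != exec_info.end() && it->second.should_execute();
    }
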
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/input_buffer.h ADDED
@@ -0,0 +1,45 @@
1
+ #pragma once
2
+
3
+ // The InputBuffer class accumulates a list of Variables for use by a
4
+ // function. It implements logic to avoid modifying the passed
5
+ // values in-place (adding an input twice will accumulate the result).
6
+ // This behaviour is needed and used only in backward graphs.
7
+
8
+ #include <utility>
9
+ #include <vector>
10
+
11
+ #include <c10/core/Stream.h>
12
+ #include <c10/util/Optional.h>
13
+ #include <torch/csrc/autograd/variable.h>
14
+
15
+ namespace torch::autograd {
16
+
17
+ struct InputBuffer {
18
+ explicit InputBuffer(size_t size) : buffer(size) {}
19
+ InputBuffer(const InputBuffer& other) = delete;
20
+ InputBuffer(InputBuffer&& other) = default;
21
+ explicit InputBuffer(variable_list&& inputs) : buffer(std::move(inputs)){};
22
+ InputBuffer& operator=(InputBuffer&& other) = default;
23
+
24
+ // Accumulates the variable at a specified index.
25
+ // The optional CUDA streams determine which stream the accumulation
26
+ // is run on and how the addition is synchronized.
27
+ TORCH_API void add(
28
+ size_t pos,
29
+ Variable&& var,
30
+ const c10::optional<c10::Stream>& opt_producer_stream,
31
+ const c10::optional<c10::Stream>& opt_consumer_stream);
32
+
33
+ at::Device device() const;
34
+
35
+ Variable operator[](size_t pos) {
36
+ return buffer[pos];
37
+ }
38
+
39
+ // Returns the inputs as a list of variables. Destroys given InputBuffer.
40
+ static std::vector<Variable> variables(InputBuffer&& g);
41
+
42
+ std::vector<Variable> buffer;
43
+ };
44
+
45
+ } // namespace torch::autograd
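
Note: the contract that matters to callers is the accumulate-on-duplicate behaviour described in the comment above: a second add() at the same position sums with what is already there instead of overwriting it, and never modifies the incoming value in place. A rough standalone sketch of that contract with plain at::Tensor and the stream arguments omitted (toy code, not the real InputBuffer):

    #include <ATen/ATen.h>
    #include <vector>

    struct ToyInputBuffer {
      explicit ToyInputBuffer(size_t size) : buffer(size) {}

      void add(size_t pos, const at::Tensor& var) {
        if (!buffer[pos].defined()) {
          buffer[pos] = var;                // first gradient for this input slot
        } else {
          buffer[pos] = buffer[pos] + var;  // out-of-place accumulation
        }
      }

      std::vector<at::Tensor> buffer;
    };
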
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/jit_decomp_interface.h ADDED
@@ -0,0 +1,50 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/Tensor.h>
4
+ #include <ATen/core/function_schema.h>
5
+ #include <c10/macros/Export.h>
6
+
7
+ // NOTE: [Jit Decomposition Interface]
8
+ //
9
+ // For some context of why we need this at all, see NOTE: [forward-mode AD
10
+ // decompositions mechanism]
11
+ //
12
+ // Introducing that mechanism from the NOTE is problematic because:
13
+ // - it relies on TorchScript, so now VariableTypeX.cpp depends on TorchScript.
14
+ // - there exist internal builds like lite_trainer, which depend on VariableType
15
+ // but do not depend on TorchScript.
16
+ //
17
+ // For internal builds like lite_trainer to pass, and for OSS builds that
18
+ // do depend on TorchScript to still support the forward AD decomp mechanism, we
19
+ // implement a PImpl pattern to avoid a static dependency in favor of a dynamic
20
+ // one
21
+ // - during static initialization time, if the library is built with TorchScript
22
+ // setJitDecompImpl is called in decomposition_registry.cpp setting a global
23
+ // ptr to the impl
24
+ // - when the program is run, if getJitDecompImpl returns a non-null ptr, we can
25
+ // carry on normally, otherwise we gracefully error out
26
+ //
27
+ // For extra context, see VariableHooksInterface.h, where a similar technique
28
+ // is used
29
+
30
+ namespace torch::autograd::impl {
31
+
32
+ struct TORCH_API JitDecompInterface {
33
+ virtual ~JitDecompInterface() = default;
34
+ virtual bool has_jit_decomposition(
35
+ const c10::FunctionSchema& schema) const = 0;
36
+ virtual void run_jit_decomposition(
37
+ const c10::OperatorHandle& op,
38
+ jit::Stack* stack) const = 0;
39
+ };
40
+
41
+ TORCH_API void setJitDecompImpl(JitDecompInterface* impl);
42
+ TORCH_API JitDecompInterface* getJitDecompImpl();
43
+
44
+ struct TORCH_API JitDecompRegisterer {
45
+ explicit JitDecompRegisterer(JitDecompInterface* impl) {
46
+ setJitDecompImpl(impl);
47
+ }
48
+ };
49
+
50
+ } // namespace torch::autograd::impl
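
Note: the PImpl trick described in NOTE [Jit Decomposition Interface] is just a global pointer populated at static-initialization time by whichever library happens to be linked in. The same shape, reproduced in isolation with a toy interface (not the real JitDecompInterface):

    #include <iostream>

    struct DecompImpl {                      // toy stand-in for JitDecompInterface
      virtual ~DecompImpl() = default;
      virtual void run() const = 0;
    };

    static DecompImpl* g_impl = nullptr;
    void setImpl(DecompImpl* impl) { g_impl = impl; }
    DecompImpl* getImpl() { return g_impl; }

    struct Registerer {                      // toy stand-in for JitDecompRegisterer
      explicit Registerer(DecompImpl* impl) { setImpl(impl); }
    };

    // Lives in the optional library: linking it in registers the impl.
    struct RealImpl : DecompImpl {
      void run() const override { std::cout << "decomposition available\n"; }
    };
    static RealImpl real_impl;
    static Registerer reg(&real_impl);

    int main() {
      if (auto* impl = getImpl()) {
        impl->run();  // library linked: carry on normally
      } else {
        std::cout << "no decomposition support built in\n";  // graceful error path
      }
    }
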
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/profiler.h ADDED
@@ -0,0 +1,4 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/autograd/profiler_kineto.h>
4
+ #include <torch/csrc/autograd/profiler_legacy.h>
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/profiler_kineto.h ADDED
@@ -0,0 +1,188 @@
1
+ #pragma once
2
+
3
+ #include <string>
4
+ #include <vector>
5
+
6
+ #include <torch/csrc/profiler/api.h>
7
+ #include <torch/csrc/profiler/events.h>
8
+ #include <torch/csrc/profiler/stubs/base.h>
9
+ #include <torch/csrc/profiler/util.h>
10
+
11
+ namespace torch {
12
+
13
+ namespace profiler::impl {
14
+ struct Result;
15
+ namespace kineto {
16
+ struct ActivityTraceWrapper;
17
+ } // namespace kineto
18
+ } // namespace profiler::impl
19
+
20
+ namespace autograd::profiler {
21
+ using experimental_event_t = std::shared_ptr<torch::profiler::impl::Result>;
22
+ using extra_meta_t = std::unordered_map<std::string, std::string>;
23
+
24
+ struct TORCH_API KinetoEvent {
25
+ KinetoEvent(
26
+ const std::shared_ptr<const torch::profiler::impl::Result>&,
27
+ const bool verbose);
28
+
29
+ uint64_t startThreadId() const;
30
+ uint64_t endThreadId() const;
31
+ uint8_t activityType() const;
32
+ uint64_t fwdThreadId() const;
33
+ bool hasShapes() const;
34
+ const c10::ArrayRef<std::vector<int64_t>> shapes() const;
35
+ bool hasTypes() const;
36
+ const c10::ArrayRef<std::string> dtypes() const;
37
+ bool hasConcreteInputs() const;
38
+ const c10::ArrayRef<c10::IValue> concreteInputs() const;
39
+ uint64_t flops() const;
40
+ int64_t sequenceNr() const;
41
+ bool hasStack() const;
42
+ const c10::ArrayRef<std::string> stack() const;
43
+ uint8_t scope() const;
44
+ bool hasModuleHierarchy() const;
45
+ const c10::ArrayRef<std::string> moduleHierarchy() const;
46
+ int64_t debugHandle() const;
47
+ std::string name() const;
48
+ c10::DeviceType deviceType() const;
49
+ int deviceIndex() const;
50
+ int64_t nBytes() const;
51
+ uint64_t startUs() const;
52
+ uint64_t durationUs() const;
53
+ bool isAsync() const;
54
+ uint64_t correlationId() const;
55
+ uint64_t linkedCorrelationId() const;
56
+ int64_t deviceResourceId() const;
57
+ std::string backend() const;
58
+ bool isPythonFunction() const;
59
+ int64_t cudaElapsedUs() const;
60
+ int64_t privateuse1ElapsedUs() const;
61
+ void getPerfEventCounters(torch::profiler::perf_counters_t&) const;
62
+ extra_meta_t extraMeta() const;
63
+
64
+ private:
65
+ torch::profiler::impl::ProfilerVoidEventStub fallbackStart() const;
66
+ torch::profiler::impl::ProfilerVoidEventStub fallbackEnd() const;
67
+
68
+ std::shared_ptr<const torch::profiler::impl::Result> result_;
69
+ std::vector<std::string> python_stack_;
70
+
71
+ // Copy fields from result so we can return ArrayRefs.
72
+ std::vector<std::vector<int64_t>> shapes_;
73
+ std::vector<std::string> dtypes_;
74
+ std::vector<c10::IValue> concrete_inputs_;
75
+ };
76
+
77
+ // Consolidating events returned directly from Kineto
78
+ // with events manually created by us (e.g. start/stop marks,
79
+ // memory allocation events)
80
+ struct TORCH_API ProfilerResult {
81
+ ProfilerResult();
82
+ ProfilerResult(
83
+ uint64_t start_time,
84
+ std::vector<KinetoEvent> events,
85
+ std::unique_ptr<torch::profiler::impl::kineto::ActivityTraceWrapper>&&
86
+ trace,
87
+ std::vector<experimental_event_t>&& event_tree);
88
+ ~ProfilerResult();
89
+
90
+ uint64_t trace_start_us() const {
91
+ return trace_start_us_;
92
+ }
93
+
94
+ const std::vector<KinetoEvent>& events() const {
95
+ return events_;
96
+ }
97
+
98
+ const std::vector<experimental_event_t>& event_tree() const {
99
+ return event_tree_;
100
+ }
101
+
102
+ void save(const std::string& path);
103
+
104
+ private:
105
+ uint64_t trace_start_us_ = 0;
106
+ std::vector<KinetoEvent> events_;
107
+ std::unique_ptr<torch::profiler::impl::kineto::ActivityTraceWrapper> trace_;
108
+ std::vector<experimental_event_t> event_tree_;
109
+ };
110
+
111
+ /*
112
+ * This API is used by backends to record latency of events that
113
+ * happened in the backend but were not visible to pytorch runtime.
114
+ * For example, if part of the model is lowered to a dsp backend, then
115
+ * the execution of that part of the model is delegated to the backend.
116
+ * When the backend finishes execution, it has the option to provide profiling
117
+ * information (latency only at the moment) corresponding to different operators
118
+ * that were executed in the backend.
119
+ * When such events are recorded by a backend using this API, the event
120
+ * records will be collected by the active kineto profiler. If no kineto profiler
121
+ * is active then the event is ignored.
122
+ * This provides us with a way to generate all the profiling information
123
+ * for a model regardless of where model (or part of it) executed.
124
+ * @param start_time_us: start time in us of the event
125
+ * @param end_time_us: end time in us of the event
126
+ * @param debug_handle: debug handle to correlate this event/op with
127
+ * model level module/source information
128
+ * @param scope: scope of the event, e.g. LITE_INTERPRETER, RECORD_FN etc.
129
+ * @param event_name: name of the event, e.g. op name
130
+ * @param backend_name: name of the backend where the event took place.
131
+ */
132
+ TORCH_API void reportBackendEventToActiveKinetoProfiler(
133
+ const int64_t start_time_us,
134
+ const int64_t end_time_us,
135
+ const int64_t debug_handle,
136
+ const at::RecordScope scope,
137
+ const std::string& event_name,
138
+ const std::string& backend_name);
139
+
140
+ TORCH_API void enableProfiler(
141
+ const torch::profiler::impl::ProfilerConfig& config,
142
+ const std::set<torch::profiler::impl::ActivityType>& activities,
143
+ const std::unordered_set<at::RecordScope>& scopes = {});
144
+
145
+ /*
146
+ * Same as enableProfiler but with callback to do post-processing of
147
+ * KinetoEvents.
148
+ * enableProfilerWithEventPostProcess enables profiler to capture
149
+ * specified activities, with specified RecordFunction scope, if any.
150
+ * Additionally, it takes a functor that does in-place post processing of
151
+ * events, e.g. populate stack trace or module hierarchy information lazily
152
+ * using debug_handle.
153
+ * Example usage is with lite interpreter that has recording scope of
154
+ * LITE_INTERPRETER. In this case lite interpreter runtime, records debug
155
+ * handles in RecordFunction, along with other information. Debug handles are
156
+ * eventually passed down to KinetoEvent and recorded as part of the event.
157
+ * KinetoEdgeCPUProfiler, in torch/csrc/jit/mobile/profiler_edge.cpp, enables
158
+ * profiler using post-processing callback, via
159
+ * enableProfilerWithEventPostProcess, that takes these debug handles and
160
+ * generates stack trace and module hierarchy information, once profiling is
161
+ * done.
162
+ */
163
+ using post_process_t = std::function<void(
164
+ /*debug_handle */ int64_t,
165
+ /*jit_stack */ std::vector<std::string>&,
166
+ /*jit_modules */ std::vector<std::string>&)>;
167
+ TORCH_API void enableProfilerWithEventPostProcess(
168
+ const torch::profiler::impl::ProfilerConfig& config,
169
+ const std::set<torch::profiler::impl::ActivityType>& activities,
170
+ post_process_t&& cb,
171
+ const std::unordered_set<at::RecordScope>& scopes = {});
172
+
173
+ TORCH_API std::unique_ptr<ProfilerResult> disableProfiler();
174
+
175
+ TORCH_API void prepareProfiler(
176
+ const torch::profiler::impl::ProfilerConfig& config,
177
+ const std::set<torch::profiler::impl::ActivityType>& activities);
178
+
179
+ } // namespace autograd::profiler
180
+
181
+ namespace profiler::impl {
182
+
183
+ // Experimental.
184
+ TORCH_API void _reportVulkanEventToProfiler(vulkan_id_t id);
185
+
186
+ } // namespace profiler::impl
187
+
188
+ } // namespace torch
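
Note: going only by the declarations above, a typical C++ capture is: build a ProfilerConfig, prepare/enable the profiler for a set of activity types, run the region of interest, then disable and save the trace. The exact ProfilerConfig constructor arguments depend on the profiler headers in a given build, so treat this as a hedged sketch rather than a canonical recipe:

    #include <memory>
    #include <torch/torch.h>
    #include <torch/csrc/autograd/profiler_kineto.h>

    void profile_region() {
      using namespace torch::autograd::profiler;
      using torch::profiler::impl::ActivityType;
      using torch::profiler::impl::ProfilerConfig;
      using torch::profiler::impl::ProfilerState;

      ProfilerConfig config(ProfilerState::KINETO);  // assumed single-argument constructor
      prepareProfiler(config, {ActivityType::CPU});
      enableProfiler(config, {ActivityType::CPU});

      // ... workload to be profiled ...
      auto x = torch::randn({128, 128});
      auto y = x.matmul(x);

      std::unique_ptr<ProfilerResult> result = disableProfiler();
      result->save("trace.json");  // viewable in chrome://tracing / Perfetto
    }
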
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/profiler_legacy.h ADDED
@@ -0,0 +1,406 @@
1
+ #pragma once
2
+
3
+ #include <cstdint>
4
+ #include <iostream>
5
+ #include <memory>
6
+ #include <mutex>
7
+ #include <string>
8
+ #include <vector>
9
+
10
+ #include <torch/csrc/Export.h>
11
+ #include <torch/csrc/profiler/api.h>
12
+ #include <torch/csrc/profiler/stubs/base.h>
13
+ #include <torch/csrc/profiler/util.h>
14
+
15
+ namespace torch::autograd {
16
+
17
+ struct Node;
18
+
19
+ namespace profiler {
20
+
21
+ enum class C10_API_ENUM EventKind : uint16_t {
22
+ Mark,
23
+ PushRange,
24
+ PopRange,
25
+ MemoryAlloc,
26
+ };
27
+
28
+ // To be deprecated, once we switch to Kineto profiling
29
+ struct TORCH_API LegacyEvent {
30
+ LegacyEvent(
31
+ EventKind kind,
32
+ at::StringView name,
33
+ uint16_t thread_id,
34
+ bool record_cuda,
35
+ at::RecordFunctionHandle handle = 0,
36
+ std::vector<std::vector<int64_t>>&& shapes = {},
37
+ int64_t node_id = -1,
38
+ bool is_async = false)
39
+ : name_(std::move(name)),
40
+ kind_(kind),
41
+ thread_id_(thread_id),
42
+ handle_(handle),
43
+ shapes_(std::move(shapes)),
44
+ node_id_(node_id),
45
+ is_async_(is_async) {
46
+ record(record_cuda);
47
+ }
48
+
49
+ // Constructor to be used in conjunction with LegacyEvent::fromIValue.
50
+ LegacyEvent(
51
+ EventKind kind,
52
+ at::StringView name,
53
+ uint16_t thread_id,
54
+ at::RecordFunctionHandle handle,
55
+ std::vector<std::vector<int64_t>>&& shapes,
56
+ int64_t node_id,
57
+ bool is_remote,
58
+ int64_t cpu_memory_usage,
59
+ int64_t cpu_ns,
60
+ bool cuda_recorded,
61
+ int64_t cuda_memory_usage = 0,
62
+ c10::DeviceIndex device = -1,
63
+ double cuda_us = -1)
64
+ : cpu_ns_(cpu_ns),
65
+ name_(std::move(name)),
66
+ kind_(kind),
67
+ thread_id_(thread_id),
68
+ handle_(handle),
69
+ shapes_(std::move(shapes)),
70
+ cpu_memory_usage_(cpu_memory_usage),
71
+ cuda_memory_usage_(cuda_memory_usage),
72
+ device_(device),
73
+ node_id_(node_id),
74
+ is_remote_(is_remote),
75
+ cuda_us_(static_cast<int64_t>(cuda_us)) {
76
+ // Sanity check values that were deserialized
77
+ TORCH_INTERNAL_ASSERT(cpu_ns_ > 0);
78
+ if (cuda_recorded) {
79
+ TORCH_INTERNAL_ASSERT(device_ >= 0);
80
+ TORCH_INTERNAL_ASSERT(cuda_us_ >= 0);
81
+ }
82
+ }
83
+
84
+ // Returns IValues corresponding to event structure, to be used for
85
+ // serialization.
86
+ at::IValue toIValue() const;
87
+
88
+ // Reconstructs an event from IValues given by toIValue.
89
+ static LegacyEvent fromIValue(const at::IValue& eventIValue);
90
+
91
+ void record(bool record_cuda);
92
+
93
+ std::string kindStr() const {
94
+ switch (kind_) {
95
+ case EventKind::Mark:
96
+ return "mark";
97
+ case EventKind::PushRange:
98
+ return "push";
99
+ case EventKind::PopRange:
100
+ return "pop";
101
+ case EventKind::MemoryAlloc:
102
+ return "memory_alloc";
103
+ }
104
+ throw std::runtime_error("unknown event kind");
105
+ }
106
+
107
+ EventKind kind() const {
108
+ return kind_;
109
+ }
110
+
111
+ const char* name() const {
112
+ return name_.str();
113
+ }
114
+
115
+ uint64_t threadId() const {
116
+ return thread_id_;
117
+ }
118
+
119
+ std::vector<std::vector<int64_t>> shapes() const {
120
+ return shapes_;
121
+ }
122
+
123
+ double cpuElapsedUs(const LegacyEvent& e) const {
124
+ return static_cast<double>(e.cpu_ns_ - cpu_ns_) / (1000.0);
125
+ }
126
+
127
+ void setCpuUs(int64_t cpu_us) {
128
+ cpu_ns_ = cpu_us * 1000;
129
+ }
130
+
131
+ double cpuUs() const {
132
+ return static_cast<double>(cpu_ns_) / (1000.0);
133
+ }
134
+
135
+ double cudaElapsedUs(const LegacyEvent& e) const;
136
+
137
+ bool hasCuda() const {
138
+ return cuda_event != nullptr || (isRemote() && device_ != -1);
139
+ }
140
+
141
+ c10::DeviceIndex device() const {
142
+ return device_;
143
+ }
144
+
145
+ void updateMemoryStats(int64_t alloc_size, c10::Device device) {
146
+ if (device.is_cuda() || device.type() == c10::DeviceType::HIP) {
147
+ cuda_memory_usage_ = alloc_size;
148
+ } else if (
149
+ device.is_cpu() || device.type() == c10::DeviceType::MKLDNN ||
150
+ device.type() == c10::DeviceType::IDEEP) {
151
+ cpu_memory_usage_ = alloc_size;
152
+ } else {
153
+ LOG(WARNING) << "Unsupported memory profiling device: " << device;
154
+ }
155
+ }
156
+
157
+ int64_t cpuMemoryUsage() const {
158
+ return cpu_memory_usage_;
159
+ }
160
+
161
+ int64_t cudaMemoryUsage() const {
162
+ return cuda_memory_usage_;
163
+ }
164
+
165
+ at::RecordFunctionHandle handle() const {
166
+ return handle_;
167
+ }
168
+
169
+ // Node ID corresponding to this event.
170
+ int64_t nodeId() const {
171
+ return node_id_;
172
+ }
173
+
174
+ // Set Node ID on this event.
175
+ void setNodeId(int64_t node_id) {
176
+ node_id_ = node_id;
177
+ }
178
+
179
+ void setName(at::StringView newName_) {
180
+ name_ = std::move(newName_);
181
+ }
182
+
183
+ bool isRemote() const {
184
+ return is_remote_;
185
+ }
186
+
187
+ void setCudaUs(int64_t cuda_us) {
188
+ cuda_us_ = cuda_us;
189
+ }
190
+
191
+ void setSequenceNr(int64_t sequence_nr) {
192
+ sequence_nr_ = sequence_nr;
193
+ }
194
+
195
+ int64_t sequenceNr() const {
196
+ return sequence_nr_;
197
+ }
198
+
199
+ void setCorrelationId(uint64_t correlation_id) {
200
+ correlation_id_ = correlation_id;
201
+ }
202
+
203
+ uint64_t correlationId() const {
204
+ return correlation_id_;
205
+ }
206
+
207
+ const std::vector<std::string>& stack() const {
208
+ return stack_;
209
+ }
210
+
211
+ void setStack(const std::vector<std::string>& stack) {
212
+ stack_ = stack;
213
+ }
214
+
215
+ uint64_t fwdThreadId() const {
216
+ return fwd_thread_id_;
217
+ }
218
+
219
+ void setFwdThreadId(uint64_t fwd_thread_id) {
220
+ fwd_thread_id_ = fwd_thread_id;
221
+ }
222
+
223
+ uint8_t scope() const {
224
+ return scope_;
225
+ }
226
+
227
+ void setScope(uint8_t scope) {
228
+ scope_ = scope;
229
+ }
230
+
231
+ const std::unordered_map<std::string, c10::IValue>& extraArgs() const {
232
+ return extra_args_;
233
+ }
234
+
235
+ void setExtraArgs(std::unordered_map<std::string, c10::IValue>&& save_args) {
236
+ extra_args_ = std::move(save_args);
237
+ }
238
+
239
+ uint64_t flops() {
240
+ return flops_;
241
+ }
242
+
243
+ bool isAsync() {
244
+ return is_async_;
245
+ }
246
+
247
+ void setFlops(uint64_t flops) {
248
+ flops_ = flops;
249
+ }
250
+
251
+ private:
252
+ // signed to allow for negative intervals, initialized for safety.
253
+ int64_t cpu_ns_ = 0;
254
+ at::StringView name_;
255
+ EventKind kind_;
256
+ uint64_t thread_id_;
257
+ uint64_t fwd_thread_id_{0};
258
+ at::RecordFunctionHandle handle_{0};
259
+ std::vector<std::vector<int64_t>> shapes_;
260
+ int64_t cpu_memory_usage_ = 0;
261
+ int64_t cuda_memory_usage_ = 0;
262
+ c10::DeviceIndex device_ = -1;
263
+ torch::profiler::impl::ProfilerVoidEventStub cuda_event = nullptr;
264
+ int64_t node_id_ = 0;
265
+ bool is_remote_ = false;
266
+ int64_t cuda_us_ = -1;
267
+ int64_t sequence_nr_ = -1;
268
+ bool is_async_ = false;
269
+
270
+ std::vector<std::string> stack_;
271
+ uint8_t scope_{0};
272
+ uint64_t correlation_id_{0};
273
+ // Extra arguments for computing op flops
274
+ std::unordered_map<std::string, c10::IValue> extra_args_;
275
+ uint64_t flops_ = 0;
276
+ };
277
+
278
+ // a linked-list of fixed-size vectors, to avoid
279
+ // a std::vector resize from taking a large amount of time inside
280
+ // a profiling event
281
+ struct RangeEventList {
282
+ RangeEventList() {
283
+ events_.reserve(kReservedCapacity);
284
+ }
285
+
286
+ template <typename... Args>
287
+ void record(Args&&... args) {
288
+ std::lock_guard<std::mutex> guard(mutex_);
289
+ events_.emplace_back(std::forward<Args>(args)...);
290
+ }
291
+
292
+ std::vector<LegacyEvent> consolidate() {
293
+ std::lock_guard<std::mutex> lock(mutex_);
294
+ std::vector<LegacyEvent> result;
295
+ result.insert(
296
+ result.begin(),
297
+ std::make_move_iterator(events_.begin()),
298
+ std::make_move_iterator(events_.end()));
299
+ events_.erase(events_.begin(), events_.end());
300
+ return result;
301
+ }
302
+
303
+ size_t size() {
304
+ std::lock_guard<std::mutex> lock(mutex_);
305
+ return events_.size();
306
+ }
307
+
308
+ private:
309
+ // This mutex is used to serialize access when different threads are writing
310
+ // to the same instance of RangeEventList.
311
+ std::mutex mutex_;
312
+ std::vector<LegacyEvent> events_;
313
+
314
+ static const size_t kReservedCapacity = 1024;
315
+ };
316
+
317
+ // A struct to control settings of disableProfiler options.
318
+ struct TORCH_API ProfilerDisableOptions {
319
+ ProfilerDisableOptions() = default;
320
+ ProfilerDisableOptions(bool shouldCleanupTLSState, bool shouldConsolidate)
321
+ : cleanupTLSState(shouldCleanupTLSState),
322
+ consolidate(shouldConsolidate) {}
323
+ // Whether we should clean up profiler states that are thread local, such as
324
+ // ThreadLocalDebugInfo and thread local RecordFunction callbacks.
325
+ bool cleanupTLSState = true;
326
+ // Whether we should consolidate all currently recorded profiled events. If
327
+ // false, will not consolidate and other threads can continue to write to the
328
+ // event lists.
329
+ bool consolidate = true;
330
+ };
331
+
332
+ // NOTE: profiler mode is thread local, with automatic propagation
333
+ // across thread boundary (e.g. at::launch tasks)
334
+ TORCH_API void enableProfilerLegacy(
335
+ const torch::profiler::impl::ProfilerConfig&);
336
+ using thread_event_lists = std::vector<std::vector<LegacyEvent>>;
337
+ TORCH_API thread_event_lists disableProfilerLegacy(
338
+ c10::optional<ProfilerDisableOptions> profilerDisableOptions =
339
+ c10::nullopt);
340
+
341
+ // adds profiledEvents to the current thread local recorded events. Each event
342
+ // will be marked with node ID given by fromNodeId.
343
+ TORCH_API void addEventList(std::vector<LegacyEvent>&& profiledEvents);
344
+ // Writes profiled events to a stream.
345
+ TORCH_API void writeProfilerEventsToStream(
346
+ std::ostream& out,
347
+ const std::vector<LegacyEvent*>& events);
348
+
349
+ // Usage:
350
+ // {
351
+ // RecordProfile guard("filename.trace");
352
+ // // code you want to profile
353
+ // }
354
+ // Then open filename.trace in chrome://tracing
355
+ struct TORCH_API RecordProfile {
356
+ RecordProfile(std::ostream& out);
357
+ RecordProfile(const std::string& filename);
358
+
359
+ ~RecordProfile();
360
+
361
+ private:
362
+ void init();
363
+ std::unique_ptr<std::ofstream> file_;
364
+ std::ostream& out_;
365
+ void processEvents(const std::vector<LegacyEvent*>& events);
366
+ };
367
+
368
+ // A guard that enables the legacy profiler, taking in an optional callback to
369
+ // process the results. Usage:
370
+ // {
371
+ // TLSLegacyProfilerGuard g([](thread_event_lists profilerResults) {
372
+ // // process profilerResults
373
+ // });
374
+ // Code to profile
375
+ // }
376
+ struct TORCH_API TLSLegacyProfilerGuard {
377
+ explicit TLSLegacyProfilerGuard(
378
+ const torch::profiler::impl::ProfilerConfig& cfg,
379
+ c10::optional<std::function<void(const thread_event_lists&)>>
380
+ resultCallback = c10::nullopt,
381
+ c10::optional<ProfilerDisableOptions> profilerDisableOptions =
382
+ c10::nullopt)
383
+ : cb_(std::move(resultCallback)),
384
+ profilerDisableOptions_(profilerDisableOptions) {
385
+ enableProfilerLegacy(cfg);
386
+ }
387
+ ~TLSLegacyProfilerGuard() {
388
+ thread_event_lists event_lists =
389
+ disableProfilerLegacy(profilerDisableOptions_);
390
+ if (cb_) {
391
+ try {
392
+ (*cb_)(event_lists);
393
+ } catch (const std::exception& e) {
394
+ LOG(ERROR) << "Got error processing profiler events: " << e.what();
395
+ }
396
+ }
397
+ }
398
+
399
+ private:
400
+ c10::optional<std::function<void(const thread_event_lists&)>> cb_;
401
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
402
+ const c10::optional<ProfilerDisableOptions> profilerDisableOptions_;
403
+ };
404
+
405
+ } // namespace profiler
406
+ } // namespace torch::autograd
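
Note: the usage comments above already show the two entry points; spelled out as a minimal sketch (this is the legacy API, so the Kineto path above is preferred for new code, and the config arguments here are assumptions):

    #include <torch/torch.h>
    #include <torch/csrc/autograd/profiler_legacy.h>

    void profile_with_legacy_api() {
      using namespace torch::autograd::profiler;
      using torch::profiler::impl::ProfilerConfig;
      using torch::profiler::impl::ProfilerState;

      // Scope-based capture: the trace file is written when the guard is destroyed.
      {
        RecordProfile guard("legacy.trace");  // open in chrome://tracing
        auto x = torch::randn({64, 64});
        auto y = x.mm(x);
      }

      // Alternative: receive the raw event lists through a callback.
      {
        TLSLegacyProfilerGuard g(
            ProfilerConfig(ProfilerState::CPU),
            [](const thread_event_lists& lists) {
              // inspect LegacyEvent entries here
            });
        auto z = torch::ones({8, 8}).sum();
      }
    }
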
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_anomaly_mode.h ADDED
@@ -0,0 +1,44 @@
1
+ #pragma once
2
+
3
+ #include <pybind11/pybind11.h>
4
+ #include <torch/csrc/autograd/anomaly_mode.h>
5
+ #include <torch/csrc/python_headers.h>
6
+ #include <torch/csrc/utils/pybind.h>
7
+
8
+ namespace torch {
9
+ namespace autograd {
10
+
11
+ struct PyAnomalyMetadata : public AnomalyMetadata {
12
+ static constexpr const char* ANOMALY_TRACE_KEY = "traceback_";
13
+ static constexpr const char* ANOMALY_PARENT_KEY = "parent_";
14
+
15
+ PyAnomalyMetadata() {
16
+ pybind11::gil_scoped_acquire gil;
17
+ // NOLINTNEXTLINE(cppcoreguidelines-prefer-member-initializer)
18
+ dict_ = PyDict_New();
19
+ }
20
+ ~PyAnomalyMetadata() override {
21
+ // If python is already dead, leak the wrapped python objects
22
+ if (Py_IsInitialized()) {
23
+ pybind11::gil_scoped_acquire gil;
24
+ Py_DECREF(dict_);
25
+ }
26
+ }
27
+ void store_stack() override;
28
+ void print_stack(const std::string& current_node_name) override;
29
+ void assign_parent(const std::shared_ptr<Node>& parent_node) override;
30
+
31
+ PyObject* dict() {
32
+ return dict_;
33
+ }
34
+
35
+ private:
36
+ PyObject* dict_{nullptr};
37
+ };
38
+ void _print_stack(
39
+ PyObject* trace_stack,
40
+ const std::string& current_node_name,
41
+ bool is_parent);
42
+
43
+ } // namespace autograd
44
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_autograd.h ADDED
@@ -0,0 +1,17 @@
1
+ #ifndef THP_AUTOGRAD_H
2
+ #define THP_AUTOGRAD_H
3
+
4
+ PyObject* THPAutograd_initExtension(PyObject* _unused, PyObject* unused);
5
+ void THPAutograd_initFunctions();
6
+
7
+ namespace torch::autograd {
8
+
9
+ PyMethodDef* python_functions();
10
+
11
+ }
12
+
13
+ #include <torch/csrc/autograd/python_engine.h>
14
+ #include <torch/csrc/autograd/python_function.h>
15
+ #include <torch/csrc/autograd/python_variable.h>
16
+
17
+ #endif
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_enum_tag.h ADDED
@@ -0,0 +1,7 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/python_headers.h>
4
+
5
+ namespace torch::autograd {
6
+ void initEnumTag(PyObject* module);
7
+ }
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_legacy_variable.h ADDED
@@ -0,0 +1,12 @@
1
+ #pragma once
2
+
3
+ // Instantiates torch._C._LegacyVariableBase, which defines the Python
4
+ // constructor (__new__) for torch.autograd.Variable.
5
+
6
+ #include <torch/csrc/python_headers.h>
7
+
8
+ namespace torch::autograd {
9
+
10
+ void init_legacy_variable(PyObject* module);
11
+
12
+ }
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_nested_functions.h ADDED
@@ -0,0 +1,9 @@
1
+ #pragma once
2
+
3
+ namespace torch::autograd {
4
+
5
+ PyMethodDef* get_nested_functions_manual();
6
+
7
+ void initNestedFunctions(PyObject* module);
8
+
9
+ } // namespace torch::autograd
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_nn_functions.h ADDED
@@ -0,0 +1,7 @@
1
+ #pragma once
2
+
3
+ namespace torch::autograd {
4
+
5
+ void initNNFunctions(PyObject* module);
6
+
7
+ }
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_sparse_functions.h ADDED
@@ -0,0 +1,7 @@
1
+ #pragma once
2
+
3
+ namespace torch::autograd {
4
+
5
+ void initSparseFunctions(PyObject* module);
6
+
7
+ }
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_variable.h ADDED
@@ -0,0 +1,114 @@
+ #pragma once
+
+ #include <ATen/core/Tensor.h>
+ #include <torch/csrc/python_headers.h>
+ #include <torch/csrc/utils/pythoncapi_compat.h>
+
+ #include <ATen/core/function_schema.h>
+ #include <pybind11/pybind11.h>
+ #include <torch/csrc/Exceptions.h>
+ #include <torch/csrc/Export.h>
+ #include <torch/csrc/autograd/variable.h>
+ #include <torch/csrc/utils/pybind.h>
+
+ namespace py = pybind11;
+
+ // Python object that backs torch.autograd.Variable
+ struct THPVariable {
+   PyObject_HEAD;
+   // Payload
+   c10::MaybeOwned<at::Tensor> cdata;
+   // Hooks to be run on backwards pass (corresponds to Python attr
+   // '_backward_hooks', set by 'register_hook')
+   PyObject* backward_hooks = nullptr;
+   // Hooks to be run in the backwards pass after accumulate grad,
+   // i.e., after the .grad has been set (corresponds to Python attr
+   // '_post_accumulate_grad_hooks', set by 'register_post_accumulate_grad_hook')
+   PyObject* post_accumulate_grad_hooks = nullptr;
+ };
+
+ TORCH_PYTHON_API void registerPythonTensorClass(
+     const std::string& device,
+     PyObject* python_tensor_class);
+
+ TORCH_PYTHON_API void activateCUDATrace();
+
+ TORCH_PYTHON_API extern PyObject* THPVariableClass;
+ TORCH_PYTHON_API extern PyObject* ParameterClass;
+
+ bool THPVariable_initModule(PyObject* module);
+ TORCH_PYTHON_API PyObject* THPVariable_Wrap(at::TensorBase var);
+
+ static inline bool THPVariable_CheckTypeExact(PyTypeObject* tp) {
+   // Check that a python object is a `Tensor`, but not a `Tensor` subclass.
+   // (A subclass could have different semantics.) The one exception is
+   // Parameter, which is used for Python bookkeeping but is equivalent to
+   // Tensor as far as C++ is concerned.
+   return (
+       tp == (PyTypeObject*)THPVariableClass ||
+       tp == (PyTypeObject*)ParameterClass);
+ }
+
+ static inline bool THPVariable_CheckExact(PyObject* obj) {
+   return THPVariable_CheckTypeExact(Py_TYPE(obj));
+ }
+
+ inline bool THPVariable_Check(PyObject* obj) {
+   if (!THPVariableClass)
+     return false;
+
+   // Fast path
+   if (THPVariable_CheckExact(obj)) {
+     return true;
+   }
+
+   const auto result = PyObject_IsInstance(obj, THPVariableClass);
+   if (result == -1)
+     throw python_error();
+   return result;
+ }
+
+ inline const at::Tensor& THPVariable_Unpack(THPVariable* var) {
+   return *var->cdata;
+ }
+
+ inline const at::Tensor& THPVariable_Unpack(PyObject* obj) {
+   return THPVariable_Unpack(reinterpret_cast<THPVariable*>(obj));
+ }
+
+ std::pair<py::object, py::dict> parseIValuesToPyArgsKwargs(
+     const c10::OperatorHandle& op,
+     const std::vector<c10::IValue>& arguments);
+
+ void pushPyOutToStack(
+     const c10::OperatorHandle& op,
+     torch::jit::Stack* stack,
+     py::object out,
+     const char* msg);
+
+ inline PyObject* THPVariable_WrapList(
+     const torch::autograd::variable_list& inputs) {
+   PyObject* pyinput = PyList_New(static_cast<Py_ssize_t>(inputs.size()));
+   for (const auto i : c10::irange(inputs.size())) {
+     PyList_SET_ITEM(pyinput, i, THPVariable_Wrap(inputs[i]));
+   }
+   return pyinput;
+ }
+
+ inline torch::autograd::variable_list THPVariable_UnpackList(
+     PyObject* pyresult) {
+   TORCH_CHECK(PyList_CheckExact(pyresult));
+   auto result_len = PyList_GET_SIZE(pyresult);
+   torch::autograd::variable_list result;
+   result.reserve(result_len);
+   for (const auto i : c10::irange(result_len)) {
+     PyObject* item = PyList_GET_ITEM(pyresult, i);
+     if (!Py_IsNone(item)) {
+       TORCH_INTERNAL_ASSERT_DEBUG_ONLY(THPVariable_Check(item));
+       result.emplace_back(THPVariable_Unpack(item));
+     } else {
+       result.emplace_back();
+     }
+   }
+   return result;
+ }
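
For orientation, a minimal usage sketch of the helpers above (editorial illustration, not part of the commit; `double_if_tensor` is a hypothetical function, and it assumes the GIL is held and that Python errors are propagated by the caller):

#include <torch/csrc/autograd/python_variable.h>

// Return obj * 2 as a new Tensor object, or None if obj is not a Tensor.
PyObject* double_if_tensor(PyObject* obj) {
  if (!THPVariable_Check(obj)) {  // accepts Tensor and Tensor subclasses
    Py_RETURN_NONE;
  }
  // Borrow the underlying at::Tensor without copying the Python object.
  const at::Tensor& t = THPVariable_Unpack(obj);
  // Box the C++ result back into a THPVariable (new reference).
  return THPVariable_Wrap(t.mul(2));
}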
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_variable_indexing.h ADDED
@@ -0,0 +1,102 @@
+ #pragma once
+
+ #include <c10/core/SymInt.h>
+ #include <torch/csrc/autograd/python_variable.h>
+ #include <torch/csrc/python_headers.h>
+ #include <torch/csrc/utils/pybind.h>
+ #include <torch/csrc/utils/python_symnode.h>
+
+ namespace torch::autograd {
+
+ struct UnpackedSlice {
+   c10::SymInt start;
+   c10::SymInt stop;
+   c10::SymInt step;
+ };
+
+ // This mirrors CPython's PySlice_Unpack method
+ static inline UnpackedSlice __PySlice_Unpack(PyObject* _r) {
+   PySliceObject* r = (PySliceObject*)_r;
+   /* this is harder to get right than you might think */
+
+   c10::SymInt start_sym, stop_sym, step_sym;
+
+   auto clip_val = [](Py_ssize_t val) {
+     if (val < c10::SymInt::min_representable_int()) {
+       auto r = PyErr_WarnEx(
+           PyExc_UserWarning,
+           "Truncating the start/stop/step "
+           "of slice. This is likely because of "
+           "saved old models when the start/stop/step were larger.",
+           1);
+       if (r != 0) {
+         throw python_error();
+       }
+       return (Py_ssize_t)(c10::SymInt::min_representable_int());
+     }
+     return val;
+   };
+
+   if (r->step == Py_None) {
+     step_sym = c10::SymInt(1);
+   } else {
+     if (torch::is_symint(r->step)) {
+       step_sym = py::handle(r->step).cast<c10::SymInt>();
+     } else {
+       // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
+       Py_ssize_t step;
+       if (!_PyEval_SliceIndex(r->step, &step)) {
+         throw python_error();
+       }
+       if (step == 0) {
+         PyErr_SetString(PyExc_ValueError, "slice step cannot be zero");
+         throw python_error();
+       }
+
+       step = clip_val(step);
+       step_sym = c10::SymInt(step);
+     }
+   }
+
+   if (torch::is_symint(r->start)) {
+     start_sym = py::handle(r->start).cast<c10::SymInt>();
+   } else if (r->start == Py_None) {
+     start_sym = c10::SymInt(step_sym < 0 ? PY_SSIZE_T_MAX : 0);
+   } else {
+     // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
+     Py_ssize_t start;
+     if (!_PyEval_SliceIndex(r->start, &start)) {
+       throw python_error();
+     }
+     start = clip_val(start);
+     start_sym = c10::SymInt(start);
+   }
+
+   if (torch::is_symint(r->stop)) {
+     stop_sym = py::handle(r->stop).cast<c10::SymInt>();
+   } else if (r->stop == Py_None) {
+     stop_sym = c10::SymInt(
+         step_sym < 0 ? c10::SymInt::min_representable_int() : PY_SSIZE_T_MAX);
+   } else {
+     // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
+     Py_ssize_t stop;
+     if (!_PyEval_SliceIndex(r->stop, &stop)) {
+       throw python_error();
+     }
+     stop = clip_val(stop);
+     stop_sym = c10::SymInt(stop);
+   }
+
+   return UnpackedSlice{
+       std::move(start_sym), std::move(stop_sym), std::move(step_sym)};
+ }
+
+ Py_ssize_t THPVariable_length(PyObject* self);
+ PyObject* THPVariable_getitem(PyObject* self, PyObject* index);
+ int THPVariable_setitem(PyObject* self, PyObject* index, PyObject* value);
+
+ Variable valueToTensor(
+     c10::TensorOptions options,
+     PyObject* value,
+     const at::Device& device);
+
+ } // namespace torch::autograd
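
A small sketch of what `__PySlice_Unpack` computes for an ordinary Python slice (editorial illustration, not part of the commit; the helper name is hypothetical and the GIL is assumed to be held):

#include <torch/csrc/autograd/python_variable_indexing.h>

// Unpack the Python slice `::-2` into SymInt start/stop/step.
static torch::autograd::UnpackedSlice unpack_reverse_every_other() {
  PyObject* step = PyLong_FromLong(-2);
  PyObject* slice = PySlice_New(/*start=*/Py_None, /*stop=*/Py_None, step);
  Py_DECREF(step);
  torch::autograd::UnpackedSlice s = torch::autograd::__PySlice_Unpack(slice);
  Py_DECREF(slice);
  // step is -2; because the step is negative, the None start resolves to
  // PY_SSIZE_T_MAX and the None stop resolves to
  // c10::SymInt::min_representable_int(), mirroring CPython's PySlice_Unpack.
  return s;
}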
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/saved_variable_hooks.h ADDED
@@ -0,0 +1,13 @@
+ #pragma once
+
+ #include <ATen/core/Tensor.h>
+
+ namespace torch::autograd {
+
+ struct TORCH_API SavedVariableHooks {
+   virtual void call_pack_hook(const at::Tensor& tensor) = 0;
+   virtual at::Tensor call_unpack_hook() = 0;
+   virtual ~SavedVariableHooks() = default;
+ };
+
+ } // namespace torch::autograd
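
A hedged sketch of a concrete implementation: the subclass below parks the packed tensor on CPU and restores it on unpack, loosely modeled on the idea behind `torch.autograd.graph.save_on_cpu`. The class name is illustrative only, and how it gets registered (e.g. on a `SavedVariable`) is left to the caller:

#include <torch/csrc/autograd/saved_variable_hooks.h>

struct CpuOffloadHooks final : torch::autograd::SavedVariableHooks {
  void call_pack_hook(const at::Tensor& tensor) override {
    // Remember where the tensor lived and stash a CPU copy.
    original_device_ = tensor.device();
    stashed_ = tensor.to(at::kCPU);
  }
  at::Tensor call_unpack_hook() override {
    // Move the stashed copy back to its original device on demand.
    return stashed_.to(original_device_);
  }

 private:
  at::Tensor stashed_;
  at::Device original_device_{at::kCPU};
};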
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/error_messages.h ADDED
@@ -0,0 +1,22 @@
+ #pragma once
+
+ #include <sstream>
+
+ namespace torch {
+ namespace autograd {
+ namespace utils {
+
+ inline std::string requires_grad_leaf_error(bool requires_grad) {
+   std::ostringstream oss;
+   oss << "you can only change requires_grad flags of leaf variables.";
+   if (requires_grad == false) {
+     oss << " If you want to use a computed variable in a subgraph "
+            "that doesn't require differentiation use "
+            "var_no_grad = var.detach().";
+   }
+   return oss.str();
+ }
+
+ } // namespace utils
+ } // namespace autograd
+ } // namespace torch
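
A small usage sketch (hypothetical call site, not part of the commit) showing how the message might be surfaced through `TORCH_CHECK`:

#include <ATen/core/Tensor.h>
#include <c10/util/Exception.h>
#include <torch/csrc/autograd/utils/error_messages.h>

// Flip requires_grad, but only on leaf tensors; otherwise raise the
// standard error message.
void set_requires_grad_checked(at::Tensor& t, bool requires_grad) {
  TORCH_CHECK(
      t.is_leaf(),
      torch::autograd::utils::requires_grad_leaf_error(requires_grad));
  t.set_requires_grad(requires_grad);
}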
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/grad_layout_contract.h ADDED
@@ -0,0 +1,80 @@
+ #pragma once
+
+ #include <ATen/Tensor.h>
+
+ namespace torch {
+ namespace autograd {
+ namespace utils {
+
+ // Helper functions to enforce the "Gradient Layout Contract" described in
+ // torch/csrc/autograd/functions/accumulate_grad.h.
+
+ // Checks if grad obeys the contract with variable.
+ inline bool obeys_layout_contract(
+     const at::Tensor& grad,
+     const at::Tensor& variable) {
+   TORCH_INTERNAL_ASSERT(!grad.is_sparse());
+   TORCH_INTERNAL_ASSERT(!grad.is_sparse_csr());
+   TORCH_INTERNAL_ASSERT(!variable.is_sparse_csr());
+
+   // NOLINTNEXTLINE(bugprone-branch-clone)
+   if (variable.is_nested()) {
+     // TODO: Nested Tensor does not have an implementation of detach. The
+     // current implementation of nested tensor likely does obey the gradient
+     // contract and should return true, but this would likely change in the
+     // future
+     return false;
+   } else if (variable.is_sparse()) {
+     // Gradient Layout Contract is not applicable for sparse layouts
+     return false;
+   } else if (variable.is_non_overlapping_and_dense()) {
+     // Only look at stride for dimensions that are not of size 1.
+     const auto& grad_sizes = grad.sym_sizes();
+     const auto& grad_strides = grad.sym_strides();
+     const auto& variable_strides = variable.sym_strides();
+     for (const auto idx : c10::irange(grad_sizes.size())) {
+       if (grad_sizes[idx] != 1) {
+         if (grad_strides[idx] != variable_strides[idx]) {
+           return false;
+         }
+       } else {
+         // This should not be needed but we don't check if a Tensor has views
+         // before stashing it. And 0-strided Tensors of size 1 are actually
+         // views for ops like cat.
+         // TODO: Actually detect views in the accumulateGrad function so that
+         // this Tensor is not considered at all.
+         if (grad_strides[idx] == 0) {
+           return false;
+         }
+       }
+     }
+     return true;
+   } else {
+     return grad.is_contiguous(at::MemoryFormat::Contiguous);
+   }
+ }
+
+ // Creates a clone of new_grad that obeys the contract with variable.
+ // The clone should attach to new_grad's history if GradMode::is_enabled().
+ inline at::Tensor clone_obey_contract(
+     const at::Tensor& new_grad,
+     const at::Tensor& variable) {
+   if (variable.is_non_overlapping_and_dense()) {
+     // (1)
+     // Does this dicey-looking sequence attach the result to new_grad's
+     // history if GradMode::is_enabled()? Yes, and @alband says it should.
+     return std::move(new_grad
+                          .new_empty_strided_symint(
+                              variable.sym_sizes(),
+                              variable.sym_strides(),
+                              variable.options().memory_format(c10::nullopt))
+                          .copy_(new_grad));
+   } else {
+     // (2)
+     return new_grad.clone(at::MemoryFormat::Contiguous);
+   }
+ }
+
+ } // namespace utils
+ } // namespace autograd
+ } // namespace torch
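
A short sketch illustrating the contract (editorial illustration, not part of the commit): for a dense, non-overlapping variable the accumulated gradient is expected to match the variable's strides rather than simply be row-major contiguous, and `clone_obey_contract` produces such a gradient.

#include <ATen/ATen.h>
#include <torch/csrc/autograd/utils/grad_layout_contract.h>

void layout_contract_demo() {
  // A dense but non-contiguous variable: shape {3, 4}, strides {1, 3}.
  at::Tensor variable = at::randn({4, 3}).t();
  // A freshly materialized gradient: same shape, but contiguous strides {4, 1}.
  at::Tensor new_grad = at::randn({3, 4});

  // Strides differ, so the contract is not satisfied yet.
  bool before = torch::autograd::utils::obeys_layout_contract(new_grad, variable);  // false
  // Re-materialize the gradient with the variable's strides.
  at::Tensor fixed = torch::autograd::utils::clone_obey_contract(new_grad, variable);
  bool after = torch::autograd::utils::obeys_layout_contract(fixed, variable);      // true
  (void)before;
  (void)after;
}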
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/lambda_post_hook.h ADDED
@@ -0,0 +1,40 @@
+ #pragma once
+
+ #include <torch/csrc/autograd/function_hook.h>
+
+ namespace torch {
+ namespace autograd {
+ namespace utils {
+
+ // Turns a lambda into a torch::autograd::FunctionPostHook.
+ class LambdaPostHook : public torch::autograd::FunctionPostHook {
+   using variable_list = std::vector<torch::autograd::Variable>;
+   using fn_type =
+       std::function<variable_list(const variable_list&, const variable_list&)>;
+   using compiled_fn_type = std::function<void(CompiledNodeArgs&)>;
+
+  public:
+   // The lambda function takes as arguments the outputs and inputs of the
+   // autograd function and can modify the outputs of the autograd function by
+   // returning a new output if needed.
+   /* implicit */ LambdaPostHook(fn_type fn) : fn_(std::move(fn)) {}
+
+   LambdaPostHook(fn_type fn, compiled_fn_type compiled_fn)
+       : fn_(std::move(fn)), compiled_fn_(std::move(compiled_fn)) {}
+
+   variable_list operator()(
+       const variable_list& outputs,
+       const variable_list& inputs) override {
+     return fn_(outputs, inputs);
+   }
+
+   void compiled_args(CompiledNodeArgs& args) override {}
+
+  protected:
+   std::function<variable_list(const variable_list&, const variable_list&)> fn_;
+   compiled_fn_type compiled_fn_;
+ };
+
+ } // namespace utils
+ } // namespace autograd
+ } // namespace torch
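
A sketch of attaching a `LambdaPostHook` to a gradient-accumulator node, modeled on how the DDP reducer uses this class (the surrounding function is hypothetical; `accumulator` is assumed to have been obtained elsewhere, e.g. from `torch::autograd::impl::grad_accumulator`):

#include <memory>
#include <torch/csrc/autograd/function.h>
#include <torch/csrc/autograd/utils/lambda_post_hook.h>

void attach_passthrough_hook(
    const std::shared_ptr<torch::autograd::Node>& accumulator) {
  using torch::autograd::utils::LambdaPostHook;
  accumulator->add_post_hook(std::make_unique<LambdaPostHook>(
      [](const torch::autograd::variable_list& outputs,
         const torch::autograd::variable_list& /*inputs*/) {
        // A real hook could inspect or replace the outputs here; this one
        // passes them through unchanged.
        return outputs;
      }));
}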
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/python_arg_parsing.h ADDED
@@ -0,0 +1,53 @@
+ #pragma once
+
+ #include <ATen/core/Tensor.h>
+ #include <torch/csrc/python_headers.h>
+
+ #include <torch/csrc/utils/python_arg_parser.h>
+
+ namespace torch {
+ namespace autograd {
+ namespace utils {
+
+ // The parameter allow_copy is to accept copy for Tensor.to (and by proxy
+ // PackedSequence.to) but not nn.Module.to.
+ inline std::tuple<
+     c10::optional<at::Device>,
+     c10::optional<at::ScalarType>,
+     bool,
+     bool,
+     c10::optional<at::MemoryFormat>>
+ parse_to_conversion(PythonArgs& r, bool allow_copy) {
+   if (r.idx == 0) {
+     if (!allow_copy && !r.isNone(3))
+       throw std::runtime_error(".to() does not accept copy argument");
+     return std::make_tuple(
+         r.deviceOptional(0),
+         r.scalartypeOptional(1),
+         r.toBool(2),
+         r.toBool(3),
+         r.memoryformatOptional(4));
+   } else if (r.idx == 1) {
+     if (!allow_copy && !r.isNone(2))
+       throw std::runtime_error(".to() does not accept copy argument");
+     return std::make_tuple(
+         c10::nullopt,
+         r.scalartype(0),
+         r.toBool(1),
+         r.toBool(2),
+         r.memoryformatOptional(3));
+   } else {
+     auto tensor = r.tensor(0);
+     if (!allow_copy && !r.isNone(2))
+       throw std::runtime_error(".to() does not accept copy argument");
+     return std::make_tuple(
+         tensor.device(),
+         tensor.scalar_type(),
+         r.toBool(1),
+         r.toBool(2),
+         r.memoryformatOptional(3));
+   }
+ }
+ } // namespace utils
+ } // namespace autograd
+ } // namespace torch
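
A sketch of a `.to()`-style binding consuming `parse_to_conversion` (hypothetical and abridged; the real call sites live elsewhere in torch/csrc, e.g. the Tensor.to binding, and the signature strings below are illustrative):

#include <torch/csrc/Exceptions.h>
#include <torch/csrc/autograd/utils/python_arg_parsing.h>
#include <torch/csrc/utils/python_arg_parser.h>

static PyObject* my_to_binding(PyObject* self, PyObject* args, PyObject* kwargs) {
  HANDLE_TH_ERRORS
  static torch::PythonArgParser parser({
      "to(Device device=None, ScalarType dtype=None, bool non_blocking=False, bool copy=False, *, MemoryFormat? memory_format=None)",
      "to(ScalarType dtype, bool non_blocking=False, bool copy=False, *, MemoryFormat? memory_format=None)",
      "to(Tensor tensor, bool non_blocking=False, bool copy=False, *, MemoryFormat? memory_format=None)",
  });
  torch::ParsedArgs<5> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);
  // The matched overload index in r.idx selects which branch of
  // parse_to_conversion runs.
  auto [device, scalar_type, non_blocking, copy, memory_format] =
      torch::autograd::utils::parse_to_conversion(r, /*allow_copy=*/true);
  // ... dispatch to at::Tensor::to(...) with the parsed options ...
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}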