diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/DataLoader.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/DataLoader.h new file mode 100644 index 0000000000000000000000000000000000000000..3bafd470f1f7be827de490aa9c086441c38aa32f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/DataLoader.h @@ -0,0 +1,6 @@ +#pragma once + +#include + +// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,cppcoreguidelines-avoid-non-const-global-variables,modernize-avoid-c-arrays) +extern PyMethodDef DataLoaderMethods[]; diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/Device.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/Device.h new file mode 100644 index 0000000000000000000000000000000000000000..665c38bf035d45eafc0575f76a49cacfb9169371 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/Device.h @@ -0,0 +1,21 @@ +#pragma once + +#include +#include + +#include + +// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) +struct TORCH_API THPDevice { + PyObject_HEAD at::Device device; +}; + +TORCH_API extern PyTypeObject THPDeviceType; + +inline bool THPDevice_Check(PyObject* obj) { + return Py_TYPE(obj) == &THPDeviceType; +} + +TORCH_API PyObject* THPDevice_New(const at::Device& device); + +TORCH_API void THPDevice_init(PyObject* module); diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/DynamicTypes.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/DynamicTypes.h new file mode 100644 index 0000000000000000000000000000000000000000..1fd0a9d418fb3f7231e02432429eb51749bbed9c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/DynamicTypes.h @@ -0,0 +1,36 @@ +#pragma once + +// Provides conversions between Python tensor objects and at::Tensor. 
+ +#include + +#include +#include +#include +#include +#include +#include + +#include +#include + +struct THPDtype; +struct THPLayout; + +namespace c10 { +struct Storage; +} + +namespace torch { +void registerDtypeObject(THPDtype* dtype, at::ScalarType scalarType); +void registerLayoutObject(THPLayout* thp_layout, at::Layout layout); + +TORCH_PYTHON_API PyObject* createPyObject(const at::Storage& storage); +at::Storage createStorage(PyObject* obj); +std::tuple createStorageGetType( + PyObject* obj); +bool isStorage(PyObject* obj); + +TORCH_PYTHON_API THPDtype* getTHPDtype(at::ScalarType scalarType); +THPLayout* getTHPLayout(at::Layout layout); +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/Exceptions.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/Exceptions.h new file mode 100644 index 0000000000000000000000000000000000000000..6b8d923f40909be5f9e2ebc7e95ee69fb0a8842f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/Exceptions.h @@ -0,0 +1,390 @@ +#pragma once + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if defined(USE_DISTRIBUTED) && defined(USE_C10D) +#include +#endif + +static inline void PyErr_SetString(PyObject* type, const std::string& message) { + PyErr_SetString(type, message.c_str()); +} +/// NOTE [ Conversion Cpp Python Warning ] +/// The warning handler cannot set python warnings immediately +/// as it requires acquiring the GIL (potential deadlock) +/// and would need to cleanly exit if the warning raised a +/// python error. To solve this, we buffer the warnings and +/// process them when we go back to python. +/// This requires the two try/catch blocks below to handle the +/// following cases: +/// - If there is no Error raised in the inner try/catch, the +/// buffered warnings are processed as python warnings. 
+/// - If they don't raise an error, the function process with the +/// original return code. +/// - If any of them raise an error, the error is set (PyErr_*) and +/// the destructor will raise a cpp exception python_error() that +/// will be caught by the outer try/catch that will be able to change +/// the return value of the function to reflect the error. +/// - If an Error was raised in the inner try/catch, the inner try/catch +/// must set the python error. The buffered warnings are then +/// processed as cpp warnings as we cannot predict before hand +/// whether a python warning will raise an error or not and we +/// cannot handle two errors at the same time. +/// This advanced handler will only be used in the current thread. +/// If any other thread is used, warnings will be processed as +/// cpp warnings. +#define HANDLE_TH_ERRORS \ + try { \ + torch::PyWarningHandler __enforce_warning_buffer; \ + try { +#define _CATCH_GENERIC_ERROR(ErrorType, PythonErrorType, retstmnt) \ + catch (const c10::ErrorType& e) { \ + auto msg = torch::get_cpp_stacktraces_enabled() \ + ? 
e.what() \ + : e.what_without_backtrace(); \ + PyErr_SetString(PythonErrorType, torch::processErrorMsg(msg)); \ + retstmnt; \ + } + +// Only catch torch-specific exceptions +#define CATCH_CORE_ERRORS(retstmnt) \ + catch (python_error & e) { \ + e.restore(); \ + retstmnt; \ + } \ + catch (py::error_already_set & e) { \ + e.restore(); \ + retstmnt; \ + } \ + _CATCH_GENERIC_ERROR(IndexError, PyExc_IndexError, retstmnt) \ + _CATCH_GENERIC_ERROR(ValueError, PyExc_ValueError, retstmnt) \ + _CATCH_GENERIC_ERROR(TypeError, PyExc_TypeError, retstmnt) \ + _CATCH_GENERIC_ERROR( \ + NotImplementedError, PyExc_NotImplementedError, retstmnt) \ + _CATCH_GENERIC_ERROR(LinAlgError, THPException_LinAlgError, retstmnt) \ + _CATCH_GENERIC_ERROR( \ + OutOfMemoryError, THPException_OutOfMemoryError, retstmnt) \ + _CATCH_GENERIC_ERROR( \ + DistBackendError, THPException_DistBackendError, retstmnt) \ + _CATCH_GENERIC_ERROR( \ + DistNetworkError, THPException_DistNetworkError, retstmnt) \ + _CATCH_GENERIC_ERROR(DistStoreError, THPException_DistStoreError, retstmnt) \ + _CATCH_GENERIC_ERROR(DistError, THPException_DistError, retstmnt) \ + _CATCH_GENERIC_ERROR(Error, PyExc_RuntimeError, retstmnt) \ + catch (torch::PyTorchError & e) { \ + auto msg = torch::processErrorMsg(e.what()); \ + PyErr_SetString(e.python_type(), msg); \ + retstmnt; \ + } + +#define CATCH_TH_ERRORS(retstmnt) CATCH_CORE_ERRORS(retstmnt) + +#define CATCH_ALL_ERRORS(retstmnt) \ + CATCH_TH_ERRORS(retstmnt) \ + catch (const std::exception& e) { \ + auto msg = torch::processErrorMsg(e.what()); \ + PyErr_SetString(PyExc_RuntimeError, msg); \ + retstmnt; \ + } + +#define END_HANDLE_TH_ERRORS_PYBIND \ + } \ + catch (...) 
{ \ + __enforce_warning_buffer.set_in_exception(); \ + throw; \ + } \ + } \ + catch (py::error_already_set & e) { \ + throw; \ + } \ + catch (py::builtin_exception & e) { \ + throw; \ + } \ + catch (torch::jit::JITException & e) { \ + throw; \ + } \ + catch (const std::exception& e) { \ + torch::translate_exception_to_python(std::current_exception()); \ + throw py::error_already_set(); \ + } + +#define END_HANDLE_TH_ERRORS_RET(retval) \ + } \ + catch (...) { \ + __enforce_warning_buffer.set_in_exception(); \ + throw; \ + } \ + } \ + catch (const std::exception& e) { \ + torch::translate_exception_to_python(std::current_exception()); \ + return retval; \ + } + +#define END_HANDLE_TH_ERRORS END_HANDLE_TH_ERRORS_RET(nullptr) + +extern PyObject *THPException_FatalError, *THPException_LinAlgError, + *THPException_OutOfMemoryError, *THPException_DistError, + *THPException_DistBackendError, *THPException_DistNetworkError, + *THPException_DistStoreError; + +// Throwing this exception means that the python error flags have been already +// set and control should be immediately returned to the interpreter. 
+struct python_error : public std::exception { + python_error() = default; + + python_error(const python_error& other) + : type(other.type), + value(other.value), + traceback(other.traceback), + message(other.message) { + pybind11::gil_scoped_acquire gil; + Py_XINCREF(type); + Py_XINCREF(value); + Py_XINCREF(traceback); + } + + python_error(python_error&& other) noexcept + : type(other.type), + value(other.value), + traceback(other.traceback), + message(std::move(other.message)) { + other.type = nullptr; + other.value = nullptr; + other.traceback = nullptr; + } + + // NOLINTNEXTLINE(bugprone-exception-escape) + ~python_error() override { + if (type || value || traceback) { + pybind11::gil_scoped_acquire gil; + Py_XDECREF(type); + Py_XDECREF(value); + Py_XDECREF(traceback); + } + } + + const char* what() const noexcept override { + return message.c_str(); + } + + void build_message() { + // Ensure we have the GIL. + pybind11::gil_scoped_acquire gil; + + // No errors should be set when we enter the function since PyErr_Fetch + // clears the error indicator. + TORCH_INTERNAL_ASSERT(!PyErr_Occurred()); + + // Default message. + message = "python_error"; + + // Try to retrieve the error message from the value. + if (value != nullptr) { + // Reference count should not be zero. + TORCH_INTERNAL_ASSERT(Py_REFCNT(value) > 0); + + PyObject* pyStr = PyObject_Str(value); + if (pyStr != nullptr) { + PyObject* encodedString = + PyUnicode_AsEncodedString(pyStr, "utf-8", "strict"); + if (encodedString != nullptr) { + char* bytes = PyBytes_AS_STRING(encodedString); + if (bytes != nullptr) { + // Set the message. + message = std::string(bytes); + } + Py_XDECREF(encodedString); + } + Py_XDECREF(pyStr); + } + } + + // Clear any errors since we don't want to propagate errors for functions + // that are trying to build a string for the error message. 
+ PyErr_Clear(); + } + + /** Saves the exception so that it can be re-thrown on a different thread */ + inline void persist() { + if (type) + return; // Don't overwrite exceptions + // PyErr_Fetch overwrites the pointers + pybind11::gil_scoped_acquire gil; + Py_XDECREF(type); + Py_XDECREF(value); + Py_XDECREF(traceback); + PyErr_Fetch(&type, &value, &traceback); + build_message(); + } + + /** Sets the current Python error from this exception */ + inline void restore() { + if (!type) + return; + // PyErr_Restore steals references + pybind11::gil_scoped_acquire gil; + Py_XINCREF(type); + Py_XINCREF(value); + Py_XINCREF(traceback); + PyErr_Restore(type, value, traceback); + } + + PyObject* type{nullptr}; + PyObject* value{nullptr}; + PyObject* traceback{nullptr}; + + // Message to return to the user when 'what()' is invoked. + std::string message; +}; + +bool THPException_init(PyObject* module); + +namespace torch { + +// Set python current exception from a C++ exception +TORCH_PYTHON_API void translate_exception_to_python(const std::exception_ptr&); + +TORCH_PYTHON_API std::string processErrorMsg(std::string str); + +// Abstract base class for exceptions which translate to specific Python types +struct PyTorchError : public std::exception { + PyTorchError() = default; + PyTorchError(std::string msg_) : msg(std::move(msg_)) {} + virtual PyObject* python_type() = 0; + const char* what() const noexcept override { + return msg.c_str(); + } + std::string msg; +}; + +// Declare a printf-like function on gcc & clang +// The compiler can then warn on invalid format specifiers +#ifdef __GNUC__ +#define TORCH_FORMAT_FUNC(FORMAT_INDEX, VA_ARGS_INDEX) \ + __attribute__((format(printf, FORMAT_INDEX, VA_ARGS_INDEX))) +#else +#define TORCH_FORMAT_FUNC(FORMAT_INDEX, VA_ARGS_INDEX) +#endif + +// Translates to Python TypeError +struct TypeError : public PyTorchError { + using PyTorchError::PyTorchError; + TORCH_PYTHON_API TypeError(const char* format, ...) 
TORCH_FORMAT_FUNC(2, 3); + PyObject* python_type() override { + return PyExc_TypeError; + } +}; + +// Translates to Python AttributeError +struct AttributeError : public PyTorchError { + AttributeError(const char* format, ...) TORCH_FORMAT_FUNC(2, 3); + PyObject* python_type() override { + return PyExc_AttributeError; + } +}; + +// ATen warning handler for Python +struct PyWarningHandler { + // Move actual handler into a separate class with a noexcept + // destructor. Otherwise, we need to force all WarningHandler + // subclasses to have a noexcept(false) destructor. + struct InternalHandler : at::WarningHandler { + ~InternalHandler() override = default; + void process(const c10::Warning& warning) override; + + std::vector warning_buffer_; + }; + + public: + /// See NOTE [ Conversion Cpp Python Warning ] for noexcept justification + TORCH_PYTHON_API PyWarningHandler() noexcept(true); + // NOLINTNEXTLINE(bugprone-exception-escape) + TORCH_PYTHON_API ~PyWarningHandler() noexcept(false); + + /** Call if an exception has been thrown + + * Necessary to determine if it is safe to throw from the desctructor since + * std::uncaught_exception is buggy on some platforms and generally + * unreliable across dynamic library calls. + */ + void set_in_exception() { + in_exception_ = true; + } + + private: + InternalHandler internal_handler_; + at::WarningHandler* prev_handler_; + bool in_exception_; +}; + +namespace detail { + +struct noop_gil_scoped_release { + // user-defined constructor (i.e. 
not defaulted) to avoid + // unused-variable warnings at usage sites of this class + noop_gil_scoped_release() {} +}; + +template +using conditional_gil_scoped_release = std::conditional_t< + release_gil, + pybind11::gil_scoped_release, + noop_gil_scoped_release>; + +template +using Arg = typename invoke_traits::template arg::type; + +template +auto wrap_pybind_function_impl_( + // NOLINTNEXTLINE(cppcoreguidelines-missing-std-forward) + Func&& f, + std::index_sequence, + std::bool_constant) { + namespace py = pybind11; + + // f=f is needed to handle function references on older compilers + return [f = std::forward(f)](Arg... args) { + HANDLE_TH_ERRORS + conditional_gil_scoped_release no_gil; + return c10::guts::invoke(f, std::forward>(args)...); + END_HANDLE_TH_ERRORS_PYBIND + }; +} +} // namespace detail + +// Wrap a function with TH error and warning handling. +// Returns a function object suitable for registering with pybind11. +template +auto wrap_pybind_function(Func&& f) { + using traits = invoke_traits; + return torch::detail::wrap_pybind_function_impl_( + std::forward(f), + std::make_index_sequence{}, + std::false_type{}); +} + +// Wrap a function with TH error, warning handling and releases the GIL. +// Returns a function object suitable for registering with pybind11. 
+template +auto wrap_pybind_function_no_gil(Func&& f) { + using traits = invoke_traits; + return torch::detail::wrap_pybind_function_impl_( + std::forward(f), + std::make_index_sequence{}, + std::true_type{}); +} + +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/Export.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/Export.h new file mode 100644 index 0000000000000000000000000000000000000000..4bcd910f1279534c63b507beb75063953b6d5de3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/Export.h @@ -0,0 +1,9 @@ +#pragma once + +#include + +#ifdef THP_BUILD_MAIN_LIB +#define TORCH_PYTHON_API C10_EXPORT +#else +#define TORCH_PYTHON_API C10_IMPORT +#endif diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/Generator.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/Generator.h new file mode 100644 index 0000000000000000000000000000000000000000..f5b7b4661eb5851ac77a6dc25192b65a6e125b0a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/Generator.h @@ -0,0 +1,28 @@ +#pragma once + +#include +#include +#include + +// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) +struct THPGenerator { + PyObject_HEAD at::Generator cdata; +}; + +// Creates a new Python object wrapping the default at::Generator. The reference +// is borrowed. The caller should ensure that the at::Generator object lifetime +// last at least as long as the Python wrapper. +TORCH_PYTHON_API PyObject* THPGenerator_initDefaultGenerator( + at::Generator cdata); + +#define THPGenerator_Check(obj) PyObject_IsInstance(obj, THPGeneratorClass) + +TORCH_PYTHON_API extern PyObject* THPGeneratorClass; + +bool THPGenerator_init(PyObject* module); + +TORCH_PYTHON_API PyObject* THPGenerator_Wrap(at::Generator gen); + +// Creates a new Python object for a Generator. 
The Generator must not already +// have a PyObject* associated with it. +PyObject* THPGenerator_NewWithVar(PyTypeObject* type, at::Generator gen); diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/MemoryFormat.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/MemoryFormat.h new file mode 100644 index 0000000000000000000000000000000000000000..7f60a0ba0282c39cb8c72876a4288560ec280b93 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/MemoryFormat.h @@ -0,0 +1,27 @@ +#pragma once + +#include + +#include + +#include + +const int MEMORY_FORMAT_NAME_LEN = 64; + +struct THPMemoryFormat { + PyObject_HEAD at::MemoryFormat memory_format; + // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays) + char name[MEMORY_FORMAT_NAME_LEN + 1]; +}; + +extern PyTypeObject THPMemoryFormatType; + +inline bool THPMemoryFormat_Check(PyObject* obj) { + return Py_TYPE(obj) == &THPMemoryFormatType; +} + +PyObject* THPMemoryFormat_New( + at::MemoryFormat memory_format, + const std::string& name); + +void THPMemoryFormat_init(PyObject* module); diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/Module.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/Module.h new file mode 100644 index 0000000000000000000000000000000000000000..71ff8c4fcb85e2c9e55fb4c0660ef506b6fda6e6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/Module.h @@ -0,0 +1,6 @@ +#ifndef THP_MODULE_INC +#define THP_MODULE_INC + +#define THP_STATELESS_ATTRIBUTE_NAME "_torch" + +#endif diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/Size.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/Size.h new file mode 100644 index 0000000000000000000000000000000000000000..dd4283f7d77234cbd3dac815456981b22a4dad00 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/Size.h @@ 
-0,0 +1,15 @@ +#pragma once + +#include +#include +#include + +extern PyTypeObject THPSizeType; + +#define THPSize_Check(obj) (Py_TYPE(obj) == &THPSizeType) + +PyObject* THPSize_New(const torch::autograd::Variable& t); +PyObject* THPSize_NewFromSizes(int64_t dim, const int64_t* sizes); +PyObject* THPSize_NewFromSymSizes(const at::Tensor& t); + +void THPSize_init(PyObject* module); diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/Storage.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/Storage.h new file mode 100644 index 0000000000000000000000000000000000000000..16bf87bbcc2ea2076d07eb9d2c612e7082864ed5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/Storage.h @@ -0,0 +1,60 @@ +#ifndef THP_STORAGE_INC +#define THP_STORAGE_INC + +#include +#include +#include +#include +#include + +#define THPStorageStr "torch.UntypedStorage" + +struct THPStorage { + PyObject_HEAD; + c10::MaybeOwned cdata; + bool is_hermetic; +}; + +TORCH_PYTHON_API PyObject* THPStorage_Wrap(c10::Storage storage); +TORCH_PYTHON_API PyObject* THPStorage_NewWithStorage( + PyTypeObject* type, + c10::Storage _storage, + c10::impl::PyInterpreterStatus status, + bool allow_preexisting_pyobj = false); +extern PyTypeObject* THPStorageClass; + +static inline bool THPStorage_CheckTypeExact(PyTypeObject* tp) { + return tp == THPStorageClass; +} + +static inline bool THPStorage_CheckExact(PyObject* obj) { + return THPStorage_CheckTypeExact(Py_TYPE(obj)); +} + +inline bool THPStorage_Check(PyObject* obj) { + if (!THPStorageClass) + return false; + + const auto result = PyObject_IsInstance(obj, (PyObject*)THPStorageClass); + if (result == -1) + throw python_error(); + return result; +} + +bool THPStorage_init(PyObject* module); +void THPStorage_postInit(PyObject* module); + +void THPStorage_assertNotNull(THPStorage* storage); +void THPStorage_assertNotNull(PyObject* obj); + +extern PyTypeObject THPStorageType; + +inline const 
c10::Storage& THPStorage_Unpack(THPStorage* storage) { + return *storage->cdata; +} + +inline const c10::Storage& THPStorage_Unpack(PyObject* obj) { + return THPStorage_Unpack(reinterpret_cast(obj)); +} + +#endif diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/StorageMethods.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/StorageMethods.h new file mode 100644 index 0000000000000000000000000000000000000000..bd0825fa30142ba7101510765b1b230142ab4f0c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/StorageMethods.h @@ -0,0 +1,8 @@ +#ifndef THP_STORAGE_METHODS_INC +#define THP_STORAGE_METHODS_INC + +#include + +PyMethodDef* THPStorage_getMethods(); + +#endif diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/Types.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/Types.h new file mode 100644 index 0000000000000000000000000000000000000000..01a20cb01dff68de245f3c8b16ca1914beb76a1c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/Types.h @@ -0,0 +1,13 @@ +#ifndef THP_TYPES_INC +#define THP_TYPES_INC + +#include + +#ifndef INT64_MAX +#include +#endif + +template +struct THPTypeInfo {}; + +#endif diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/FunctionsManual.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/FunctionsManual.h new file mode 100644 index 0000000000000000000000000000000000000000..c78f2b80c806a139b111525cbfd4434b14a2cc49 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/FunctionsManual.h @@ -0,0 +1,1101 @@ +#pragma once + +// NB: Must be at the top of file to avoid including the deprecated "math.h". 
+// https://stackoverflow.com/questions/6563810/m-pi-works-with-math-h-but-not-with-cmath-in-visual-studio +#ifdef _MSC_VER +#ifndef _USE_MATH_DEFINES +#define _USE_MATH_DEFINES +#endif +#include +#endif + +#include +#include + +namespace torch::autograd::generated::details { + +extern const char* kCudnnDoubleBackwardMsg; + +// A simple way to imperatively compute index ranges for slots +// that have been flattened +struct TORCH_API IndexRangeGenerator { + IndexRange range(size_t range_size) { + i += range_size; + return {i - range_size, i}; + } + size_t size() { + return i; + } + + private: + size_t i = 0; +}; + +TORCH_API Tensor toNonOptFwGrad(const c10::optional& t); +TORCH_API Tensor toNonOptPrimal(const c10::optional& t); +TORCH_API Tensor toNonOptTensor(const c10::optional& t); + +TORCH_API inline c10::optional wrap_opt_if( + const Tensor& t, + const bool cond) { + using OptTensor = c10::optional; + return cond ? OptTensor(t) : static_cast(c10::nullopt); +} + +TORCH_API Tensor +apply_loss_reduction(const Tensor& unreduced, int64_t reduction); +TORCH_API bool any_variable_defined(const variable_list& variables); +TORCH_API void copy_range( + variable_list& out, + IndexRange range, + const at::Tensor& t); +TORCH_API void copy_range( + variable_list& out, + IndexRange range, + at::ArrayRef t); +TORCH_API at::Tensor copysign_tensor_self_backward( + const Tensor& grad, + const Tensor& self, + const Tensor& result); +TORCH_API at::Tensor not_implemented(const char* name, const char* reason = ""); +TORCH_API std::vector not_implemented_list( + const char* name, + const char* reason = ""); +at::Tensor handle_r_to_c(ScalarType self_st, Tensor gradient_result); +at::Tensor maybe_multiply(const at::Tensor& t, const at::Scalar& s); +int64_t _safe_size(IntArrayRef sizes, IntArrayRef dim); +Tensor restore_reduced_dims( + const Tensor& output, + IntArrayRef dims, + bool keepdim); +Tensor scale_grad_by_count( + const Tensor& grad, + const Tensor& mask, + IntArrayRef dims); 
+at::Tensor norm_backward( + const at::Tensor& grad, + const at::Tensor& self, + const optional& p_, + const at::Tensor& norm); +at::Tensor norm_backward( + at::Tensor grad, + const at::Tensor& self, + const optional& p_, + at::Tensor norm, + at::IntArrayRef dim, + bool keepdim); +Tensor norm_jvp( + const Tensor& self_p, + const Tensor& self_t, + const optional& p_, + Tensor norm, + IntArrayRef dim, + bool keepdim); +Tensor norm_jvp( + const Tensor& grad, + const Tensor& self, + const optional& p_, + Tensor norm); +Tensor _nested_from_padded_backward( + const Tensor& grad, + const Tensor& input, + const bool do_transform_0213); +std::tuple linear_double_backward( + const variable_list& grads, + const Tensor& self, + const Tensor& grad_output, + const Tensor& weight); +Tensor linalg_vector_norm_jvp( + const Tensor& self_p, + const Tensor& self_t, + const Scalar& scalar_ord, + Tensor norm, + const at::OptionalIntArrayRef& opt_dim, + bool keepdim); +at::Tensor linalg_vector_norm_backward( + at::Tensor grad, + const at::Tensor& self, + const at::Scalar& ord, + at::Tensor norm, + const at::OptionalIntArrayRef& opt_dim, + bool keepdim); +at::Tensor pow_backward( + at::Tensor grad, + const at::Tensor& self, + const at::Scalar& exponent_); +at::Tensor pow_backward_self( + const at::Tensor& grad, + const at::Tensor& self, + const at::Tensor& exponent); +at::Tensor pow_backward_exponent( + const at::Tensor& grad, + const at::Tensor& self, + const at::Tensor& exponent, + const at::Tensor& result); +at::Tensor pow_backward_exponent( + const at::Tensor& grad, + const at::Scalar& base, + const at::Tensor& exponent, + const at::Tensor& result); +at::Tensor angle_backward(const at::Tensor& grad, const at::Tensor& self); +template +at::Tensor mul_tensor_backward(const Tensor& grad, T other, ScalarType self_st); +template +at::Tensor div_tensor_self_backward( + const Tensor& grad, + T other, + ScalarType self_st); +at::Tensor div_tensor_other_backward( + const Tensor& grad, + const 
Tensor& self, + const Tensor& other); +template +at::Tensor div_tensor_self_backward( + const Tensor& grad, + T other, + ScalarType self_st, + const c10::optional& rounding_mode); +at::Tensor div_tensor_other_backward( + const Tensor& grad, + const Tensor& self, + const Tensor& other, + const c10::optional& rounding_mode); +at::Tensor mvlgamma_backward( + const at::Tensor& grad, + const at::Tensor& self, + int64_t p); +at::Tensor permute_backwards(const at::Tensor& grad, at::IntArrayRef fwd_dims); +at::Tensor rad2deg_backward(const at::Tensor& grad); +at::Tensor deg2rad_backward(const at::Tensor& grad); +at::Tensor unsqueeze_multiple( + const at::Tensor& t, + at::OptionalIntArrayRef opt_dim, + size_t n_dims); +at::Tensor sum_backward( + const at::Tensor& grad, + at::SymIntArrayRef sizes, + at::OptionalIntArrayRef opt_dims, + bool keepdim); +at::Tensor sum_backward( + const at::Tensor& grad, + c10::SymIntArrayRef sizes, + c10::IntArrayRef dims, + bool keepdim); +at::Tensor nansum_backward( + const at::Tensor& grad, + const at::Tensor& self, + at::OptionalIntArrayRef dims, + bool keepdim); +std::vector reverse_list(const at::IntArrayRef list); +std::vector reverse_list_symint(const c10::SymIntArrayRef list); +at::Tensor reverse_dim(const at::Tensor& t, int64_t dim); +at::Tensor prod_safe_zeros_backward( + const at::Tensor& grad, + const at::Tensor& inp, + int64_t dim); +at::Tensor prod_backward( + const at::Tensor& grad, + const at::Tensor& input, + const at::Tensor& result); +at::Tensor prod_backward( + at::Tensor grad, + const at::Tensor& input, + at::Tensor result, + int64_t dim, + bool keepdim); +at::Tensor solve_jvp( + const Tensor& X, + const Tensor& A, + const Tensor& dA, + const Tensor& dB); +at::Tensor solve_backward_self( + const at::Tensor& grad, + const at::Tensor& self, + const at::Tensor& A); +at::Tensor solve_backward_A( + const at::Tensor& grad, + const at::Tensor& self, + const at::Tensor& A, + const at::Tensor& solution); +at::Tensor 
cumsum_backward(const at::Tensor& grad, int64_t dim); +at::Tensor logsumexp_backward( + at::Tensor grad, + const at::Tensor& self, + at::Tensor result, + at::IntArrayRef dim, + bool keepdim); +at::Tensor logsumexp_jvp( + const at::Tensor& self_p, + const at::Tensor& self_t, + IntArrayRef dim, + bool keepdim); +at::Tensor logcumsumexp_backward( + at::Tensor grad, + const at::Tensor& self, + at::Tensor result, + int64_t dim); +at::Tensor logcumsumexp_jvp( + const at::Tensor& self_p, + const at::Tensor& self_t, + int64_t dim); +at::Tensor unbind_backward(const variable_list& grads, int64_t dim); +at::Tensor unbind_backward_nested( + const variable_list& grads, + const Tensor& nt_sizes, + int64_t dim, + const at::TensorOptions& options); +at::Tensor unsqueeze_to(const at::Tensor& self, c10::SymIntArrayRef sym_sizes); +at::Tensor unsqueeze_to( + const at::Tensor& self, + int64_t dim, + c10::SymIntArrayRef sym_sizes); +at::Tensor unsqueeze_to( + const at::Tensor& self, + IntArrayRef dim, + c10::SymIntArrayRef sym_sizes); +std::vector cat_tensors_backward( + const at::Tensor& grad, + const std::vector>& sizes, + const std::vector& dtypes, + int64_t dim); +std::vector stack_tensors_backward( + const at::Tensor& grad, + int64_t dim, + const std::vector& dtypes); +std::vector block_diag_backward( + const at::Tensor& grad, + const std::vector>& sizes, + const std::vector& dtypes); +at::Tensor clamp_backward( + const at::Tensor& grad, + const at::Tensor& self, + const optional& min, + const optional& max); +at::Tensor clamp_backward( + const at::Tensor& grad, + const at::Tensor& self, + const at::Tensor& min, + const at::Tensor& max); +std::tuple clamp_backward_min_max( + const at::Tensor& grad, + const at::Tensor& self, + const at::Tensor& min, + const at::Tensor& max, + const std::array&); +at::Tensor clamp_jvp( + const Tensor& self_p, + const Tensor& self_t, + const Tensor& min_p, + const Tensor& min_t, + const Tensor& max_p, + const Tensor& max_t); +at::SymIntArrayRef 
strides_or_error( + const Tensor& input, + c10::string_view const& input_name); +at::Tensor mm_mat1_backward( + const Tensor& grad, + const Tensor& mat2, + at::SymIntArrayRef mat1_sizes, + at::SymIntArrayRef mat1_strides, + c10::Layout mat1_layout, + const Scalar& alpha); +at::Tensor mm_mat2_backward( + const at::Tensor& grad, + const at::Tensor& mat1, + at::SymIntArrayRef sizes, + at::SymIntArrayRef strides, + c10::Layout layout, + const at::Scalar& alpha); +at::Tensor mm_mat1_sparse_backward( + const at::Tensor& grad, + const at::Tensor& mat1, + const at::Tensor& mat2, + const at::Scalar& alpha); +std::tuple sparse_sampled_addmm_backward( + const Tensor& grad, + const Tensor& self, + const c10::optional& mat1, + const c10::optional& mat2, + const Scalar& alpha, + const Scalar& beta, + const std::array& grad_input_mask); +at::Tensor sparse_mask_backward( + const at::Tensor& grad, + const at::Tensor& mask, + c10::Layout self_layout); +at::Tensor sparse_sparse_matmul_backward( + const at::Tensor& grad, + const at::Tensor& mat1, + const at::Tensor& mat2, + int64_t grad_order); +at::Tensor renorm_backward( + const at::Tensor& grad, + const at::Tensor& self, + const at::Scalar& p, + int64_t dim, + const at::Scalar& maxnorm); +at::Tensor renorm_jvp( + const at::Tensor& self_p, + const at::Tensor& self_t, + const at::Scalar& p, + int64_t dim, + const at::Scalar& maxnorm); +at::Tensor repeat_backward( + at::Tensor grad, + at::SymIntArrayRef repeats, + at::SymIntArrayRef input_shape); +at::Tensor _fused_dropout_backward( + const at::Tensor& grad, + const at::Tensor& mask, + double p1m); +at::Tensor infinitely_differentiable_native_dropout_backward( + const at::Tensor& grad, + const at::Tensor& mask, + double scale); +at::Tensor native_dropout_double_backward( + const at::Tensor& ggI, + const at::Tensor& grad, + const at::Tensor& mask, + double scale); +at::Tensor evenly_distribute_backward( + const at::Tensor& grad, + const at::Tensor& input, + const at::Tensor& value); 
+Tensor sgn_backward(const Tensor& x, const Tensor& gx, const Tensor& sgn); +Tensor masked_fill_backward(const Tensor& grad, const Tensor& mask); +at::Tensor var_backward( + at::Tensor grad, + const at::Tensor& self, + at::OptionalIntArrayRef dim, + const c10::optional& correction, + bool keepdim); +at::Tensor var_jvp( + const at::Tensor& self_t, + const at::Tensor& self_p, + const at::Tensor& result, + at::OptionalIntArrayRef dim_opt, + const c10::optional& correction, + bool keepdim); +at::Tensor std_backward( + const at::Tensor& result, + const at::Tensor& grad, + const at::Tensor& self, + at::OptionalIntArrayRef dim, + const c10::optional& correction, + bool keepdim); +Tensor mean_backward( + const Tensor& grad, + c10::SymIntArrayRef shape, + at::OptionalIntArrayRef opt_dim, + c10::SymInt numel, + bool keepdim); +Tensor var_mean_backward( + const Tensor& gvar, + const Tensor& gmean, + const Tensor& self, + at::OptionalIntArrayRef dim_opt, + const c10::optional& correction, + bool keepdim); +Tensor std_mean_backward( + const Tensor& gstd, + const Tensor& gmean, + const Tensor& self, + const Tensor& std, + at::OptionalIntArrayRef dim_opt, + const c10::optional& correction, + bool keepdim); +at::Tensor cholesky_backward( + const at::Tensor& grad, + bool upper, + const at::Tensor& L); +at::Tensor cholesky_jvp( + const at::Tensor& input_tangent, + const at::Tensor& L, + bool upper); +at::Tensor cholesky_inverse_backward( + const at::Tensor& grad, + const at::Tensor& L, + bool upper, + const at::Tensor& inverse); +at::Tensor cholesky_inverse_jvp( + const at::Tensor& F, + const at::Tensor& dF, + const at::Tensor& X, + bool upper); +Tensor pinv_jvp(const Tensor& A, const Tensor& pinvA, const Tensor& dA); +Tensor pinv_backward(const Tensor& grad, const Tensor& pinvA, const Tensor& A); +at::Tensor split_with_sizes_backward( + const std::vector& grads, + c10::SymIntArrayRef split_sizes, + int64_t dim, + c10::SymIntArrayRef sizes, + const at::TensorOptions& options); 
+at::Tensor _nested_split_with_sizes_backward( + const std::vector& grads, + c10::SymIntArrayRef split_sizes, + int64_t dim, + const Tensor& nt_sizes, + const at::TensorOptions& options); +at::Tensor split_backward( + const std::vector& grads, + const c10::SymInt& split_size, + int64_t dim, + c10::SymIntArrayRef sizes, + const at::TensorOptions& options); +at::Tensor max_pool_double_backward( + const at::Tensor& grad, + const at::Tensor& indices, + int dim); +at::Tensor error_for_max_pool2d_double_backward(); +at::Tensor glu_double_backward( + const at::Tensor& grad, + const at::Tensor& grad_output, + const at::Tensor& input, + int64_t dim); +at::Tensor glu_double_backward_grad_output( + const at::Tensor& grad, + const at::Tensor& input, + int64_t dim); +at::Tensor infinitely_differentiable_silu_backward( + const at::Tensor& grad_output, + const at::Tensor& input); +at::Tensor infinitely_differentiable_mish_backward( + const at::Tensor& grad_output, + const at::Tensor& input); +Tensor infinitely_differentiable_logit_backward( + const Tensor& grad, + const Tensor& self, + c10::optional eps); +Tensor binary_cross_entropy_target_backward( + const Tensor& grad, + const Tensor& self, + const Tensor& target, + const c10::optional& weight, + int64_t reduction); +Tensor binary_cross_entropy_double_backward_target( + const Tensor& grad, + const Tensor& grad_output, + const Tensor& self, + const Tensor& target, + const c10::optional& weight, + int64_t reduction); +Tensor binary_cross_entropy_with_logits_backward( + const Tensor& grad, + const Tensor& input, + const Tensor& target, + const c10::optional& weight_opt, + const c10::optional& pos_weight_opt, + int64_t reduction); +at::Tensor binary_cross_entropy_with_logits_target_backward( + const at::Tensor& grad_output, + const at::Tensor& self, + const at::Tensor& target, + const c10::optional& weight, + const c10::optional& pos_weight, + int64_t reduction); +at::Tensor log_sigmoid_double_backward( + const at::Tensor& grad, + 
const at::Tensor& input); +at::Tensor softmax_double_backward( + const at::Tensor& grad, + const at::Tensor& grad_output, + int dim, + const at::Tensor& output); +at::Tensor binary_cross_entropy_double_backward( + const at::Tensor& grad_output, + const at::Tensor& grad, + const at::Tensor& input, + const at::Tensor& target, + const c10::optional& weight, + int64_t reduction); +at::Tensor binary_cross_entropy_double_backward_grad_output( + const at::Tensor& grad, + const at::Tensor& input, + const at::Tensor& target, + const c10::optional& weight, + int64_t reduction); +at::Tensor smooth_l1_loss_double_backward( + const at::Tensor& grad, + const at::Tensor& input, + const at::Tensor& target, + int64_t reduction, + double beta); +at::Tensor huber_loss_double_backward( + const at::Tensor& grad, + const at::Tensor& input, + const at::Tensor& target, + int64_t reduction, + double delta); +at::Tensor huber_loss_double_backward_grad_output( + const at::Tensor& grad, + const at::Tensor& grad_output, + const at::Tensor& input, + const at::Tensor& target, + int64_t reduction, + double delta); +at::Tensor mse_loss_double_backward( + const at::Tensor& grad, + const at::Tensor& input, + int64_t reduction); +at::Tensor soft_margin_loss_double_backward( + const at::Tensor& grad, + const at::Tensor& input, + const at::Tensor& target, + int64_t reduction); +at::Tensor soft_margin_loss_double_backward_grad_output( + const at::Tensor& grad, + const at::Tensor& grad_output, + const at::Tensor& input, + const at::Tensor& target, + int64_t reduction); +at::Tensor softplus_double_backward( + const at::Tensor& grad, + const at::Tensor& input, + const at::Scalar& beta, + const at::Scalar& threshold); +std::tuple slogdet_jvp( + const at::Tensor& LU, + const at::Tensor& pivots, + const at::Tensor& dA, + const at::Tensor& sign, + const bool use_A_T); +at::Tensor slogdet_backward( + const at::Tensor& grad_sign, + const at::Tensor& grad_logabsdet, + const at::Tensor& A, + const at::Tensor& 
signdet, + const at::Tensor& LU, + const at::Tensor& pivots); +at::Tensor log1p_backward(const at::Tensor& grad, const at::Tensor& self); +at::Tensor sinc_backward(const at::Tensor& grad, const at::Tensor& self); +at::Tensor sparse_constructor_values_backward( + const at::Tensor& sparse_grad_out, + const at::Tensor& indices); +at::Tensor embedding_dense_double_backward_symint( + const at::Tensor& grad, + const at::Tensor& indices, + const c10::SymInt& padding_idx); +at::Tensor index_backward( + at::Tensor zeros_like_self, + const torch::List>& indices, + const at::Tensor& grad); +at::Tensor _cudnn_ctc_loss_backward( + const at::Tensor& grad_out, + const at::Tensor& loss, + const at::Tensor& raw_grad, + bool zero_infinity); +at::Tensor elu_double_backward( + const Tensor& grad, + const Tensor& grad_output, + const Scalar& alpha, + const Scalar& scale, + const Scalar& input_scale, + bool is_result, + const Tensor& self_or_result); + +Tensor svd_backward( + const Tensor& gU, + const Tensor& gS, + const Tensor& gVh, + const Tensor& U, + const Tensor& S, + const Tensor& Vh); + +std::tuple linalg_svd_jvp( + const Tensor& dA, + const Tensor& U, + const Tensor& S, + const Tensor& Vh, + const bool full_matrices); +Tensor slice_backward_wrapper( + const at::Tensor& grad, + const c10::SymIntArrayRef& input_sizes, + int64_t dim, + c10::optional start, + c10::optional end, + c10::SymInt step); +std::tuple linalg_eig_jvp( + const Tensor& dA, + const Tensor& L, + const Tensor& V, + const bool is_hermitian); +Tensor linalg_eig_backward( + const Tensor& gL, + const Tensor& gV, + const Tensor& L, + const Tensor& V, + const bool is_hermitian, + const bool symeig_eigenvectors = true); +Tensor linalg_lstsq_jvp( + const Tensor& A, + const Tensor& B, + const Tensor& dA, + const Tensor& dB); +std::tuple triangular_solve_backward( + const Tensor& grad_x, + const Tensor& grad_m, + const Tensor& b, + const Tensor& a, + const Tensor& x, + const bool upper, + const bool transpose, + const bool 
unitriangular, + std::array output_mask); +Tensor triangular_solve_jvp( + const Tensor& X, + const Tensor& A, + const Tensor& dA, + const Tensor& dB, + const bool upper, + const bool transpose, + const bool unitriangular); +Tensor linalg_solve_triangular_forward_AD( + const Tensor& A_t, + const Tensor& B_t, + const Tensor& A, + const Tensor& X, + const bool upper, + const bool left, + const bool unitriangular); +std::tuple linalg_solve_triangular_backward( + const Tensor& grad, + const Tensor& A, + const Tensor& X, + const bool upper, + const bool left, + const bool unitriangular, + std::array output_mask); +std::tuple _trilinear_backward( + const Tensor& grad_out, + const c10::optional& i1, + const c10::optional& i2, + const c10::optional& i3, + IntArrayRef expand1, + IntArrayRef expand2, + IntArrayRef expand3, + IntArrayRef sumdim, + std::array grad_mask); +std::tuple linalg_qr_jvp( + const Tensor& dA, + const Tensor& Q, + const Tensor& R, + const c10::string_view mode); +Tensor linalg_qr_backward( + const Tensor& gQ, + const Tensor& gR, + const Tensor& Q, + const Tensor& R, + const c10::string_view mode); +Tensor linalg_matrix_exp_differential( + const Tensor& self, + const Tensor& grad, + bool adjoint); +std::tuple batchnorm_double_backward( + const Tensor& input, + const c10::optional& gamma, + const Tensor& ggI, + const Tensor& ggG, + const Tensor& ggB, + const Tensor& gO, + const c10::optional& running_mean, + const c10::optional& running_var, + bool training, + double eps, + const c10::optional& save_mean, + const c10::optional& save_invstd, + std::array output_mask); +std::tuple _euclidean_dist_backward( + const Tensor& grad, + const Tensor& x1, + const Tensor& x2, + const Tensor& res); +Tensor fft_backward( + const Tensor& self, + const Tensor& grad, + int64_t signal_ndim, + bool complex_input, + bool complex_output, + bool inverse, + IntArrayRef checked_signal_sizes, + int64_t normalization, + bool onesided, + IntArrayRef output_sizes); +Tensor 
fft_r2c_backward( + const Tensor& grad, + at::IntArrayRef dim, + int64_t normalization, + bool onesided, + const c10::SymInt& last_dim_size); +Tensor fft_c2r_backward( + const Tensor& grad, + IntArrayRef dim, + int64_t normalization); +Tensor constant_pad_nd_backward(const Tensor& grad, c10::SymIntArrayRef pad); +std::tuple cholesky_solve_backward( + const Tensor& grad_x, + const Tensor& self, + const Tensor& input2, + const Tensor& result, + const bool upper, + std::array output_mask); +Tensor cholesky_solve_jvp( + const Tensor& X, + const Tensor& U, + const Tensor& dU, + const Tensor& dB, + const bool upper); +std::tuple +infinitely_differentiable_native_group_norm_backward( + const Tensor& dY, + const Tensor& dmean, + const Tensor& drstd, + const Tensor& X, + const Tensor& mean, + const Tensor& rstd, + const c10::optional& gamma, + c10::SymInt N, + const c10::SymInt& C, + c10::SymInt HxW, + int64_t group, + double eps, + std::array grad_input_mask); +Tensor gelu_double_backward( + const Tensor& ggI, + const Tensor& gO, + const Tensor& input, + c10::string_view approximate); +Tensor as_strided_backward( + Tensor grad, + const TensorGeometry& input_geometry, + c10::SymIntArrayRef sizes, + c10::SymIntArrayRef strides, + const optional& storage_offset_); +Tensor as_strided_scatter_backward( + const Tensor& grad, + const TensorGeometry& input_geometry, + const TensorGeometry& src_geometry, + c10::SymIntArrayRef sizes, + c10::SymIntArrayRef strides, + optional storage_offset); +std::tuple atan2_backward( + const Tensor& grad, + const Tensor& self, + const Tensor& other, + std::array output_mask); +Tensor amaxamin_jvp( + const Tensor& x, + const Tensor& dx, + const Tensor& result, + IntArrayRef dim, + bool keepdim); +std::tuple layer_norm_double_backward( + const Tensor& input, + const c10::optional& gamma, + const Tensor& ggI, + const Tensor& ggG, + const Tensor& ggB, + const Tensor& gO, + const Tensor& save_mean, + const Tensor& save_invstd, + c10::SymIntArrayRef 
normalized_shape, + std::array output_mask); + +std::tuple householder_product_backward( + const Tensor& grad, + const Tensor& result, + const Tensor& input, + const Tensor& tau, + const bool flip_order = false); +Tensor householder_product_jvp( + const Tensor& dV, + const Tensor& dtau, + const Tensor& prod, + const Tensor& V, + const Tensor& tau); +std::tuple ormqr_backward( + const Tensor& grad, + const Tensor& result, + const Tensor& self, + const Tensor& tau, + const Tensor& other, + bool left, + bool transpose, + std::array grad_output_mask); +std::tuple polar_backward( + const Tensor& grad, + const Tensor& result); +Tensor i1_backward( + const Tensor& grad, + const Tensor& self, + const Tensor& result); +Tensor i1e_backward( + const Tensor& grad, + const Tensor& self, + const Tensor& result); +Tensor linalg_lu_solve_LU( + const Tensor& grad, + const Tensor& LU, + const Tensor& pivots, + const Tensor& X, + const bool left, + const bool adjoint); +Tensor linalg_lu_solve_jvp( + const Tensor& X, + const Tensor& LU, + const Tensor& pivots, + const Tensor& dLU, + const Tensor& dB, + const bool left, + const bool adjoint); +std::tuple linalg_solve_backward( + const Tensor& gX, + const Tensor& X, + const Tensor& A, + const Tensor& LU, + const Tensor& pivots, + const bool left, + const bool B_requires_grad); +Tensor linalg_solve_jvp( + const Tensor& dA, + const Tensor& dB, + const Tensor& X, + const Tensor& LU, + const Tensor& pivots, + const bool left, + const bool use_A_T); +Tensor lu_unpack_backward( + const Tensor& L_grad, + const Tensor& U_grad, + const c10::SymInt& m, + const c10::SymInt& n); + +Tensor linalg_det_backward( + const Tensor& grad, + const Tensor& det, + const Tensor& A, + const Tensor& LU, + const Tensor& pivots); +Tensor linalg_det_jvp( + const Tensor& dA, + const Tensor& det, + const Tensor& LU, + const Tensor& pivots, + const bool use_A_T); +std::tuple linalg_lstsq_backward( + const Tensor& grad, + const Tensor& A, + const Tensor& B_, + const 
std::array& grad_input_mask); +Tensor linalg_lu_backward( + const Tensor& L_grad, + const Tensor& U_grad, + const Tensor& P, + const Tensor& L, + const Tensor& U, + const bool pivot); + +std::tuple linalg_lu_jvp( + const Tensor& dA, + const Tensor& P, + const Tensor& L, + const Tensor& U, + const bool pivot); + +Tensor lu_factor_ex_backward( + const Tensor& grad, + const Tensor& LU, + const Tensor& pivs, + const bool pivot); +Tensor lu_factor_ex_jvp( + const Tensor& dX, + const Tensor& LU, + const Tensor& pivs, + const bool pivot); + +Tensor batch_norm_jvp( + const Tensor& input_p, + const Tensor& input_t, + const Tensor& weight_p, + const Tensor& weight_t, + const Tensor& bias_p, + const Tensor& bias_t, + const c10::optional& running_mean, + const c10::optional& running_var, + const Tensor& saved_mean, + const Tensor& saved_invstd, + bool train, + double eps); + +Tensor layer_norm_jvp( + const Tensor& input_p, + const Tensor& input_t, + const Tensor& weight_p, + const Tensor& weight_t, + const Tensor& bias_p, + const Tensor& bias_t, + const Tensor& saved_mean, + const Tensor& saved_invstd, + c10::SymIntArrayRef normalized_shape); + +Tensor group_norm_jvp( + const Tensor& input_p, + const Tensor& input_t, + const Tensor& weight_p, + const Tensor& weight_t, + const Tensor& bias_p, + const Tensor& bias_t, + const Tensor& saved_mean, + const Tensor& saved_invstd, + int64_t groups); +Tensor group_norm_mean_jvp( + const Tensor& input_t, + const Tensor& mean_p, + int64_t groups); +Tensor group_norm_invstd_jvp( + const Tensor& input_p, + const Tensor& input_t, + const Tensor& mean_p, + const Tensor& invstd_p, + int64_t groups); + +Tensor convolution_jvp( + const Tensor& input_p, + const Tensor& input_t, + const Tensor& weight_p, + const Tensor& weight_t, + const Tensor& bias_p, + const Tensor& bias_t, + at::SymIntArrayRef stride, + at::SymIntArrayRef padding, + at::SymIntArrayRef dilation, + bool transposed, + at::SymIntArrayRef output_padding, + const c10::SymInt& 
groups); + +Tensor _convolution_jvp( + const Tensor& input_p, + const Tensor& input_t, + const Tensor& weight_p, + const Tensor& weight_t, + const Tensor& bias_p, + const Tensor& bias_t, + at::SymIntArrayRef stride, + at::SymIntArrayRef padding, + at::SymIntArrayRef dilation, + bool transposed, + at::SymIntArrayRef output_padding, + const c10::SymInt& groups, + bool benchmark, + bool deterministic, + bool cudnn_enabled, + bool allow_tf32); + +Tensor convolution_backward_jvp_grad_bias( + const Tensor& grad_out_t, + const Tensor& grad_bias); + +Tensor cat_jvp(const at::ITensorListRef& tensors, int64_t dim); +Tensor block_diag_jvp(at::TensorList tensors); +Tensor stack_jvp(at::TensorList tensors, int64_t dim); +Tensor cumprod_jvp( + const Tensor& self_t, + const Tensor& self_p, + const Tensor& result, + int dim); +Tensor gather_with_keepdimed_indices( + const Tensor& input, + int64_t dim, + const Tensor& indices, + bool keepdim); +Tensor evenly_read_jvp( + const Tensor& fw_grad, + const Tensor& input, + const Tensor& value); +Tensor warn_backwards(const Tensor& grad_output); + +std::tuple _cudnn_convolution_backward( + const at::Tensor& self, + const at::Tensor& grad_output, + const at::Tensor& weight, + at::SymIntArrayRef padding, + at::SymIntArrayRef output_padding, + at::SymIntArrayRef stride, + at::SymIntArrayRef dilation, + bool transposed, + c10::SymInt groups, + ::std::array output_mask); + +Tensor scatter_reduce_jvp( + const Tensor& self_p, + const Tensor& self_t, + int dim, + const Tensor& index, + const Tensor& src_p, + const Tensor& src_t, + c10::string_view reduce, + bool include_self, + const Tensor& result); + +std::tuple scatter_reduce_backward( + const Tensor& grad, + const Tensor& self, + int dim, + const Tensor& index, + const Tensor& src, + c10::string_view reduce, + bool include_self, + const Tensor& result); + +Tensor _to_copy_backward( + const Tensor& grad, + const c10::TensorOptions& self_options); + +std::tuple index_reduce_backward( + const 
Tensor& grad, + const Tensor& self, + int dim, + const Tensor& index, + const Tensor& source, + c10::string_view reduce, + bool include_self, + const Tensor& result); + +Tensor take_backward( + const Tensor& grad, + const Tensor& self, + const Tensor& indices); + +Tensor to_sparse_backward( + const Tensor& grad, + const c10::Layout self_layout, + const c10::OptionalArrayRef& self_blocksize); + +std::tuple +mkldnn_rnn_layer_differentiable_backward( + const Tensor& input, + const Tensor& weight0, + const Tensor& weight1, + const Tensor& weight2, + const Tensor& weight3, + const Tensor& hx_, + const Tensor& cx_tmp, + const Tensor& output, + const Tensor& hy_, + const Tensor& cy_, + const c10::optional& grad_output_r_opt, + const c10::optional& grad_hy_r_opt, + const c10::optional& grad_cy_r_opt, + bool reverse, + int64_t mode, + int64_t hidden_size, + int64_t num_layers, + bool has_biases, + bool train, + bool bidirectional, + at::IntArrayRef batch_sizes, + bool batch_first, + const at::Tensor& workspace); + +Tensor values_backward(const Tensor& grad, const Tensor& self); + +} // namespace torch::autograd::generated::details diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/anomaly_mode.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/anomaly_mode.h new file mode 100644 index 0000000000000000000000000000000000000000..e29d1bbf054cb55890d9186bf6bc5374f9ed8f1a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/anomaly_mode.h @@ -0,0 +1,71 @@ +#pragma once + +#include +#include +#include + +namespace torch::autograd { + +// forward declaration of Node from function.h +struct Node; + +struct TORCH_API AnomalyMode { + static bool is_enabled() { + return _enabled; + } + static bool should_check_nan() { + return _check_nan; + } + static void set_enabled(bool enabled, bool check_nan = true) { + _enabled = enabled; + _check_nan = check_nan; + } + + private: + static 
bool _enabled; + static bool _check_nan; +}; + +/// A RAII guard that enables Anomaly Detection Mode. +/// +/// Anomaly detection mode is useful for debugging problems happening +/// in the backward, such as unexpectedly modified tensors or NaNs +/// occuring in the backward. +/// +/// The enabling of anomaly mode is global - as soon as there is one +/// such guard, it is enabled for all computation and threads. It also +/// comes with a significant performance penalty. +/// +/// Example: +/// @code +/// auto x = torch::tensor({1.}, torch::requires_grad()); +/// { +/// torch::autograd::DetectAnomalyGuard detect_anomaly; +/// auto x = torch::tensor({5.0}, torch::requires_grad()); +/// auto y = x * x; +/// auto z = y * y; +/// y += 1; +/// z.backward(); +/// } +/// @endcode +class TORCH_API DetectAnomalyGuard { + public: + DetectAnomalyGuard(bool check_nan = true); + ~DetectAnomalyGuard(); + + private: + bool prev_check_nan_; +}; + +struct TORCH_API AnomalyMetadata { + virtual ~AnomalyMetadata(); + virtual void store_stack(); + virtual void print_stack(const std::string& current_node_name); + virtual void assign_parent(const std::shared_ptr& parent_node); + + private: + std::string traceback_; + std::shared_ptr parent_; +}; + +} // namespace torch::autograd diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/autograd.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/autograd.h new file mode 100644 index 0000000000000000000000000000000000000000..3537df9bc4a7dfa7c3d700a2a6de00d88d328219 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/autograd.h @@ -0,0 +1,104 @@ +#pragma once + +#include + +namespace torch::autograd { + +/// Computes the sum of gradients of given tensors with respect to graph leaves. +/// +/// The graph is differentiated using the chain rule. If any of ``tensors`` +/// are non-scalar (i.e. 
their data has more than one element) and require +/// gradient, then the Jacobian-vector product would be computed, in this case +/// the function additionally requires specifying `grad_tensors`. It should be a +/// sequence of matching length, that contains the "vector" in the +/// Jacobian-vector product, usually the gradient of the differentiated function +/// w.r.t. corresponding tensors +/// (`torch::Tensor()` is an acceptable value for all tensors that don't need +/// gradient tensors). +/// +/// This function accumulates gradients in the leaves - you might need to zero +/// them before calling it. +/// +/// \param tensors Tensors of which the derivative will be computed. +/// \param grad_tensors The "vector" in the Jacobian-vector product, usually +/// gradients +/// w.r.t. each element of corresponding tensors. `torch::Tensor()` values +/// can be specified for scalar Tensors or ones that don't require grad. If +/// a `torch::Tensor()` value would be acceptable for all grad_tensors, then +/// this argument is optional. +/// \param retain_graph If `false`, the graph used to compute the grad will be +/// freed. +/// Note that in nearly all cases setting this option to `true` is not +/// needed and often can be worked around in a much more efficient way. +/// Defaults to the value of `create_graph`. +/// \param create_graph If `true`, graph of the derivative will be constructed, +/// allowing +/// to compute higher order derivative products. Defaults to `false`. +/// \param inputs Inputs w.r.t. which the gradient will be accumulated into +/// `at::Tensor::grad`. All other Tensors will be ignored. If not provided, +/// the gradient is accumulated into all the leaf Tensors that were used to +/// compute param `tensors`. +// When inputs are provided and a given input is not a leaf, +// the current implementation will call its grad_fn (even though it is not +// strictly needed to get this gradients). 
It is an implementation detail +// on which the user should not rely. See +// https://github.com/pytorch/pytorch/pull/60521#issuecomment-867061780 for +// more details. +TORCH_API void backward( + const variable_list& tensors, + const variable_list& grad_tensors = {}, + c10::optional retain_graph = c10::nullopt, + bool create_graph = false, + const variable_list& inputs = {}); + +/// Computes and returns the sum of gradients of outputs with respect to the +/// inputs. +/// +/// ``grad_outputs`` should be a sequence of length matching ``output`` +/// containing the "vector" in Jacobian-vector product, usually the pre-computed +/// gradients w.r.t. each of the outputs. If an output doesn't require_grad, +/// then the gradient can be ``torch::Tensor()``). +/// +/// \param outputs outputs of the differentiated function. +/// \param inputs Inputs w.r.t. which the gradient will be +/// returned (and not accumulated into ``at::Tensor::grad``). +/// \param grad_outputs The "vector" in the Jacobian-vector product. +/// Usually gradients w.r.t. each output. `torch::Tensor()` values can be +/// specified for scalar Tensors or ones that don't require grad. If a +/// `torch::Tensor()` value would be acceptable for all grad_tensors, then +/// this argument is optional. Default: `{}`. +/// \param retain_graph If ``false``, the graph used to compute the grad +/// will be freed. Note that in nearly all cases setting this option to +/// ``true`` is not needed and often can be worked around in a much more +/// efficient way. Defaults to the value of ``create_graph``. +/// \param create_graph If ``true``, graph of the derivative will +/// be constructed, allowing to compute higher order derivative products. +/// Default: ``false``. +/// \param allow_unused If ``false``, specifying inputs that were not +/// used when computing outputs (and therefore their grad is always zero) +/// is an error. Defaults to ``false``. 
+TORCH_API variable_list grad( + const variable_list& outputs, + const variable_list& inputs, + const variable_list& grad_outputs = {}, + c10::optional retain_graph = c10::nullopt, + bool create_graph = false, + bool allow_unused = false); + +namespace forward_ad { + +/// Creates a new dual level and returns its index. This level index should then +/// be used to call into the other functions below. This API supports entering a +/// new level before the previous one is exited. We call them nested forward AD +/// levels. These can be used to compute higher order derivatives. +TORCH_API uint64_t enter_dual_level(); + +/// Exits the given level. This will clear up all the gradients from this level +/// and all dual Tensors that had gradients for this level will become regular +/// Tensors again. This function can only be used to exit the innermost nesting +/// level and so exiting must happen in reverse order compared to the entering +/// that was done with the function above. +TORCH_API void exit_dual_level(uint64_t level); + +} // namespace forward_ad +} // namespace torch::autograd diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/cpp_hook.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/cpp_hook.h new file mode 100644 index 0000000000000000000000000000000000000000..e48c587296b2d2874231bb7676f88dbdec0aad9f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/cpp_hook.h @@ -0,0 +1,29 @@ +#pragma once +#include +#include +#include + +namespace torch::autograd { + +using hooks_list = + std::vector>; + +struct CppFunctionTensorPreHook : public FunctionPreHook { + CppFunctionTensorPreHook(std::shared_ptr hooks, size_t value_idx); + variable_list operator()(const variable_list& values) override; + + std::shared_ptr hooks_; + size_t value_idx_; +}; + +struct CppFunctionSingleTensorPreHook : public FunctionPreHook { + CppFunctionSingleTensorPreHook( + std::function 
hook, + size_t value_idx); + variable_list operator()(const variable_list& values) override; + + std::function hook_; + size_t value_idx_; +}; + +} // namespace torch::autograd diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/engine.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/engine.h new file mode 100644 index 0000000000000000000000000000000000000000..0b3f3ae67f0acd5a048a6c65ceb284730a681e1a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/engine.h @@ -0,0 +1,288 @@ +#pragma once + +// Engine implements backpropagation from output variables and their gradients +// to "root" variables (variables created by the user with requires_grad=True). + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include + +namespace torch::autograd { +struct ReadyQueue; +} + +namespace torch::autograd { + +// Maximum reentrant backward depth before switching to a new thread +// This limit is based on the TSAN's deadlock detector, where it will +// fail if a program hold more than 65 locks in one thread at once. +// As we hold mutex in every of our custom C++ autograd Node, we would +// like to avoid TSAN complains on this when doing reentrant backwards +// For reference, see https://github.com/google/sanitizers/issues/950 +static constexpr int MAX_DEPTH = 60; + +void set_device(int device); +TORCH_API void validate_outputs( + const edge_list& edges, + variable_list& grads, + const std::function& format_error); + +struct NodeTask { + std::weak_ptr base_; + std::shared_ptr fn_; + // This buffer serves as an implicit "addition" node for all of the + // gradients flowing here. Once all the dependencies are finished, we + // use the contents of this buffer to run the function. 
+ InputBuffer inputs_;
+ // When worker receives a task with isShutdownTask = true, it will immediately
+ // exit. The engine sends a shutdown task to every queue upon its destruction.
+ bool isShutdownTask_;
+
+ int getReentrantDepth() const;
+
+ NodeTask(
+ std::weak_ptr base,
+ std::shared_ptr fn,
+ InputBuffer inputs,
+ bool isShutdownTask = false)
+ : base_(std::move(base)),
+ fn_(std::move(fn)),
+ inputs_(std::move(inputs)),
+ isShutdownTask_(isShutdownTask) {}
+};
+
+// Guard that sets and restores checkpoint_valid
+class CheckpointValidGuard {
+ public:
+ explicit CheckpointValidGuard(
+ const std::shared_ptr& graph_task);
+ ~CheckpointValidGuard();
+
+ private:
+ bool prev_checkpoint_valid_state; /* previous value; restored by the destructor */
+};
+
+struct ReadyQueue { /* mutex + condition-variable protected priority queue of NodeTasks */
+ private:
+ // Returns true when t2 should be (weakly) BEFORE t1 in the queue.
+ // Shutdown tasks are first and then empty NodeTask are next.
+ struct CompareNodeTaskTime {
+ bool operator()(NodeTask const& t1, NodeTask const& t2) {
+ // NOLINTNEXTLINE(bugprone-branch-clone)
+ if (t2.isShutdownTask_) {
+ return true;
+ } else if (!t1.fn_ || t1.isShutdownTask_) {
+ return false;
+ } else if (!t2.fn_) {
+ return true;
+ } else if (t1.getReentrantDepth() == t2.getReentrantDepth()) {
+ return t1.fn_->sequence_nr() < t2.fn_->sequence_nr();
+ } else {
+ return t1.getReentrantDepth() < t2.getReentrantDepth();
+ }
+ }
+ };
+
+ // To notify threads waiting on the ReadyQueue of available tasks on the heap_
+ std::condition_variable not_empty_;
+ // To protect read and writes to heap_
+ mutable std::mutex mutex_;
+
+ std::priority_queue, CompareNodeTaskTime>
+ heap_;
+
+ public:
+ // incrementOutstandingTasks indicates whether or not we should increment
+ // 'outstanding_tasks_' for the associated GraphTask. This should mostly
+ // always be true and is only set false in certain cases (see docs for
+ // DistEngine.execute_graph_task_until_ready_queue_empty)
+ void push(NodeTask item, bool incrementOutstandingTasks = true);
+ void pushShutdownTask();
+ NodeTask pop(); /* NOTE(review): presumably blocks on not_empty_ until a task is pushed -- confirm in engine.cpp */
+ bool empty() const;
+ size_t size() const;
+};
+
+// A single instance of this struct should be created through the whole process
+// lifetime. The worker thread creation logic and Engine's destructor rely on
+// this.
+struct TORCH_API Engine {
+ /// Returns a reference to a static `Engine` instance.
+ static Engine& get_default_engine();
+
+ static Engine& get_base_engine();
+
+ // compiled_autograd needs to live in a different .so file so that it
+ // can have python symbols, so we add a layer of indirection
+ // see [Note: Compiled Autograd]
+ typedef variable_list (*compiled_autograd_fn)(
+ const std::shared_ptr& graph_root,
+ GraphTask& graph_task,
+ bool accumulate_grad,
+ const edge_list& outputs);
+ static void set_compiled_autograd(compiled_autograd_fn fn);
+
+ Engine(const Engine&) = delete;
+ Engine(Engine&&) = delete;
+ virtual ~Engine();
+
+ // Given a list of (Node, input number) pairs computes the value of the graph
+ // by following next_edge references.
+ virtual variable_list execute(
+ const edge_list& roots,
+ const variable_list& inputs,
+ bool keep_graph,
+ bool create_graph,
+ bool accumulate_grad,
+ const edge_list& outputs = {});
+
+ // Given a pre-populated GraphTask and GraphRoot, computes the backward pass
+ // for the graph.
+ //
+ // NB: This API should only be used by internal autograd specific
+ // machinery and shouldn't be exposed to users in anyway.
+ virtual c10::intrusive_ptr execute_with_graph_task(
+ const std::shared_ptr& graph_task,
+ std::shared_ptr graph_root,
+ InputBuffer&& input_buffer);
+
+ virtual std::unique_ptr make_anomaly_metadata() {
+ return std::make_unique();
+ }
+
+ virtual std::unique_ptr get_default_saved_variable_hooks() {
+ return nullptr;
+ }
+
+ // We pass cpu_ready_queue to evaluate_function, so that it knows
+ // the correct ready queue to push to after a NodeTask is ready
+ void evaluate_function(
+ std::shared_ptr& graph_task,
+ Node* func,
+ InputBuffer& inputs,
+ const std::shared_ptr& cpu_ready_queue);
+
+ void initialize_device_threads_pool();
+ virtual void thread_on_exception(
+ std::shared_ptr graph_task,
+ const std::shared_ptr& fn,
+ std::exception& e);
+
+ void queue_callback(std::function callback);
+
+ bool is_checkpoint_valid();
+
+ // Should be called after fork to notify that worker threads are gone
+ void release_workers();
+
+ // Must be called by subclass before destructing to avoid a data-race-on-vptr.
+ void stop();
+
+ // Initializes a device thread for the autograd engine.
+ virtual void thread_init(
+ int device,
+ const std::shared_ptr& ready_queue,
+ bool should_increment = true);
+
+ protected:
+ Engine();
+ void compute_dependencies(Node* root, GraphTask& task, uint64_t min_topo_nr);
+
+ // initialize the thread local ready queue with the ready queue that is
+ // created elsewhere (i.e. thread_init, Engine::execute, etc), or create a new
+ // ready queue if ready_queue is not provided.
+ void init_local_ready_queue(
+ std::shared_ptr ready_queue = nullptr);
+
+ std::shared_ptr ready_queue(
+ std::shared_ptr cpu_ready_queue,
+ at::Device device);
+ std::shared_ptr ready_queue_by_index(
+ std::shared_ptr cpu_ready_queue,
+ int device_index);
+ // start device threads (CUDA, XLA, etc.) in Engine,
+ // note that it does NOT start CPU thread.
+ void start_device_threads();
+ void increment_non_reentrant_thread_count();
+ void decrement_non_reentrant_thread_count();
+ virtual void thread_main(const std::shared_ptr& task);
+ void reentrant_thread_init();
+ void add_thread_pool_task(const std::weak_ptr& graph_task);
+
+ // Ensures device_ready_queues_ are initialized only once
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
+ c10::once_flag start_device_threads_flag_;
+ // Safe to read device_ready_queues_ without synchronization after
+ // initialization
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
+ std::vector> device_ready_queues_;
+
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
+ std::vector> final_callbacks_;
+ // To protect reads and writes to final_callbacks_
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
+ std::mutex post_callbacks_lock_;
+
+ // How many nested reentrant calls are allowed until a new thread is used
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
+ int max_recursion_depth_;
+
+ struct ThreadPoolShared {
+ // Data structures used by the threads for executing reentrant backwards
+ // tasks. See Note [Reentrant backwards]
+ // Number of available threads for processing new GraphTasks.
+ unsigned int num_workers_{0};
+ // The threads will wait on work_ to be notified of GraphTasks
+ std::condition_variable work_;
+ // To protect reads and writes to graphtask_queue_ and num_workers_
+ // and for synchronizing creating new threads when needed
+ std::mutex mutex_;
+ // Workers will process the GraphTasks added to this queue. A GraphTask is
+ // allocated inside Engine::execute and lives for the duration of execute
+ std::queue> graphtasks_queue_;
+
+ ThreadPoolShared() = default;
+ };
+
+ // Temporary workaround until shutting down threads is done
+ // We need shared ownership of all these objects because the threads are
+ // leaked when Engine shuts down, so there may be threads waiting on work_ for
+ // the graphtasks_queue_ to be nonempty.
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
+ std::shared_ptr thread_pool_shared_;
+
+ private:
+ // Number of non-reentrant threads
+ std::atomic non_reentrant_device_thread_count_;
+ // Destructor will wait for non-reentrant threads to finish
+ std::condition_variable non_reentrant_device_thread_condvar_;
+ std::mutex non_reentrant_device_thread_mutex_;
+ // stop() must be called before the destruction path goes down to the base
+ // class, in order to avoid a data-race-on-vptr. Use this boolean to guard
+ // whether stop() has already been called, so we can call this in every
+ // destructor of the class hierarchy.
+ bool stopped_{false};
+};
+
+// allow python_engine to override the default engine when it loads
+using EngineStub = Engine& (*)();
+TORCH_API void set_default_engine_stub(EngineStub stub);
+
+} // namespace torch::autograd diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/forward_grad.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/forward_grad.h new file mode 100644 index 0000000000000000000000000000000000000000..9b111ac6b484896c798672ef0591ee3c04478e31 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/forward_grad.h @@ -0,0 +1,210 @@ +#pragma once
+
+#include
+#include
+
+namespace torch::autograd {
+
+// [ Using ForwardGrad ]
+// ForwardGrad needs to be a shared_ptr to satisfy constraints of its inner
+// design.
But this shared_ptr must be uniquely associated with the object that +// stores it (as of writing, either AutogradMeta or SavedVariable). This object +// is called the "owning object" in the discussions below. This owning object +// must call `ForwardGrad::clear()` when it is destroyed to ensure that the +// ForwardGrad is properly de-allocated. + +struct ForwardGrad; + +// This file contains two classes that are used to store forward AD gradients +// and ensure that they are scoped properly. Because forward AD runs +// concurrently with the evaluation of the function, we need a mechanism to +// separate different forward AD invocations and be able to compute the right +// gradients. We model such invocations as levels here. The particular scoping +// issue mentioned above has two main drivers: +// - Ensure that we can conveniently use forward AD within a high level API +// without +// leaking the forward AD states outside. +// - Ensure that we can keep the level that we expose to the user API simple +// (an integer +// that represents the nesting depth) while avoiding confusions when the +// level index is re-used. + +// The important external APIs from this file are: +// - ForwardADLevel::get_next_idx() that can be used to enter a new level and +// get its index +// - ForwardADLevel::release_idx() that can be used to exit a given level. +// - ForwardGrad() can be used to store a given forward gradient that will +// handle the level +// tracking automatically. + +// The basic implementation strategy is as follows: +// Every tensor has a ForwardGrad, maintaining a map from levels to tangents. +// ForwardGrad is responsible for registering itself to the appropriate +// ForwardADLevel when a new tangent is added to it via ForwardGrad::set_value +// and to un-register itself from this same level if that tangent is removed via +// ForwardGrad::reset. The ForwardADLevel is created when a new level is entered +// via ForwardADLevel::get_next_idx. 
A reference to the new ForwardADLevel is +// stored into a global (for the whole process) vector that ensure it can be +// accessed via ForwardADLevel::get_by_idx. This reference is deleted when the +// index is released by the user when calling ForwardADLevel::release_idx. When +// it is destructed, the ForwardADLevel is responsible for clearing all the +// tangents for its level stored in all the ForwardGrad that registered with it. +// +// This process-wide level design, compared to a thread local one, allows us to +// use very simple user facing handle for the level (an int) while enabling +// cross-thread forward AD. The only required synchronization for the user is +// when entering and exiting the levels. Some discussion on alternative design +// is in https://github.com/pytorch/pytorch/pull/49097#discussion_r543716453 and +// can be refined in the future. + +// Correctness of concurrency: +// Each class uses its own lock when reading or modifying internal storages. +// This allows in particular to safely remove tangents from ForwardGrad when the +// ForwardADLevel is being exited. We ensure no deadlock by ensuring that a +// methods never calls into another class's method while the local class's lock +// is held except in one single case: calling from ForwardADLevel's destructor +// into ForwardGrad::reset with update_level=false. + +// The lifetime of these objects is as follows: +// The ForwardADLevel can be in three states: +// - Initialized: where one of its reference is held by the global vector +// and there may be more +// references held by temporary variables in ForwardGrad's methods. +// - About to be destructed: where "release_idx" has been called and the +// only reason for the +// ForwardADLevel not to be destructed right away is that some methods in +// ForwardGrad have owning reference to it. 
This is done so that a
+// ForwardADLevel can never be destructed when a ForwardGrad is
+// registered with it and in the process of adding something to its
+// internal state.
+// - Being destructed: Here the ForwardADLevel is not referenced anymore
+// and can safely reset
+// all of the ForwardGrad. Note that we can have more than one reset
+// being called here (which is ok) but we are guaranteed that there is at
+// least one.
+// The ForwardGrad is simpler as there is no intermediary state and no special
+// destructor for it. The logic to unregister it from the different ForwardADLevel
+// is done when the owning object (AutogradMeta or SavedVariable) is being
+// destroyed.
+
+// Other considered design:
+// To avoid having the ForwardGrad::clear, we considered storing weak_ptr inside
+// the ForwardADLevel. While this would work, it would mean that the set inside
+// the ForwardADLevel would only grow unless we do an expensive linear scan to
+// remove all the dangling weak pointers. Hence this approach was not used.

+// Data structures in this file are optimized for this maximum number of levels.
+// The number of levels corresponds to the degree of the gradient being
+// computed using forward AD and we don't expect more than second order
+// gradients to be common.
+#define EXPECTED_MAX_LEVEL 2 /* see note above: forward gradients beyond 2nd order are expected to be rare */
+
+struct TORCH_API ForwardADLevel {
+ ForwardADLevel(uint64_t idx) : idx_(idx) {}
+ ~ForwardADLevel();
+
+ static uint64_t get_next_idx();
+ static void release_idx(uint64_t idx);
+ static std::shared_ptr get_by_idx(uint64_t idx);
+ static std::shared_ptr try_get_by_idx(uint64_t idx);
+
+ void erase(const std::shared_ptr& grad) {
+ std::lock_guard lock(mutex_);
+ grads_.erase(grad);
+ }
+
+ void insert(const std::shared_ptr& grad) {
+ std::lock_guard lock(mutex_);
+ grads_.insert(grad);
+ }
+
+ private:
+ std::unordered_set> grads_;
+ std::mutex mutex_; /* guards grads_ */
+ uint64_t idx_;
+};
+
+struct TORCH_API ForwardGrad : std::enable_shared_from_this {
+ ForwardGrad() = default;
+
+ // This function must only be called when AutogradMeta or SavedVariable is
+ // being destructed as it ensures that:
+ // - The only (potential) other references to this ForwardGrad are the
+ // different level it is registered to
+ // - No other thread will try to call `set_value` or `value` ever from now
+ // on
+ // - Any of the ForwardADLevel that this ForwardGrad is registered with
+ // might
+ // call `reset` at any point during this function
+ void clear() {
+ c10::SmallVector levels_idx; /* snapshot of registered level indices, taken under lock */
+
+ {
+ std::lock_guard lock(mutex_);
+ for (auto& c : content_) {
+ levels_idx.push_back(c.first);
+ }
+ }
+
+ for (auto l_idx : levels_idx) {
+ // Use "try" version here as another thread might have deleted this
+ // level before we got here
+ // This is an owning reference as we want to keep the level alive
+ // until we successfully unregister ourselves
+ auto level = ForwardADLevel::try_get_by_idx(l_idx);
+ if (level) {
+ level->erase(shared_from_this());
+ }
+ }
+ }
+
+ void set_value(const at::Tensor& value, uint64_t level) {
+ // Owning reference to ensure the forward_level is not destroyed
+ // while we are updating our internal state
+ auto forward_level = ForwardADLevel::get_by_idx(level);
+ forward_level->insert(shared_from_this());
+
+ std::lock_guard lock(mutex_);
+ content_.insert({level, value});
+ }
+
+ // This function removes the tangent for a given level from this ForwardGrad
+ // Use the update_level flag to disable notifying the level about this reset
+ // This flag is most notably used by the ForwardADLevel destructor.
+ void reset(uint64_t level, bool update_level = true) {
+ if (update_level) {
+ ForwardADLevel::get_by_idx(level)->erase(shared_from_this());
+ }
+
+ std::unique_lock lock(mutex_);
+ const auto& it = content_.find(level);
+ TORCH_INTERNAL_ASSERT(
+ it != content_.end(), "Resetting a non-existent level.");
+ // Keep the Tensor alive until we have released the lock
+ // This is needed as we can be in a case where this function is called by
+ // ForwardADLevel destructor
+ auto t = (*it).second;
+ content_.erase(level);
+ lock.unlock();
+ }
+
+ const at::Tensor& value(uint64_t level) const;
+
+ bool contains(uint64_t level) {
+ std::lock_guard lock(mutex_);
+ return content_.count(level) > 0;
+ }
+
+ // NOTE(review): reads content_ without taking mutex_, unlike contains() --
+ // confirm callers only use this when no concurrent writers exist.
+ bool empty() const {
+ return content_.empty();
+ }
+
+ static const at::Tensor& undef_grad();
+
+ private:
+ // TODO(albanD): replace this with a SmallVector
+ std::unordered_map content_;
+ mutable std::mutex mutex_; /* guards content_ */
+};
+
+} // namespace torch::autograd diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/accumulate_grad.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/accumulate_grad.h new file mode 100644 index 0000000000000000000000000000000000000000..2efde9d5f2f2e6ee4a074389aa9dcec7c35459d2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/accumulate_grad.h @@ -0,0 +1,277 @@ +#pragma once
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#ifndef AT_PER_OPERATOR_HEADERS
+#include
+#else
+#include
+#endif
+
+#include
+
+namespace torch {
+namespace autograd {
+
+#define CHECK_RESULT(RESULT, VAR) \
+ if (!(RESULT.is_sparse() || VAR.is_sparse()
|| RESULT.is_sparse_csr() || \
+ VAR.is_sparse_csr())) { \
+ if (!utils::obeys_layout_contract(RESULT, VAR)) { \
+ TORCH_WARN_ONCE( \
+ "grad and param do not obey the gradient layout contract. " \
+ "This is not an error, but may impair performance.\n" \
+ "grad.sizes() = ", \
+ RESULT.sizes(), \
+ ", strides() = ", \
+ RESULT.strides(), \
+ "\n", \
+ "param.sizes() = ", \
+ VAR.sizes(), \
+ ", strides() = ", \
+ VAR.strides()); \
+ } \
+ }
+
+struct TORCH_API AccumulateGrad : public Node {
+ explicit AccumulateGrad(Variable variable_);
+
+ variable_list apply(variable_list&& grads) override;
+
+ std::vector>& tensor_pre_hooks() noexcept
+ override {
+ // NB: Since the AccumulateGrad Node is only a weak ref from the Tensor,
+ // it can be destroyed even though the Tensor is still alive (contrary
+ // to all other Nodes). So we must lazily read the Tensor hooks here.
+ return impl::hooks(variable);
+ }
+
+ std::unique_ptr& tensor_post_acc_grad_hooks() noexcept
+ override {
+ // NB: Since the AccumulateGrad Node is only a weak ref from the Tensor,
+ // it can be destroyed even though the Tensor is still alive (contrary
+ // to all other Nodes). So we must lazily read the Tensor hooks here.
+ return impl::post_acc_grad_hooks(variable);
+ }
+
+ // Given a variable with its current grad as variable_grad, accumulates
+ // new_grad into variable_grad if in place accumulation is possible.
+ // Otherwise, uses 'update_grad' to update the grad for the variable.
+
+ // "Gradient Layout Contract"
+ //
+ // AccumulateGrad tries to stash strided (non-sparse) grads with memory layout
+ // (strides) such that variables and grads interact efficiently in later
+ // optimizer kernels, and grads interact efficiently with c10d::Reducer.cpp.
+ //
+ // Specifically, AccumulateGrad tries to ensure the following
+ // (cf torch/csrc/autograd/utils/grad_layout_contract.h):
+ // (1) if variable.is_non_overlapping_and_dense(), the stashed grad's
+ // strides match variable.
+ // (2) else, stashed grad is rowmajor contiguous.
+ // If variable's grad does not exist (!variable_grad.defined())
+ // AccumulateGrad steals new_grad if it's stealable and obeys the contract
+ // already, otherwise it deep copies new_grad into an obedient clone.
+ //
+ // If variable's grad already exists (variable_grad.defined()), new_grad must
+ // be added to variable_grad. If we aren't setting up for double backward
+ // (!GradMode::is_enabled()), AccumulateGrad performs "variable_grad +=
+ // new_grad" in-place, which keeps variable_grad's layout. We assume (hope)
+ // variable_grad was created obeying (1) or (2) at some point in the past.
+ //
+ // If we are setting up for double backward, AccumulateGrad updates the grad
+ // out-of-place via "variable_grad + new_grad." TensorIterator operator+
+ // decides result's layout. Typically TensorIterator matches strides of the
+ // first arg, so we once again assume (hope) variable_grad was originally
+ // created obeying (1) or (2).
+ //
+ // AccumulateGrad does not enforce the contract with 100% certainty. Examples:
+ // - If a user manually permutes a param or its grad, then runs a fwd+bwd,
+ // variable_grad += new_grad keeps variable_grad's layout without
+ // rechecking the contract.
+ // - If TensorIterator changes its corner cases about operator+'s result
+ // (for example, giving more or less priority to channels_last inputs, see
+ // https://github.com/pytorch/pytorch/pull/37968) the result may not obey.
+ //
+ // Fortunately, if a given grad doesn't satisfy (1) or (2), the penalty is
+ // degraded performance in Reducer.cpp or optimizer kernels, not death by
+ // assert or silently bad numerics.
+
+ // variable: the variable whose grad we're accumulating.
+ // variable_grad: the current grad for the variable.
+ // new_grad: new grad we want to accumulate for the variable.
+ // num_expected_refs: the number of refs we expect to hold internally
+ // such that it is safe to avoid cloning the grad
+ // if use_count() of the grad is less than or equal
+ // to this value (in addition to post_hooks).
+ // update_grad: Function that is used to update grad for the variable.
+ // The argument to the function is a Tensor which
+ // is used to set a new value for the grad.
+ template
+ static void accumulateGrad(
+ const Variable& variable,
+ at::Tensor& variable_grad,
+ const at::Tensor& new_grad,
+ size_t num_expected_refs,
+ const T& update_grad) {
+ if (!variable_grad.defined()) {
+ if (!GradMode::is_enabled() && !new_grad.is_sparse() &&
+ !new_grad.is_sparse_csr() &&
+ !(variable.is_sparse_csr() && new_grad.layout() == at::kStrided) &&
+ at::caching::adjusted_use_count(new_grad) <= num_expected_refs &&
+ (new_grad.is_mkldnn() ||
+ utils::obeys_layout_contract(new_grad, variable))) {
+ // we aren't setting up for double-backward
+ // not sparse
+ // no other user-visible tensor references new_grad
+ // new_grad obeys the "Gradient Layout Contract"; there is a special
+ // case: an MKLDNN tensor is an opaque tensor, so we assume it obeys
+ // the layout contract. Under these conditions, we can steal new_grad
+ // without a deep copy.
+ update_grad(new_grad.detach());
+ } else if (
+ !GradMode::is_enabled() && new_grad.is_sparse() &&
+ new_grad._indices().is_contiguous() &&
+ new_grad._values().is_contiguous() &&
+ // Use count for indices and values should always be <=1 since the
+ // SparseTensor should be the only one holding a reference to these.
+ new_grad._indices().use_count() <= 1 &&
+ new_grad._values().use_count() <= 1 &&
+ new_grad.use_count() <= num_expected_refs) {
+ // Can't detach sparse tensor (since metadata changes are not allowed
+ // after detach), so just create a new one for the grad which is a
+ // shallow copy. We need a shallow copy so that modifying the original
+ // grad tensor doesn't modify the grad we accumulate.
+ // We only skip clone if indices and values themselves are contiguous
+ // for backward compatibility reasons. Since without this optimization,
+ // earlier we would clone the entire SparseTensor which cloned indices
+ // and values.
+ // For details see https://github.com/pytorch/pytorch/issues/34375.
+
+ // No scenario where we expect this to be true currently
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
+ !at::caching::is_cached_tensor(new_grad._indices()) &&
+ !at::caching::is_cached_tensor(new_grad._values()) &&
+ !at::caching::is_cached_tensor(new_grad));
+
+ update_grad(at::_sparse_coo_tensor_unsafe(
+ new_grad._indices(),
+ new_grad._values(),
+ new_grad.sizes(),
+ new_grad.options()));
+ } else {
+ if (new_grad.is_sparse() || new_grad.is_sparse_csr() ||
+ new_grad.is_nested()) {
+ update_grad(new_grad.clone());
+ } else {
+ if (new_grad.is_mkldnn()) {
+ update_grad(new_grad.clone());
+ } else {
+ // Deep copies new_grad according to the "Gradient Layout Contract."
+ update_grad(utils::clone_obey_contract(new_grad, variable));
+ }
+ }
+ }
+ } else if (!GradMode::is_enabled()) {
+ // This case is not strictly necessary, but it makes the first-order only
+ // case slightly more efficient.
+ if (variable_grad.is_sparse() && !new_grad.is_sparse()) {
+ // If `variable_grad` is sparse and `new_grad` is not sparse, their
+ // sum is not sparse, and we must change the TensorImpl type of
+ // `variable_grad` for it to store the result. However, changing the
+ // TensorImpl type of a tensor requires changing the tensor itself, and
+ // thus in this case we have to change the grad tensor.
+ auto result = new_grad + variable_grad;
+ CHECK_RESULT(result, variable);
+ update_grad(std::move(result));
+ } else if (!at::inplaceIsVmapCompatible(variable_grad, new_grad)) {
+ // Ideally we'd perform an in-place operation to avoid changing
+ // the grad tensor. However, if that's impossible because the grads
+ // are vmap-incompatible (See NOTE: [vmap-incompatible in-place
+ // operations]), then we just add them out-of-place.
+ auto result = variable_grad + new_grad;
+ CHECK_RESULT(result, variable);
+ update_grad(std::move(result));
+ } else {
+ // In this case we can avoid changing the grad tensor. There are four
+ // scenarios when we'll hit this case:
+ //
+ // 1. `variable_grad` is sparse, and `new_grad` is sparse.
+ // 2. `variable_grad` is dense, and `new_grad` is sparse.
+ // 3. `variable_grad` is dense, and `new_grad` is dense.
+ // 4. `variable_grad` is mkldnn, and `new_grad` is mkldnn.
+ //
+ // In all of these four cases, `variable_grad += new_grad` is a
+ // valid operation which adds `new_grad` to `variable_grad` in
+ // place. `variable_grad` is thus still referring to the same tensor
+ // after the operation.
+ // Also DistributedDataParallel(DDP) package relies on grad being
+ // mutated in place for saving peak memory usage. DDP will still
+ // work correctly if it is mutated out of place here, but DDP will
+ // maintain one extra copy of grad tensors in buffer and thus
+ // increase peak memory usage.
+ variable_grad += new_grad;
+ CHECK_RESULT(variable_grad, variable);
+ // ^ We could enforce the contract more aggressively here by writing:
+ // if (variable_grad.is_sparse() || new_grad.is_sparse()) {
+ // variable_grad += new_grad;
+ // } else if (obeys_layout_contract(variable_grad, variable)) {
+ // variable_grad += new_grad;
+ // } else {
+ // result = at::empty_strided(variable.sizes(), variable.strides(),
+ // variable.options().memory_format(c10::nullopt));
+ // update_grad(at::native::add_out(result, variable_grad,
+ // new_grad, 1.0);
+ // }
+ // However, that accumulation is sometimes in place and sometimes not,
+ // which may break user code.
+ }
+ } else {
+ at::Tensor result;
+ if (variable_grad.is_sparse() && !new_grad.is_sparse()) {
+ // CPU backend throws an error on sparse + dense, so prefer dense +
+ // sparse here.
+ result = new_grad + variable_grad;
+ } else {
+ // Assumes operator+ result typically matches strides of first arg,
+ // and hopes variable_grad was originally created obeying layout
+ // contract.
+ result = variable_grad + new_grad;
+ }
+ CHECK_RESULT(result, variable);
+ update_grad(std::move(result));
+ // ^ We could enforce the contract more aggressively here by saying
+ // if (obeys_layout_contract(new_grad, variable)) {
+ // update_grad(new_grad + variable_grad);
+ // } else {
+ // update_grad(variable_grad + new_grad);
+ // }
+ // such that the stashed grad is likely to have the right strides if
+ // either variable_grad or new_grad already has the right strides.
+ // We could enforce the contract with certainty by saying
+ // auto result = variable_grad + new_grad (or vice versa), checking
+ // result's layout, and copying to an obedient clone if necessary before
+ // update_grad. The copy would require another gmem pass. We can't create
+ // empty result with the right layout then add_out into it with a single
+ // kernel, because GradMode is enabled in this branch, and add_out isn't
+ // differentiable. Maybe more trouble than it's worth.
+ }
+ }
+
+ void compiled_args(CompiledNodeArgs& args) override;
+ variable_list apply_with_saved(
+ const variable_list& inputs,
+ SwapSavedVariables& saved) override;
+
+ Variable variable; /* the leaf Variable whose grad this node accumulates into */
+};
+
+#undef CHECK_RESULT
+
+} // namespace autograd
+} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/basic_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/basic_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..2248c7082d8cb6e963b14d68fc71e00432141e53 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/basic_ops.h @@ -0,0 +1,111 @@ +#pragma once
+
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+namespace torch {
+namespace autograd {
+
+struct TORCH_API Error : public Node {
+ Error(std::string msg, edge_list&& next_edges)
+ : Node(std::move(next_edges)), msg(std::move(msg)) {}
+
+ Error(std::string msg) : msg(std::move(msg)) {}
+
+ variable_list apply(variable_list&& inputs) override;
+
+ void compiled_args(CompiledNodeArgs& args) override;
+ variable_list apply_with_saved(
+ const variable_list& inputs,
+ SwapSavedVariables& saved) override;
+
+ std::string msg;
+};
+
+// We print grad_fn names in tensor printing. For functions with backward
+// NYI, grad_fn= will be printed if we use Error, which is confusing. So
+// special case with a new NotImplemented function here.
+struct TORCH_API NotImplemented : public Error {
+ NotImplemented(const std::string& forward_fn, edge_list&& next_edges)
+ : Error(
+ "derivative for " + forward_fn + " is not implemented",
+ std::move(next_edges)) {}
+
+ NotImplemented(const std::string& forward_fn)
+ : Error("derivative for " + forward_fn + " is not implemented") {}
+};
+
+// Identity in forward, Error in backward.
Used to implement
+// @once_differentiable
+struct TORCH_API DelayedError : public Node {
+ DelayedError(std::string msg, int64_t num_inputs) : msg(std::move(msg)) {
+ // NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores)
+ for (const auto i : c10::irange(num_inputs)) {
+ (void)i; // Suppress unused variable warning
+ add_input_metadata(Node::undefined_input()); /* one undefined input slot per forward input */
+ }
+ }
+
+ variable_list apply(variable_list&& inputs) override;
+
+ std::string msg;
+};
+
+struct TORCH_API UndefinedGrad : public Node {
+ UndefinedGrad() {
+ add_input_metadata(Node::undefined_input());
+ }
+
+ variable_list apply(variable_list&& inputs) override;
+};
+
+struct TORCH_API UndefinedGradBackward : public Node {
+ UndefinedGradBackward(edge_list&& next_edges) : Node(std::move(next_edges)) {}
+
+ UndefinedGradBackward() = default;
+
+ variable_list apply(variable_list&& inputs) override;
+
+ void compiled_args(CompiledNodeArgs& args) override {}
+ variable_list apply_with_saved(
+ const variable_list& inputs,
+ SwapSavedVariables& saved) override {
+ return apply(variable_list(inputs));
+ }
+};
+
+struct TORCH_API GraphRoot : public Node {
+ GraphRoot(edge_list functions, variable_list inputs)
+ : Node(std::move(functions)), outputs(std::move(inputs)) {
+ // Ensures calls to stream() on a GraphRoot instance reflect current
+ // stream(s) on devices of root grad tensors at the time the instance is
+ // constructed.
+ for (const auto& t : outputs) {
+ add_input_metadata(t);
+ }
+ }
+
+ variable_list apply(variable_list&& inputs) override {
+ return outputs; /* incoming grads are ignored: a root just re-emits the gradients captured at construction */
+ }
+
+ void compiled_args(CompiledNodeArgs& args) override;
+ variable_list apply_with_saved(
+ const variable_list& inputs,
+ SwapSavedVariables& saved) override;
+
+ variable_list outputs;
+};
+
+struct TORCH_API Identity : public Node {
+ variable_list apply(variable_list&& inputs) override;
+};
+
+} // namespace autograd
+} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/comm.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/comm.h new file mode 100644 index 0000000000000000000000000000000000000000..9b1f0daf50bce3dc2d34cffc99274db916643a5f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/comm.h @@ -0,0 +1,47 @@ +#pragma once
+
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+#include
+#include
+
+namespace torch {
+namespace autograd {
+
+struct TORCH_CUDA_CU_API Scatter : public Node {
+ explicit Scatter(
+ std::vector devices,
+ c10::optional> chunk_sizes = c10::nullopt,
+ int64_t dim = 0,
+ c10::optional>> streams =
+ c10::nullopt,
+ bool unsqueeze_scalars = false);
+ ~Scatter() override;
+
+ variable_list apply(variable_list&& inputs) override;
+
+ /* NOTE(review): constructor arguments stored verbatim; the actual
+ per-device chunking semantics live in comm.cpp -- confirm there. */
+ std::vector devices_;
+ c10::optional> chunk_sizes_;
+ int64_t dim_;
+ c10::optional>> streams_;
+ bool unsqueeze_scalars_;
+};
+
+struct TORCH_CUDA_CU_API Gather : public Node {
+ explicit Gather(const at::Device& destination_device, int64_t dim = 0);
+ ~Gather() override;
+
+ variable_list apply(variable_list&& inputs) override;
+
+ at::Device destination_device_;
+ int64_t dim_;
+};
+
+} // namespace autograd
+} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/pybind.h
b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/pybind.h new file mode 100644 index 0000000000000000000000000000000000000000..94b3c9c679969b5bf3b6e9581fb06125d236c1fb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/pybind.h @@ -0,0 +1,15 @@ +#pragma once
+
+#include
+#include
+#include
+#include
+
+#include
+#include
+
+namespace py = pybind11;
+
+namespace pybind11 {
+namespace detail {} /* NOTE(review): intentionally empty -- presumably a slot for type_caster specializations; confirm upstream */
+} // namespace pybind11 diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/tensor.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/tensor.h new file mode 100644 index 0000000000000000000000000000000000000000..6e99ed6ae2aa14b8bcc8071c124f034b8d843e3d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/tensor.h @@ -0,0 +1,186 @@ +#pragma once
+
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+#include
+#include
+
+namespace torch {
+namespace autograd {
+
+struct TORCH_API CopyBackwards : public Node {
+ variable_list apply(variable_list&& grads) override;
+ void compiled_args(CompiledNodeArgs& args) override;
+ variable_list apply_with_saved(
+ const variable_list& inputs,
+ SwapSavedVariables& saved) override;
+
+ at::TensorOptions src_options; /* NOTE(review): presumably the forward-pass source tensor's options, used to build the grad w.r.t. src -- confirm in tensor.cpp */
+};
+
+// Note [View + Inplace update for base tensor]
+//
+// This note covers a few important topics related to view + inplace handling.
+// - It explains what is the CopySlices Node and why we need it.
+// - It explains the considerations on what is saved for backward in
+// CopySlices.
+// - It explains why we need to sometimes change the exec_info of the current
+// backward
+//
+// What is CopySlices?
+// ~~~~~~~~~~~~~~~~~~~ +// +// We support autograd with inplace mutation; e.g., if you write x.mul_(2) +// the autograd will work as if you now had multiple Tensors under the hood and +// you did +// x = t.clone() +// x0 = x +// x1 = x0 * 2 +// x = x1 +// As you can see here, after this operation, x.grad_fn now points to x1.grad_fn +// (the MulBackward node) and this node points to x's original grad_fn (which is +// also x0.grad_fn). It is important to keep in mind that after the inplace, +// there is no Tensor object that represents the x0 state anymore. But the graph +// for it is still around in autograd (in case x was used before being modified +// inplace). See Example 1 in +// https://docs.google.com/drawings/d/1-T5DyYfChMX1ONQkY-zU-hj_ayQ2zmA5CBOKDWqvEhE +// We call this rebasing the history of the Tensor. +// +// Now, a difficult situation is what happens if x is a differentiable view +// of a base b. +// b = t.clone() +// x = b.select(0, 0) +// x *= 2 +// With the same approach as above, this will become +// b = t.clone() +// x = b.select(0, 0) +// b0 = b +// x0 = x +// x1 = x0 * 2 +// b1 = b0.select_scatter(x1, 0, 0) +// x2 = b1.select(0, 0) +// x = x2 +// b = b1 +// As you can see here, not only we need to modify x's grad_fn, we also need to +// modify the one from b. We also need to ensure that the new grad_fn on x is +// linked to b's new grad_fn. The chain the select_scatter, multiplication and +// select is what CopySlices does, all wrapped into a single Node. +// +// See Example 1 in +// https://docs.google.com/drawings/d/1-T5DyYfChMX1ONQkY-zU-hj_ayQ2zmA5CBOKDWqvEhE +// +// What do we need to save in CopySlices to run backward? +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// +// We need to perform grad_view = fn(grad_view), but out-of-place. +// view_fn_ is an optional function saved in DifferentiableViewMeta +// from forward pass, so that we can recover we when as_strided is not +// supported. 
It preserves the invariants: +// view = view_fn_(base) +// grad_view = view_fn_(grad_base) +// +// When as_strided is supported (e.g. strided CPU/CUDA Tensors), view_fn_ +// is empty and we save TensorGeometry(view) instead. +// With the TensorGeometry information we can use `as_strided` call which +// is more efficient to recover views in backward. +// +// For example: +// view_1 = view_op_1(base) +// view_2 = view_op_2(view_1) +// ... +// view_n = view_op_n(view_n-1) +// view_n = inplace_op(view_n) +// +// In CPU/CUDA case where we support efficient as_strided implementation, +// grad_view_n can be calculated through 1 step. +// +// grad_view_n = grad_base.as_strided(view_sizes, view_strides, view_offset); +// +// But in XLA backend where we don't have full support of as_strided, +// it has to save a chained lambda function view_fn_, to exactly +// replay how the view was done in forward. +// +// view_fn_ = view_op_n(...(view_op_2(view_op_1()))) +// grad_view_n = view_fn_(grad_base) +// +// This chain view_fn_ works as long as forward view ops are implemented, +// e.g XLA simulates view without a real Storage behind Tensor, but it's less +// efficient than the as_strided one so we should be careful to only use it when +// necessary. +// +// - For CPU/CUDA we save TensorGeometry of both base and view tensors, +// That's all we need to pass into as_strided. +// E.g. int[] sizes, int[] strides, and int storage_offset. +// - For XLA we use view_fn_, which captures all forward view op arguments +// by **value**. +// E.g for at::narrow, int dim, int start, in length are saved. +// +// Theoretically we could also save Tensor `view` in CopySlices Node, but +// it's far more expensive than what we currently save. +// 1. We cannot afford keeping large tensors alive to recover views only. +// 2. There are inplace checks when Tensors are loaded back to make sure +// they haven't been changed (including size metadata). 
+// So saving metadata like TensorGeometry/view arguments is much better +// because it is minimal information needed to recover views, as well as it +// allows the user to modify the original Tensor without preventing the +// backward pass from running. +// +// Why do we manually change exec_info in the apply? +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// +// Using the same example as before, +// b = t.clone() +// x = b.select(0, 0) +// x *= y +// +// You can see the visualization at +// https://docs.google.com/drawings/d/1Bx-Hcz-zlIv7PabQqnPhUIVIs9F8WWi48svqMsAUMFs +// which contains the wrapped MulBackward Node and show what it links to. +// Since a backward can happen between any subset of the inputs (t and y) and +// outputs (o, x, b). It is possible to get into a state where CopySlices's 0th +// next function (CloneBackward) needs gradient but MulBackward's 0th next +// function (SelectBackward) is not. This happens if you do autograd.grad +// between x and t for example. +// In such a case, we do need to mark SelectBackward as requiring gradient such +// that, during the execution of MulBackward, we will actually compute gradient +// for the 0th input. +// +// All the other next functions are always shared (this is asserted in the apply +// code) and so nothing needs to be done for them. + +// See Note [View + Inplace update for view tensor] for what we do to view +// tensor when an in-place operation happens. 
+struct TORCH_API CopySlices : public Node { + CopySlices( + const Variable& base_var, + at::TensorGeometry view_, + std::unique_ptr view_fn_, + std::shared_ptr fn_); + + // common code between apply/apply_with_saved + template + variable_list apply_impl(variable_list&& inputs, const T& call_fn); + + variable_list apply(variable_list&& inputs) override; + void release_variables() override; + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved( + const variable_list& inputs, + SwapSavedVariables& saved) override; + + at::TensorGeometry base; + // view and view_fn are redundant and view_fn will be used if available. + // See Note [View + Inplace update for base tensor] for details. + at::TensorGeometry view; + std::unique_ptr view_fn; + std::shared_ptr fn; +}; + +} // namespace autograd +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/utils.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/utils.h new file mode 100644 index 0000000000000000000000000000000000000000..3cc2575da8f5dd20edd29eb88993e9499a6816ab --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/utils.h @@ -0,0 +1,114 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include + +namespace torch { +namespace autograd { + +using function_constructor = std::function(edge_list&&)>; + +/** + * Wraps the tensor outputs in variables and creates the grad_fn and sets the + * grad_fn if necessary. + */ +TORCH_API variable_list wrap_outputs( + const variable_list& inputs, + tensor_list&& outputs, + const function_constructor& ctr); + +/// Checks that inputs contains exactly `args` items and that the first +/// `required_args` +/// items are not nullptr. If not specified, `required_args` defaults to `args`. 
+TORCH_API void check_input_variables( + const char* name, + const variable_list& inputs, + int args, + int required_args = -1, + bool allow_undefined = false); + +struct ComputeRequiresGrad : IterArgs { + bool out = false; + using IterArgs::operator(); + void operator()(const at::Tensor& tensor) { + const auto& var = static_cast(tensor); + if (var.defined() && var.requires_grad()) { + out = true; + } + } + void operator()(const c10::optional& tensor) { + if (tensor.has_value()) { + (*this)(*tensor); + } + } + bool short_circuit() { + return out; + } +}; + +template +inline bool compute_requires_grad(Args&&... args) { + if (!GradMode::is_enabled()) { + return false; + } + return ComputeRequiresGrad().apply(std::forward(args)...).out; +} + +inline void set_history( + const at::Tensor& variable, + const std::shared_ptr& grad_fn) { + TORCH_CHECK(grad_fn != nullptr); + if (variable.defined()) { + // If the codegen triggers this, you most likely want to add your newly + // added function to the DONT_REQUIRE_DERIVATIVE list in + // tools/autograd/gen_variable_type.py + TORCH_INTERNAL_ASSERT(isDifferentiableType(variable.scalar_type())); + auto output_nr = grad_fn->add_input_metadata(variable); + impl::set_gradient_edge(variable, {grad_fn, output_nr}); + } else { + grad_fn->add_input_metadata(Node::undefined_input()); + } +} + +inline void set_history( + const std::vector& variables, + const std::shared_ptr& grad_fn) { + for (auto& variable : variables) { + set_history(variable, grad_fn); + } +} + +inline bool isFwGradDefined(const c10::optional& t) { + return t.has_value() && t->defined() && t->_fw_grad(/*level */ 0).defined(); +} + +inline bool isFwGradDefinedTensorList(const at::ITensorListRef& variables) { + bool ret = false; + for (auto& variable : variables) { + ret |= isFwGradDefined(variable); + } + return ret; +} + +inline bool isFwGradDefinedTensorList( + const c10::List>& li) { + bool ret = false; + for (auto i : c10::irange(li.size())) { + auto t = li.get(i); 
+ ret |= (t.has_value() && isFwGradDefined(t.value())); + } + return ret; +} + +} // namespace autograd +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/Functions.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/Functions.h new file mode 100644 index 0000000000000000000000000000000000000000..0b574d46f4a5f393482e5bfb1329aa4da1a03011 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/Functions.h @@ -0,0 +1,14788 @@ +#pragma once + +// @generated from ../tools/autograd/templates/Functions.h + +#include +#include +#include + +#include "torch/csrc/autograd/function.h" +#include "torch/csrc/autograd/variable.h" +#include "torch/csrc/autograd/saved_variable.h" +#include + +#include + +namespace torch { namespace autograd { namespace generated { + +using at::Scalar; +using at::Tensor; +using at::IntArrayRef; +using at::ArrayRef; +using at::Type; +using at::TensorGeometry; +using at::ScalarType; +using c10::optional; +using c10::fmap; + +inline std::vector unpack_list(at::ArrayRef xs, std::shared_ptr saved_for = nullptr) { + // NB: we must explicitly do the conversion in the lambda, otherwise template + // deduction will give a Tensor of Variable which is not convertible + return fmap(xs, [&saved_for](const SavedVariable& x) { + // TODO(crcrpar): Use `std::move(saved_for)` to avoid incrementing refcount, which would need refactoring. + return static_cast(x.unpack(saved_for)); + }); +} + +inline c10::List> unpack_opt_list(at::ArrayRef xs, std::shared_ptr saved_for = nullptr) { + torch::List> result; + result.reserve(xs.size()); + for (const SavedVariable& v : xs) { + auto var = v.unpack(saved_for); + result.push_back(var.defined() ? 
c10::optional(var) : c10::nullopt); + } + return result; +} + +using torch::autograd::TypeAndSize; + +#ifdef _WIN32 +struct AbsBackward0 : public TraceableFunction { + TORCH_API AbsBackward0() = default; +#else +struct TORCH_API AbsBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AbsBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct AcosBackward0 : public TraceableFunction { + TORCH_API AcosBackward0() = default; +#else +struct TORCH_API AcosBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AcosBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct AddBackward0 : public TraceableFunction { + TORCH_API AddBackward0() = default; +#else +struct TORCH_API AddBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AddBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar alpha; + at::ScalarType other_scalar_type; + at::ScalarType self_scalar_type; + +}; +#ifdef 
_WIN32 +struct AddBackward1 : public TraceableFunction { + TORCH_API AddBackward1() = default; +#else +struct TORCH_API AddBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AddBackward1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::ScalarType self_scalar_type; + +}; +#ifdef _WIN32 +struct AddbmmBackward0 : public TraceableFunction { + TORCH_API AddbmmBackward0() = default; +#else +struct TORCH_API AddbmmBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AddbmmBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + batch1_.reset_data(); + batch2_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar alpha; + SavedVariable batch1_; + c10::SymInt batch1_sym_argsize_0; + c10::SymInt batch1_sym_argsize_1; + SavedVariable batch2_; + c10::SymInt batch2_sym_argsize_2; + at::Scalar beta; + +}; +#ifdef _WIN32 +struct AddcdivBackward0 : public TraceableFunction { + TORCH_API AddcdivBackward0() = default; +#else +struct TORCH_API AddcdivBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AddcdivBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + tensor1_.reset_data(); + tensor2_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const 
variable_list& inputs, SwapSavedVariables& saved) override; + at::ScalarType self_scalar_type; + SavedVariable tensor1_; + at::ScalarType tensor1_scalar_type; + SavedVariable tensor2_; + at::ScalarType tensor2_scalar_type; + at::Scalar value; + +}; +#ifdef _WIN32 +struct AddcmulBackward0 : public TraceableFunction { + TORCH_API AddcmulBackward0() = default; +#else +struct TORCH_API AddcmulBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AddcmulBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + tensor1_.reset_data(); + tensor2_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::ScalarType self_scalar_type; + SavedVariable tensor1_; + at::ScalarType tensor1_scalar_type; + SavedVariable tensor2_; + at::ScalarType tensor2_scalar_type; + at::Scalar value; + +}; +#ifdef _WIN32 +struct AddmmBackward0 : public TraceableFunction { + TORCH_API AddmmBackward0() = default; +#else +struct TORCH_API AddmmBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AddmmBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + mat1_.reset_data(); + mat2_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar alpha; + at::Scalar beta; + SavedVariable mat1_; + at::Layout mat1_layout; + std::vector mat1_sym_sizes; + std::vector mat1_sym_strides; + SavedVariable mat2_; + at::Layout mat2_layout; + std::vector mat2_sym_sizes; + std::vector mat2_sym_strides; + +}; +#ifdef _WIN32 +struct 
SparseAddmmBackward0 : public TraceableFunction { + TORCH_API SparseAddmmBackward0() = default; +#else +struct TORCH_API SparseAddmmBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SparseAddmmBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + mat1_.reset_data(); + mat2_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar alpha; + at::Scalar beta; + SavedVariable mat1_; + SavedVariable mat2_; + at::Layout mat2_layout; + std::vector mat2_sym_sizes; + std::vector mat2_sym_strides; + +}; +#ifdef _WIN32 +struct AddmvBackward0 : public TraceableFunction { + TORCH_API AddmvBackward0() = default; +#else +struct TORCH_API AddmvBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AddmvBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + mat_.reset_data(); + vec_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar alpha; + at::Scalar beta; + SavedVariable mat_; + SavedVariable vec_; + +}; +#ifdef _WIN32 +struct AddrBackward0 : public TraceableFunction { + TORCH_API AddrBackward0() = default; +#else +struct TORCH_API AddrBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AddrBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + vec1_.reset_data(); + vec2_.reset_data(); + } + + void 
compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar alpha; + at::Scalar beta; + SavedVariable vec1_; + SavedVariable vec2_; + +}; +#ifdef _WIN32 +struct AffineGridGeneratorBackward0 : public TraceableFunction { + TORCH_API AffineGridGeneratorBackward0() = default; +#else +struct TORCH_API AffineGridGeneratorBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AffineGridGeneratorBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool align_corners; + std::vector size; + +}; +#ifdef _WIN32 +struct AliasBackward0 : public Node { + TORCH_API AliasBackward0() = default; +#else +struct TORCH_API AliasBackward0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AliasBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct AngleBackward0 : public TraceableFunction { + TORCH_API AngleBackward0() = default; +#else +struct TORCH_API AngleBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AngleBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) 
override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct AcoshBackward0 : public TraceableFunction { + TORCH_API AcoshBackward0() = default; +#else +struct TORCH_API AcoshBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AcoshBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct AcoshBackward1 : public TraceableFunction { + TORCH_API AcoshBackward1() = default; +#else +struct TORCH_API AcoshBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AcoshBackward1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct AsinhBackward0 : public TraceableFunction { + TORCH_API AsinhBackward0() = default; +#else +struct TORCH_API AsinhBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AsinhBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct AsinhBackward1 : public TraceableFunction { + TORCH_API AsinhBackward1() = default; +#else +struct TORCH_API 
AsinhBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AsinhBackward1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct AtanhBackward0 : public TraceableFunction { + TORCH_API AtanhBackward0() = default; +#else +struct TORCH_API AtanhBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AtanhBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct AtanhBackward1 : public TraceableFunction { + TORCH_API AtanhBackward1() = default; +#else +struct TORCH_API AtanhBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AtanhBackward1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct AsStridedBackward0 : public Node { + TORCH_API AsStridedBackward0() = default; +#else +struct TORCH_API AsStridedBackward0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AsStridedBackward0"; } + void release_variables() override { + + + } + + void 
compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::TensorGeometry self_geometry; + std::vector size; + c10::optional storage_offset; + std::vector stride; + +}; +#ifdef _WIN32 +struct AsStridedBackward1 : public TraceableFunction { + TORCH_API AsStridedBackward1() = default; +#else +struct TORCH_API AsStridedBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AsStridedBackward1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::TensorGeometry self_geometry; + std::vector size; + c10::optional storage_offset; + std::vector stride; + +}; +#ifdef _WIN32 +struct AsinBackward0 : public TraceableFunction { + TORCH_API AsinBackward0() = default; +#else +struct TORCH_API AsinBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AsinBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct AtanBackward0 : public TraceableFunction { + TORCH_API AtanBackward0() = default; +#else +struct TORCH_API AtanBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AtanBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + 
self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct Atan2Backward0 : public TraceableFunction { + TORCH_API Atan2Backward0() = default; +#else +struct TORCH_API Atan2Backward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "Atan2Backward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable other_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct BaddbmmBackward0 : public TraceableFunction { + TORCH_API BaddbmmBackward0() = default; +#else +struct TORCH_API BaddbmmBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "BaddbmmBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + batch1_.reset_data(); + batch2_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar alpha; + SavedVariable batch1_; + SavedVariable batch2_; + at::Scalar beta; + +}; +#ifdef _WIN32 +struct BernoulliBackward0 : public TraceableFunction { + TORCH_API BernoulliBackward0() = default; +#else +struct TORCH_API BernoulliBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "BernoulliBackward0"; } + void 
release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct BernoulliBackward1 : public TraceableFunction { + TORCH_API BernoulliBackward1() = default; +#else +struct TORCH_API BernoulliBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "BernoulliBackward1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + torch::autograd::generated::TypeAndSize p_info; + +}; +#ifdef _WIN32 +struct BernoulliBackward2 : public TraceableFunction { + TORCH_API BernoulliBackward2() = default; +#else +struct TORCH_API BernoulliBackward2 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "BernoulliBackward2"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct BmmBackward0 : public TraceableFunction { + TORCH_API BmmBackward0() = default; +#else +struct TORCH_API BmmBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "BmmBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + mat2_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, 
SwapSavedVariables& saved) override; + SavedVariable mat2_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct MatmulBackward0 : public TraceableFunction { + TORCH_API MatmulBackward0() = default; +#else +struct TORCH_API MatmulBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MatmulBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable other_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct CatBackward0 : public TraceableFunction { + TORCH_API CatBackward0() = default; +#else +struct TORCH_API CatBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CatBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + ::std::vector tensors_args_scalartypes; + ::std::vector<::std::vector> tensors_args_sizes_symint; + size_t tensors_size_; +}; +#ifdef _WIN32 +struct CauchyBackward0 : public TraceableFunction { + TORCH_API CauchyBackward0() = default; +#else +struct TORCH_API CauchyBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CauchyBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, 
SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct CeilBackward0 : public TraceableFunction { + TORCH_API CeilBackward0() = default; +#else +struct TORCH_API CeilBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CeilBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct CholeskyBackward0 : public TraceableFunction { + TORCH_API CholeskyBackward0() = default; +#else +struct TORCH_API CholeskyBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CholeskyBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool upper; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct LinalgCholeskyExBackward0 : public TraceableFunction { + TORCH_API LinalgCholeskyExBackward0() = default; +#else +struct TORCH_API LinalgCholeskyExBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LinalgCholeskyExBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + L_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool upper; + SavedVariable L_; + +}; +#ifdef _WIN32 +struct CholeskySolveBackward0 : public 
TraceableFunction { + TORCH_API CholeskySolveBackward0() = default; +#else +struct TORCH_API CholeskySolveBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CholeskySolveBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + input2_.reset_data(); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable input2_; + SavedVariable self_; + bool upper; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct CholeskyInverseBackward0 : public TraceableFunction { + TORCH_API CholeskyInverseBackward0() = default; +#else +struct TORCH_API CholeskyInverseBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CholeskyInverseBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + bool upper; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct ClampBackward0 : public TraceableFunction { + TORCH_API ClampBackward0() = default; +#else +struct TORCH_API ClampBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ClampBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + max_.reset_data(); + min_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; 
+ variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable max_; + SavedVariable min_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct ClampBackward1 : public TraceableFunction { + TORCH_API ClampBackward1() = default; +#else +struct TORCH_API ClampBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ClampBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::optional max; + c10::optional min; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct ClampMinBackward0 : public TraceableFunction { + TORCH_API ClampMinBackward0() = default; +#else +struct TORCH_API ClampMinBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ClampMinBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar min; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct ClampMinBackward1 : public TraceableFunction { + TORCH_API ClampMinBackward1() = default; +#else +struct TORCH_API ClampMinBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ClampMinBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + min_.reset_data(); + self_.reset_data(); + } + + void 
compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable min_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct ClampMaxBackward0 : public TraceableFunction { + TORCH_API ClampMaxBackward0() = default; +#else +struct TORCH_API ClampMaxBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ClampMaxBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar max; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct ClampMaxBackward1 : public TraceableFunction { + TORCH_API ClampMaxBackward1() = default; +#else +struct TORCH_API ClampMaxBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ClampMaxBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + max_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable max_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct CloneBackward0 : public TraceableFunction { + TORCH_API CloneBackward0() = default; +#else +struct TORCH_API CloneBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CloneBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + 
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct LazyCloneBackward0 : public TraceableFunction { + TORCH_API LazyCloneBackward0() = default; +#else +struct TORCH_API LazyCloneBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LazyCloneBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct ToCopyBackward0 : public TraceableFunction { + TORCH_API ToCopyBackward0() = default; +#else +struct TORCH_API ToCopyBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ToCopyBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::TensorOptions self_options; + +}; +#ifdef _WIN32 +struct CoalesceBackward0 : public TraceableFunction { + TORCH_API CoalesceBackward0() = default; +#else +struct TORCH_API CoalesceBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CoalesceBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct ComplexBackward0 : public TraceableFunction { + TORCH_API ComplexBackward0() = default; +#else +struct TORCH_API 
ComplexBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ComplexBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + imag_.reset_data(); + real_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable imag_; + SavedVariable real_; + +}; +#ifdef _WIN32 +struct PolarBackward0 : public TraceableFunction { + TORCH_API PolarBackward0() = default; +#else +struct TORCH_API PolarBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "PolarBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct ConjBackward0 : public Node { + TORCH_API ConjBackward0() = default; +#else +struct TORCH_API ConjBackward0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ConjBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct NegViewBackward0 : public Node { + TORCH_API NegViewBackward0() = default; +#else +struct TORCH_API NegViewBackward0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NegViewBackward0"; } + void 
release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct ConjPhysicalBackward0 : public TraceableFunction { + TORCH_API ConjPhysicalBackward0() = default; +#else +struct TORCH_API ConjPhysicalBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ConjPhysicalBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct ConjPhysicalBackward1 : public TraceableFunction { + TORCH_API ConjPhysicalBackward1() = default; +#else +struct TORCH_API ConjPhysicalBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ConjPhysicalBackward1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct CopysignBackward0 : public TraceableFunction { + TORCH_API CopysignBackward0() = default; +#else +struct TORCH_API CopysignBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CopysignBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& 
saved) override; + torch::autograd::generated::TypeAndSize other_info; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct CopysignBackward1 : public TraceableFunction { + TORCH_API CopysignBackward1() = default; +#else +struct TORCH_API CopysignBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CopysignBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct CosBackward0 : public TraceableFunction { + TORCH_API CosBackward0() = default; +#else +struct TORCH_API CosBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CosBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct CoshBackward0 : public TraceableFunction { + TORCH_API CoshBackward0() = default; +#else +struct TORCH_API CoshBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CoshBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& 
inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct LinalgCrossBackward0 : public TraceableFunction { + TORCH_API LinalgCrossBackward0() = default; +#else +struct TORCH_API LinalgCrossBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LinalgCrossBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + SavedVariable other_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct LogcumsumexpBackward0 : public TraceableFunction { + TORCH_API LogcumsumexpBackward0() = default; +#else +struct TORCH_API LogcumsumexpBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LogcumsumexpBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct CumprodBackward0 : public TraceableFunction { + TORCH_API CumprodBackward0() = default; +#else +struct TORCH_API CumprodBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CumprodBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } 
+ + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + SavedVariable self_; + at::ScalarType self_scalar_type; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct CumsumBackward0 : public TraceableFunction { + TORCH_API CumsumBackward0() = default; +#else +struct TORCH_API CumsumBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CumsumBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + at::ScalarType self_scalar_type; + +}; +#ifdef _WIN32 +struct CummaxBackward0 : public TraceableFunction { + TORCH_API CummaxBackward0() = default; +#else +struct TORCH_API CummaxBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CummaxBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + indices_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + SavedVariable self_; + SavedVariable indices_; + +}; +#ifdef _WIN32 +struct CumminBackward0 : public TraceableFunction { + TORCH_API CumminBackward0() = default; +#else +struct TORCH_API CumminBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CumminBackward0"; } + void release_variables() override { + std::lock_guard 
lock(mutex_); + self_.reset_data(); + indices_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + SavedVariable self_; + SavedVariable indices_; + +}; +#ifdef _WIN32 +struct ConvTbcBackward0 : public TraceableFunction { + TORCH_API ConvTbcBackward0() = default; +#else +struct TORCH_API ConvTbcBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ConvTbcBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + bias_.reset_data(); + self_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable bias_; + int64_t pad = 0; + SavedVariable self_; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct CtcLossBackward0 : public TraceableFunction { + TORCH_API CtcLossBackward0() = default; +#else +struct TORCH_API CtcLossBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CtcLossBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + log_probs_.reset_data(); + targets_.reset_data(); + result0_.reset_data(); + result1_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t blank = 0; + std::vector input_lengths; + SavedVariable log_probs_; + std::vector target_lengths; + SavedVariable targets_; + bool zero_infinity; + SavedVariable result0_; + SavedVariable result1_; + +}; +#ifdef _WIN32 +struct CtcLossBackward1 : public 
TraceableFunction { + TORCH_API CtcLossBackward1() = default; +#else +struct TORCH_API CtcLossBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CtcLossBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + input_lengths_.reset_data(); + log_probs_.reset_data(); + target_lengths_.reset_data(); + targets_.reset_data(); + result0_.reset_data(); + result1_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t blank = 0; + SavedVariable input_lengths_; + SavedVariable log_probs_; + SavedVariable target_lengths_; + SavedVariable targets_; + bool zero_infinity; + SavedVariable result0_; + SavedVariable result1_; + +}; +#ifdef _WIN32 +struct Deg2RadBackward0 : public TraceableFunction { + TORCH_API Deg2RadBackward0() = default; +#else +struct TORCH_API Deg2RadBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "Deg2RadBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct LinalgDetBackward0 : public TraceableFunction { + TORCH_API LinalgDetBackward0() = default; +#else +struct TORCH_API LinalgDetBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LinalgDetBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + A_.reset_data(); + LU_.reset_data(); + pivots_.reset_data(); + 
result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable A_; + SavedVariable LU_; + SavedVariable pivots_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct LinalgSlogdetBackward0 : public TraceableFunction { + TORCH_API LinalgSlogdetBackward0() = default; +#else +struct TORCH_API LinalgSlogdetBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LinalgSlogdetBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + A_.reset_data(); + LU_.reset_data(); + pivots_.reset_data(); + sign_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable A_; + SavedVariable LU_; + SavedVariable pivots_; + SavedVariable sign_; + +}; +#ifdef _WIN32 +struct BlockDiagBackward0 : public TraceableFunction { + TORCH_API BlockDiagBackward0() = default; +#else +struct TORCH_API BlockDiagBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "BlockDiagBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + ::std::vector tensors_args_scalartypes; + ::std::vector<::std::vector> tensors_args_sizes; + size_t tensors_size_; +}; +#ifdef _WIN32 +struct DiagEmbedBackward0 : public TraceableFunction { + TORCH_API DiagEmbedBackward0() = default; +#else +struct TORCH_API DiagEmbedBackward0 : public TraceableFunction { +#endif + using 
TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "DiagEmbedBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim1 = 0; + int64_t dim2 = 0; + int64_t offset = 0; + +}; +#ifdef _WIN32 +struct DiagonalBackward0 : public Node { + TORCH_API DiagonalBackward0() = default; +#else +struct TORCH_API DiagonalBackward0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "DiagonalBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim1 = 0; + int64_t dim2 = 0; + int64_t offset = 0; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct DiagonalBackwardBackward0 : public TraceableFunction { + TORCH_API DiagonalBackwardBackward0() = default; +#else +struct TORCH_API DiagonalBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "DiagonalBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim1 = 0; + int64_t dim2 = 0; + int64_t offset = 0; + +}; +#ifdef _WIN32 +struct DistBackward0 : public TraceableFunction { + TORCH_API DistBackward0() = default; +#else +struct TORCH_API DistBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + 
std::string name() const override { return "DistBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable other_; + at::Scalar p; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct DivBackward0 : public TraceableFunction { + TORCH_API DivBackward0() = default; +#else +struct TORCH_API DivBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "DivBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable other_; + SavedVariable self_; + at::ScalarType self_scalar_type; + +}; +#ifdef _WIN32 +struct DivBackward1 : public TraceableFunction { + TORCH_API DivBackward1() = default; +#else +struct TORCH_API DivBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "DivBackward1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar other; + at::ScalarType self_scalar_type; + +}; +#ifdef _WIN32 +struct DivBackward2 : public TraceableFunction { + TORCH_API DivBackward2() = default; +#else +struct TORCH_API DivBackward2 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + 
variable_list apply(variable_list&& grads) override; + std::string name() const override { return "DivBackward2"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable other_; + c10::optional rounding_mode; + SavedVariable self_; + at::ScalarType self_scalar_type; + +}; +#ifdef _WIN32 +struct DivBackward3 : public TraceableFunction { + TORCH_API DivBackward3() = default; +#else +struct TORCH_API DivBackward3 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "DivBackward3"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar other; + c10::optional rounding_mode; + at::ScalarType self_scalar_type; + +}; +#ifdef _WIN32 +struct DotBackward0 : public TraceableFunction { + TORCH_API DotBackward0() = default; +#else +struct TORCH_API DotBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "DotBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + tensor_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + SavedVariable tensor_; + +}; +#ifdef _WIN32 +struct VdotBackward0 : public TraceableFunction { + TORCH_API VdotBackward0() = default; +#else +struct TORCH_API VdotBackward0 : public TraceableFunction { +#endif 
+ using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "VdotBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable other_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct FusedDropoutBackward0 : public TraceableFunction { + TORCH_API FusedDropoutBackward0() = default; +#else +struct TORCH_API FusedDropoutBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "FusedDropoutBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result1_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + double p; + SavedVariable result1_; + +}; +#ifdef _WIN32 +struct NativeDropoutBackward0 : public TraceableFunction { + TORCH_API NativeDropoutBackward0() = default; +#else +struct TORCH_API NativeDropoutBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NativeDropoutBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result1_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + double p; + c10::optional train; + SavedVariable result1_; + +}; +#ifdef _WIN32 +struct NativeDropoutBackwardBackward0 : public TraceableFunction { + TORCH_API 
NativeDropoutBackwardBackward0() = default; +#else +struct TORCH_API NativeDropoutBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NativeDropoutBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grad_output_.reset_data(); + mask_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable grad_output_; + SavedVariable mask_; + double scale; + +}; +#ifdef _WIN32 +struct EqBackward0 : public TraceableFunction { + TORCH_API EqBackward0() = default; +#else +struct TORCH_API EqBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "EqBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct EqBackward1 : public TraceableFunction { + TORCH_API EqBackward1() = default; +#else +struct TORCH_API EqBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "EqBackward1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + torch::autograd::generated::TypeAndSize other_info; + torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct ErfBackward0 : public TraceableFunction { + 
TORCH_API ErfBackward0() = default; +#else +struct TORCH_API ErfBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ErfBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct ErfcBackward0 : public TraceableFunction { + TORCH_API ErfcBackward0() = default; +#else +struct TORCH_API ErfcBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ErfcBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct SpecialErfcxBackward0 : public TraceableFunction { + TORCH_API SpecialErfcxBackward0() = default; +#else +struct TORCH_API SpecialErfcxBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SpecialErfcxBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct ErfinvBackward0 : public TraceableFunction { + TORCH_API ErfinvBackward0() = default; 
+#else +struct TORCH_API ErfinvBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ErfinvBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct ExpBackward0 : public TraceableFunction { + TORCH_API ExpBackward0() = default; +#else +struct TORCH_API ExpBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ExpBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct Exp2Backward0 : public TraceableFunction { + TORCH_API Exp2Backward0() = default; +#else +struct TORCH_API Exp2Backward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "Exp2Backward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct Expm1Backward0 : public TraceableFunction { + TORCH_API Expm1Backward0() = default; +#else +struct TORCH_API Expm1Backward0 : public TraceableFunction { +#endif + using 
TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "Expm1Backward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct ExpandBackward0 : public Node { + TORCH_API ExpandBackward0() = default; +#else +struct TORCH_API ExpandBackward0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ExpandBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct ExponentialBackward0 : public TraceableFunction { + TORCH_API ExponentialBackward0() = default; +#else +struct TORCH_API ExponentialBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ExponentialBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct FakeQuantizePerTensorAffineCachemaskBackward0 : public TraceableFunction { + TORCH_API FakeQuantizePerTensorAffineCachemaskBackward0() = default; +#else +struct TORCH_API FakeQuantizePerTensorAffineCachemaskBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return 
"FakeQuantizePerTensorAffineCachemaskBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + mask_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable mask_; + +}; +#ifdef _WIN32 +struct FakeQuantizePerTensorAffineCachemaskTensorQparamsBackward0 : public TraceableFunction { + TORCH_API FakeQuantizePerTensorAffineCachemaskTensorQparamsBackward0() = default; +#else +struct TORCH_API FakeQuantizePerTensorAffineCachemaskTensorQparamsBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "FakeQuantizePerTensorAffineCachemaskTensorQparamsBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + mask_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable mask_; + +}; +#ifdef _WIN32 +struct FakeQuantizeLearnablePerTensorAffineBackward0 : public TraceableFunction { + TORCH_API FakeQuantizeLearnablePerTensorAffineBackward0() = default; +#else +struct TORCH_API FakeQuantizeLearnablePerTensorAffineBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "FakeQuantizeLearnablePerTensorAffineBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + scale_.reset_data(); + self_.reset_data(); + zero_point_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + double grad_factor; + int64_t quant_max = 0; + int64_t quant_min = 0; + 
SavedVariable scale_; + SavedVariable self_; + SavedVariable zero_point_; + +}; +#ifdef _WIN32 +struct FakeQuantizePerChannelAffineCachemaskBackward0 : public TraceableFunction { + TORCH_API FakeQuantizePerChannelAffineCachemaskBackward0() = default; +#else +struct TORCH_API FakeQuantizePerChannelAffineCachemaskBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "FakeQuantizePerChannelAffineCachemaskBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + mask_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable mask_; + +}; +#ifdef _WIN32 +struct FakeQuantizeLearnablePerChannelAffineBackward0 : public TraceableFunction { + TORCH_API FakeQuantizeLearnablePerChannelAffineBackward0() = default; +#else +struct TORCH_API FakeQuantizeLearnablePerChannelAffineBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "FakeQuantizeLearnablePerChannelAffineBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + scale_.reset_data(); + self_.reset_data(); + zero_point_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t axis = 0; + double grad_factor; + int64_t quant_max = 0; + int64_t quant_min = 0; + SavedVariable scale_; + SavedVariable self_; + SavedVariable zero_point_; + +}; +#ifdef _WIN32 +struct FusedMovingAvgObsFqHelperBackward0 : public TraceableFunction { + TORCH_API FusedMovingAvgObsFqHelperBackward0() = default; +#else +struct TORCH_API 
FusedMovingAvgObsFqHelperBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "FusedMovingAvgObsFqHelperBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + mask_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable mask_; + +}; +#ifdef _WIN32 +struct FillBackward0 : public TraceableFunction { + TORCH_API FillBackward0() = default; +#else +struct TORCH_API FillBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "FillBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct FillBackward1 : public TraceableFunction { + TORCH_API FillBackward1() = default; +#else +struct TORCH_API FillBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "FillBackward1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct FillBackward2 : public TraceableFunction { + TORCH_API FillBackward2() = default; +#else +struct TORCH_API FillBackward2 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "FillBackward2"; } + 
void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct FillBackward3 : public TraceableFunction { + TORCH_API FillBackward3() = default; +#else +struct TORCH_API FillBackward3 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "FillBackward3"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct FloorBackward0 : public TraceableFunction { + TORCH_API FloorBackward0() = default; +#else +struct TORCH_API FloorBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "FloorBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct FmodBackward0 : public TraceableFunction { + TORCH_API FmodBackward0() = default; +#else +struct TORCH_API FmodBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "FmodBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct FmodBackward1 : public TraceableFunction { + TORCH_API FmodBackward1() = default; +#else +struct 
TORCH_API FmodBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "FmodBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable other_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct FracBackward0 : public TraceableFunction { + TORCH_API FracBackward0() = default; +#else +struct TORCH_API FracBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "FracBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct FrexpBackward0 : public TraceableFunction { + TORCH_API FrexpBackward0() = default; +#else +struct TORCH_API FrexpBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "FrexpBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + exponent_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable exponent_; + +}; +#ifdef _WIN32 +struct GatherBackward0 : public TraceableFunction { + TORCH_API GatherBackward0() = default; +#else +struct TORCH_API GatherBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list 
apply(variable_list&& grads) override; + std::string name() const override { return "GatherBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + index_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + SavedVariable index_; + SavedVariable self_; + bool sparse_grad; + +}; +#ifdef _WIN32 +struct GeBackward0 : public TraceableFunction { + TORCH_API GeBackward0() = default; +#else +struct TORCH_API GeBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "GeBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct GeBackward1 : public TraceableFunction { + TORCH_API GeBackward1() = default; +#else +struct TORCH_API GeBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "GeBackward1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + torch::autograd::generated::TypeAndSize other_info; + torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct GeometricBackward0 : public TraceableFunction { + TORCH_API GeometricBackward0() = default; +#else +struct TORCH_API GeometricBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list 
apply(variable_list&& grads) override; + std::string name() const override { return "GeometricBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct GeqrfBackward0 : public TraceableFunction { + TORCH_API GeqrfBackward0() = default; +#else +struct TORCH_API GeqrfBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "GeqrfBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct GridSampler2DBackward0 : public TraceableFunction { + TORCH_API GridSampler2DBackward0() = default; +#else +struct TORCH_API GridSampler2DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "GridSampler2DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grid_.reset_data(); + input_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool align_corners; + SavedVariable grid_; + SavedVariable input_; + int64_t interpolation_mode = 0; + int64_t padding_mode = 0; + +}; +#ifdef _WIN32 +struct GridSampler3DBackward0 : public TraceableFunction { + TORCH_API GridSampler3DBackward0() = default; +#else +struct TORCH_API GridSampler3DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string 
name() const override { return "GridSampler3DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grid_.reset_data(); + input_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool align_corners; + SavedVariable grid_; + SavedVariable input_; + int64_t interpolation_mode = 0; + int64_t padding_mode = 0; + +}; +#ifdef _WIN32 +struct GridSampler2DCpuFallbackBackward0 : public TraceableFunction { + TORCH_API GridSampler2DCpuFallbackBackward0() = default; +#else +struct TORCH_API GridSampler2DCpuFallbackBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "GridSampler2DCpuFallbackBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grid_.reset_data(); + input_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool align_corners; + SavedVariable grid_; + SavedVariable input_; + int64_t interpolation_mode = 0; + int64_t padding_mode = 0; + +}; +#ifdef _WIN32 +struct GtBackward0 : public TraceableFunction { + TORCH_API GtBackward0() = default; +#else +struct TORCH_API GtBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "GtBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct GtBackward1 : public TraceableFunction { + TORCH_API GtBackward1() = 
default; +#else +struct TORCH_API GtBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "GtBackward1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + torch::autograd::generated::TypeAndSize other_info; + torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct HardsigmoidBackward0 : public TraceableFunction { + TORCH_API HardsigmoidBackward0() = default; +#else +struct TORCH_API HardsigmoidBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "HardsigmoidBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct HardswishBackward0 : public TraceableFunction { + TORCH_API HardswishBackward0() = default; +#else +struct TORCH_API HardswishBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "HardswishBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct HardswishBackwardBackward0 : public TraceableFunction { + TORCH_API HardswishBackwardBackward0() = default; 
+#else +struct TORCH_API HardswishBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "HardswishBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grad_output_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable grad_output_; + SavedVariable self_; + at::TensorOptions self_options; + +}; +#ifdef _WIN32 +struct HypotBackward0 : public TraceableFunction { + TORCH_API HypotBackward0() = default; +#else +struct TORCH_API HypotBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "HypotBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable other_; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct I0Backward0 : public TraceableFunction { + TORCH_API I0Backward0() = default; +#else +struct TORCH_API I0Backward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "I0Backward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef 
_WIN32 +struct SpecialI0EBackward0 : public TraceableFunction { + TORCH_API SpecialI0EBackward0() = default; +#else +struct TORCH_API SpecialI0EBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SpecialI0EBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct SpecialI1Backward0 : public TraceableFunction { + TORCH_API SpecialI1Backward0() = default; +#else +struct TORCH_API SpecialI1Backward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SpecialI1Backward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct SpecialI1EBackward0 : public TraceableFunction { + TORCH_API SpecialI1EBackward0() = default; +#else +struct TORCH_API SpecialI1EBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SpecialI1EBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, 
SwapSavedVariables& saved) override; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct IgammaBackward0 : public TraceableFunction { + TORCH_API IgammaBackward0() = default; +#else +struct TORCH_API IgammaBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "IgammaBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable other_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct IgammacBackward0 : public TraceableFunction { + TORCH_API IgammacBackward0() = default; +#else +struct TORCH_API IgammacBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "IgammacBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable other_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct IndexBackward0 : public TraceableFunction { + TORCH_API IndexBackward0() = default; +#else +struct TORCH_API IndexBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "IndexBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + indices_.clear(); + indices_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + 
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector indices_; + bool indices_released_ = false; + at::TensorOptions self_options; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct UnsafeIndexBackward0 : public TraceableFunction { + TORCH_API UnsafeIndexBackward0() = default; +#else +struct TORCH_API UnsafeIndexBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UnsafeIndexBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + indices_.clear(); + indices_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector indices_; + bool indices_released_ = false; + at::TensorOptions self_options; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct IndexAddBackward0 : public TraceableFunction { + TORCH_API IndexAddBackward0() = default; +#else +struct TORCH_API IndexAddBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "IndexAddBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + index_.reset_data(); + source_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar alpha; + int64_t dim = 0; + SavedVariable index_; + SavedVariable source_; + int64_t source_dim = 0; + +}; +#ifdef _WIN32 +struct IndexReduceBackward0 : public TraceableFunction { + TORCH_API IndexReduceBackward0() = default; +#else +struct TORCH_API IndexReduceBackward0 : public TraceableFunction { +#endif + using 
TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "IndexReduceBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + index_.reset_data(); + self_.reset_data(); + source_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + bool include_self; + SavedVariable index_; + std::string reduce; + SavedVariable self_; + SavedVariable source_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct IndexCopyBackward0 : public TraceableFunction { + TORCH_API IndexCopyBackward0() = default; +#else +struct TORCH_API IndexCopyBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "IndexCopyBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + index_.reset_data(); + source_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + SavedVariable index_; + SavedVariable source_; + int64_t source_dim = 0; + +}; +#ifdef _WIN32 +struct IndexFillBackward0 : public TraceableFunction { + TORCH_API IndexFillBackward0() = default; +#else +struct TORCH_API IndexFillBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "IndexFillBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + index_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& 
saved) override; + int64_t dim = 0; + SavedVariable index_; + +}; +#ifdef _WIN32 +struct IndexFillBackward1 : public TraceableFunction { + TORCH_API IndexFillBackward1() = default; +#else +struct TORCH_API IndexFillBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "IndexFillBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + index_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + SavedVariable index_; + +}; +#ifdef _WIN32 +struct IndexPutBackward0 : public TraceableFunction { + TORCH_API IndexPutBackward0() = default; +#else +struct TORCH_API IndexPutBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "IndexPutBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + indices_.clear(); + indices_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool accumulate; + std::vector indices_; + bool indices_released_ = false; + torch::autograd::generated::TypeAndSize values_info; + +}; +#ifdef _WIN32 +struct UnsafeIndexPutBackward0 : public TraceableFunction { + TORCH_API UnsafeIndexPutBackward0() = default; +#else +struct TORCH_API UnsafeIndexPutBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UnsafeIndexPutBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + indices_.clear(); + 
indices_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool accumulate; + std::vector indices_; + bool indices_released_ = false; + torch::autograd::generated::TypeAndSize values_info; + +}; +#ifdef _WIN32 +struct IndexPutImplBackward0 : public TraceableFunction { + TORCH_API IndexPutImplBackward0() = default; +#else +struct TORCH_API IndexPutImplBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "IndexPutImplBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + indices_.clear(); + indices_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool accumulate; + std::vector indices_; + bool indices_released_ = false; + torch::autograd::generated::TypeAndSize values_info; + +}; +#ifdef _WIN32 +struct IndexSelectBackward0 : public TraceableFunction { + TORCH_API IndexSelectBackward0() = default; +#else +struct TORCH_API IndexSelectBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "IndexSelectBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + index_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + SavedVariable index_; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct LinalgInvExBackward0 : public TraceableFunction { + TORCH_API LinalgInvExBackward0() = default; +#else +struct TORCH_API LinalgInvExBackward0 : public 
TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LinalgInvExBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + inverse_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable inverse_; + +}; +#ifdef _WIN32 +struct LinalgPinvBackward0 : public TraceableFunction { + TORCH_API LinalgPinvBackward0() = default; +#else +struct TORCH_API LinalgPinvBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LinalgPinvBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct KthvalueBackward0 : public TraceableFunction { + TORCH_API KthvalueBackward0() = default; +#else +struct TORCH_API KthvalueBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "KthvalueBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + indices_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + bool keepdim; + std::vector self_sym_sizes; + SavedVariable indices_; + +}; +#ifdef _WIN32 +struct LeBackward0 : public TraceableFunction { + TORCH_API LeBackward0() = 
default; +#else +struct TORCH_API LeBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LeBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct LeBackward1 : public TraceableFunction { + TORCH_API LeBackward1() = default; +#else +struct TORCH_API LeBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LeBackward1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + torch::autograd::generated::TypeAndSize other_info; + torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct LerpBackward0 : public TraceableFunction { + TORCH_API LerpBackward0() = default; +#else +struct TORCH_API LerpBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LerpBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar weight; + +}; +#ifdef _WIN32 +struct LerpBackward1 : public TraceableFunction { + TORCH_API LerpBackward1() = default; +#else +struct TORCH_API LerpBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& 
grads) override; + std::string name() const override { return "LerpBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + end_.reset_data(); + self_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable end_; + SavedVariable self_; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct LgammaBackward0 : public TraceableFunction { + TORCH_API LgammaBackward0() = default; +#else +struct TORCH_API LgammaBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LgammaBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct DigammaBackward0 : public TraceableFunction { + TORCH_API DigammaBackward0() = default; +#else +struct TORCH_API DigammaBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "DigammaBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct PolygammaBackward0 : public TraceableFunction { + TORCH_API PolygammaBackward0() = default; +#else +struct TORCH_API PolygammaBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list 
apply(variable_list&& grads) override; + std::string name() const override { return "PolygammaBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t n = 0; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct PolygammaBackward1 : public TraceableFunction { + TORCH_API PolygammaBackward1() = default; +#else +struct TORCH_API PolygammaBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "PolygammaBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t n = 0; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct LogBackward0 : public TraceableFunction { + TORCH_API LogBackward0() = default; +#else +struct TORCH_API LogBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LogBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct Log10Backward0 : public TraceableFunction { + TORCH_API Log10Backward0() = default; +#else +struct TORCH_API Log10Backward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() 
const override { return "Log10Backward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct Log1PBackward0 : public TraceableFunction { + TORCH_API Log1PBackward0() = default; +#else +struct TORCH_API Log1PBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "Log1PBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct Log2Backward0 : public TraceableFunction { + TORCH_API Log2Backward0() = default; +#else +struct TORCH_API Log2Backward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "Log2Backward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct LogaddexpBackward0 : public TraceableFunction { + TORCH_API LogaddexpBackward0() = default; +#else +struct TORCH_API LogaddexpBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LogaddexpBackward0"; } + void release_variables() override { + 
std::lock_guard lock(mutex_); + other_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable other_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct Logaddexp2Backward0 : public TraceableFunction { + TORCH_API Logaddexp2Backward0() = default; +#else +struct TORCH_API Logaddexp2Backward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "Logaddexp2Backward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable other_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct XlogyBackward0 : public TraceableFunction { + TORCH_API XlogyBackward0() = default; +#else +struct TORCH_API XlogyBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "XlogyBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable other_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct XlogyBackward1 : public TraceableFunction { + TORCH_API XlogyBackward1() = default; +#else +struct TORCH_API XlogyBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return 
"XlogyBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable other_; + at::Scalar self; + +}; +#ifdef _WIN32 +struct XlogyBackward2 : public TraceableFunction { + TORCH_API XlogyBackward2() = default; +#else +struct TORCH_API XlogyBackward2 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "XlogyBackward2"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar other; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct SpecialXlog1PyBackward0 : public TraceableFunction { + TORCH_API SpecialXlog1PyBackward0() = default; +#else +struct TORCH_API SpecialXlog1PyBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SpecialXlog1PyBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable other_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct SpecialXlog1PyBackward1 : public TraceableFunction { + TORCH_API SpecialXlog1PyBackward1() = default; +#else +struct TORCH_API SpecialXlog1PyBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; 
+ std::string name() const override { return "SpecialXlog1PyBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable other_; + at::Scalar self; + +}; +#ifdef _WIN32 +struct SpecialXlog1PyBackward2 : public TraceableFunction { + TORCH_API SpecialXlog1PyBackward2() = default; +#else +struct TORCH_API SpecialXlog1PyBackward2 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SpecialXlog1PyBackward2"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar other; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct SpecialZetaBackward0 : public TraceableFunction { + TORCH_API SpecialZetaBackward0() = default; +#else +struct TORCH_API SpecialZetaBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SpecialZetaBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable other_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct SpecialZetaBackward1 : public TraceableFunction { + TORCH_API SpecialZetaBackward1() = default; +#else +struct TORCH_API SpecialZetaBackward1 : public TraceableFunction { +#endif + using 
TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SpecialZetaBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable other_; + at::Scalar self; + +}; +#ifdef _WIN32 +struct SpecialZetaBackward2 : public TraceableFunction { + TORCH_API SpecialZetaBackward2() = default; +#else +struct TORCH_API SpecialZetaBackward2 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SpecialZetaBackward2"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct LogNormalBackward0 : public TraceableFunction { + TORCH_API LogNormalBackward0() = default; +#else +struct TORCH_API LogNormalBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LogNormalBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct LogsumexpBackward0 : public TraceableFunction { + TORCH_API LogsumexpBackward0() = default; +#else +struct TORCH_API LogsumexpBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LogsumexpBackward0"; } + void 
release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector dim; + bool keepdim; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct LinalgLstsqBackward0 : public TraceableFunction { + TORCH_API LinalgLstsqBackward0() = default; +#else +struct TORCH_API LinalgLstsqBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LinalgLstsqBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + b_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable b_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct LtBackward0 : public TraceableFunction { + TORCH_API LtBackward0() = default; +#else +struct TORCH_API LtBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LtBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct LtBackward1 : public TraceableFunction { + TORCH_API LtBackward1() = default; +#else +struct TORCH_API LtBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LtBackward1"; } + void 
release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + torch::autograd::generated::TypeAndSize other_info; + torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct LinalgLuFactorExBackward0 : public TraceableFunction { + TORCH_API LinalgLuFactorExBackward0() = default; +#else +struct TORCH_API LinalgLuFactorExBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LinalgLuFactorExBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + LU_.reset_data(); + pivots_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool pivot; + SavedVariable LU_; + SavedVariable pivots_; + +}; +#ifdef _WIN32 +struct LinalgLuBackward0 : public TraceableFunction { + TORCH_API LinalgLuBackward0() = default; +#else +struct TORCH_API LinalgLuBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LinalgLuBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + L_.reset_data(); + P_.reset_data(); + U_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool pivot; + SavedVariable L_; + SavedVariable P_; + SavedVariable U_; + +}; +#ifdef _WIN32 +struct LinalgLuSolveBackward0 : public TraceableFunction { + TORCH_API LinalgLuSolveBackward0() = default; +#else +struct TORCH_API LinalgLuSolveBackward0 : public TraceableFunction { +#endif + using 
TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LinalgLuSolveBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + LU_.reset_data(); + pivots_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable LU_; + bool adjoint; + bool left; + SavedVariable pivots_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct LuUnpackBackward0 : public TraceableFunction { + TORCH_API LuUnpackBackward0() = default; +#else +struct TORCH_API LuUnpackBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LuUnpackBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::SymInt LU_data_sym_argsize_minus_1; + c10::SymInt LU_data_sym_argsize_minus_2; + +}; +#ifdef _WIN32 +struct MaskedFillBackward0 : public TraceableFunction { + TORCH_API MaskedFillBackward0() = default; +#else +struct TORCH_API MaskedFillBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MaskedFillBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + mask_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable mask_; + +}; +#ifdef _WIN32 +struct MaskedFillBackward1 : public TraceableFunction { + TORCH_API MaskedFillBackward1() = default; 
+#else +struct TORCH_API MaskedFillBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MaskedFillBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + mask_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable mask_; + +}; +#ifdef _WIN32 +struct MaskedScatterBackward0 : public TraceableFunction { + TORCH_API MaskedScatterBackward0() = default; +#else +struct TORCH_API MaskedScatterBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MaskedScatterBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + mask_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable mask_; + std::vector source_sym_sizes; + +}; +#ifdef _WIN32 +struct MaskedScatterBackwardBackward0 : public TraceableFunction { + TORCH_API MaskedScatterBackwardBackward0() = default; +#else +struct TORCH_API MaskedScatterBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MaskedScatterBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + mask_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + torch::autograd::generated::TypeAndSize grad_output_info; + SavedVariable mask_; + +}; +#ifdef _WIN32 
+struct MaskedSelectBackward0 : public TraceableFunction { + TORCH_API MaskedSelectBackward0() = default; +#else +struct TORCH_API MaskedSelectBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MaskedSelectBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + mask_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable mask_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct LinalgMatrixExpBackward0 : public TraceableFunction { + TORCH_API LinalgMatrixExpBackward0() = default; +#else +struct TORCH_API LinalgMatrixExpBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LinalgMatrixExpBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct MaxBackward0 : public TraceableFunction { + TORCH_API MaxBackward0() = default; +#else +struct TORCH_API MaxBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MaxBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + indices_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + bool keepdim; + 
std::vector self_sym_sizes; + SavedVariable indices_; + +}; +#ifdef _WIN32 +struct MaxBackward1 : public TraceableFunction { + TORCH_API MaxBackward1() = default; +#else +struct TORCH_API MaxBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MaxBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct MaximumBackward0 : public TraceableFunction { + TORCH_API MaximumBackward0() = default; +#else +struct TORCH_API MaximumBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MaximumBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable other_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct FmaxBackward0 : public TraceableFunction { + TORCH_API FmaxBackward0() = default; +#else +struct TORCH_API FmaxBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "FmaxBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& 
inputs, SwapSavedVariables& saved) override; + SavedVariable other_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct MeanBackward0 : public TraceableFunction { + TORCH_API MeanBackward0() = default; +#else +struct TORCH_API MeanBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MeanBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::SymInt self_sym_numel; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct MeanBackward1 : public TraceableFunction { + TORCH_API MeanBackward1() = default; +#else +struct TORCH_API MeanBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MeanBackward1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::OptionalArray dim; + bool keepdim; + c10::SymInt self_sym_numel; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct MedianBackward0 : public TraceableFunction { + TORCH_API MedianBackward0() = default; +#else +struct TORCH_API MedianBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MedianBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + 
SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct NanmedianBackward0 : public TraceableFunction { + TORCH_API NanmedianBackward0() = default; +#else +struct TORCH_API NanmedianBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NanmedianBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct MedianBackward1 : public TraceableFunction { + TORCH_API MedianBackward1() = default; +#else +struct TORCH_API MedianBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MedianBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + indices_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + bool keepdim; + std::vector self_sym_sizes; + SavedVariable indices_; + +}; +#ifdef _WIN32 +struct NanmedianBackward1 : public TraceableFunction { + TORCH_API NanmedianBackward1() = default; +#else +struct TORCH_API NanmedianBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NanmedianBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + indices_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list 
apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + bool keepdim; + std::vector self_sym_sizes; + SavedVariable indices_; + +}; +#ifdef _WIN32 +struct MinBackward0 : public TraceableFunction { + TORCH_API MinBackward0() = default; +#else +struct TORCH_API MinBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MinBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + indices_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + bool keepdim; + std::vector self_sym_sizes; + SavedVariable indices_; + +}; +#ifdef _WIN32 +struct MinBackward1 : public TraceableFunction { + TORCH_API MinBackward1() = default; +#else +struct TORCH_API MinBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MinBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct MinimumBackward0 : public TraceableFunction { + TORCH_API MinimumBackward0() = default; +#else +struct TORCH_API MinimumBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MinimumBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + 
self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable other_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct FminBackward0 : public TraceableFunction { + TORCH_API FminBackward0() = default; +#else +struct TORCH_API FminBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "FminBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable other_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct AmaxBackward0 : public TraceableFunction { + TORCH_API AmaxBackward0() = default; +#else +struct TORCH_API AmaxBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AmaxBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector dim; + bool keepdim; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct AminBackward0 : public TraceableFunction { + TORCH_API AminBackward0() = default; +#else +struct TORCH_API AminBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AminBackward0"; } + void release_variables() override 
{ + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector dim; + bool keepdim; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct MmBackward0 : public TraceableFunction { + TORCH_API MmBackward0() = default; +#else +struct TORCH_API MmBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MmBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + mat2_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable mat2_; + at::Layout mat2_layout; + std::vector mat2_sym_sizes; + std::vector mat2_sym_strides; + SavedVariable self_; + at::Layout self_layout; + std::vector self_sym_sizes; + std::vector self_sym_strides; + +}; +#ifdef _WIN32 +struct ModeBackward0 : public TraceableFunction { + TORCH_API ModeBackward0() = default; +#else +struct TORCH_API ModeBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ModeBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + indices_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + bool keepdim; + std::vector self_sym_sizes; + SavedVariable indices_; + +}; +#ifdef _WIN32 +struct MulBackward0 : public TraceableFunction { + TORCH_API MulBackward0() = default; +#else +struct TORCH_API 
MulBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MulBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable other_; + at::ScalarType other_scalar_type; + SavedVariable self_; + at::ScalarType self_scalar_type; + +}; +#ifdef _WIN32 +struct MulBackward1 : public TraceableFunction { + TORCH_API MulBackward1() = default; +#else +struct TORCH_API MulBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MulBackward1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar other; + at::ScalarType self_scalar_type; + +}; +#ifdef _WIN32 +struct MvBackward0 : public TraceableFunction { + TORCH_API MvBackward0() = default; +#else +struct TORCH_API MvBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MvBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + vec_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + SavedVariable vec_; + +}; +#ifdef _WIN32 +struct MvlgammaBackward0 : public TraceableFunction { + TORCH_API MvlgammaBackward0() = default; +#else 
+struct TORCH_API MvlgammaBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MvlgammaBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t p = 0; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct NanToNumBackward0 : public TraceableFunction { + TORCH_API NanToNumBackward0() = default; +#else +struct TORCH_API NanToNumBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NanToNumBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct NativeBatchNormBackward0 : public TraceableFunction { + TORCH_API NativeBatchNormBackward0() = default; +#else +struct TORCH_API NativeBatchNormBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NativeBatchNormBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + input_.reset_data(); + running_mean_.reset_data(); + running_var_.reset_data(); + weight_.reset_data(); + result1_.reset_data(); + result2_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + double eps; + SavedVariable input_; + 
SavedVariable running_mean_; + SavedVariable running_var_; + bool training; + SavedVariable weight_; + SavedVariable result1_; + SavedVariable result2_; + +}; +#ifdef _WIN32 +struct NativeBatchNormLegitBackward0 : public TraceableFunction { + TORCH_API NativeBatchNormLegitBackward0() = default; +#else +struct TORCH_API NativeBatchNormLegitBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NativeBatchNormLegitBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + input_.reset_data(); + running_mean_.reset_data(); + running_var_.reset_data(); + weight_.reset_data(); + result1_.reset_data(); + result2_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + double eps; + SavedVariable input_; + SavedVariable running_mean_; + SavedVariable running_var_; + bool training; + SavedVariable weight_; + SavedVariable result1_; + SavedVariable result2_; + +}; +#ifdef _WIN32 +struct NativeBatchNormLegitNoTrainingBackward0 : public TraceableFunction { + TORCH_API NativeBatchNormLegitNoTrainingBackward0() = default; +#else +struct TORCH_API NativeBatchNormLegitNoTrainingBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NativeBatchNormLegitNoTrainingBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + input_.reset_data(); + running_mean_.reset_data(); + running_var_.reset_data(); + weight_.reset_data(); + result1_.reset_data(); + result2_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + double 
eps; + SavedVariable input_; + SavedVariable running_mean_; + SavedVariable running_var_; + SavedVariable weight_; + SavedVariable result1_; + SavedVariable result2_; + +}; +#ifdef _WIN32 +struct NativeBatchNormLegitBackward1 : public TraceableFunction { + TORCH_API NativeBatchNormLegitBackward1() = default; +#else +struct TORCH_API NativeBatchNormLegitBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NativeBatchNormLegitBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + input_.reset_data(); + weight_.reset_data(); + result1_.reset_data(); + result2_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + double eps; + SavedVariable input_; + bool training; + SavedVariable weight_; + SavedVariable result1_; + SavedVariable result2_; + +}; +#ifdef _WIN32 +struct NativeBatchNormBackwardBackward0 : public TraceableFunction { + TORCH_API NativeBatchNormBackwardBackward0() = default; +#else +struct TORCH_API NativeBatchNormBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NativeBatchNormBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grad_out_.reset_data(); + input_.reset_data(); + running_mean_.reset_data(); + running_var_.reset_data(); + save_invstd_.reset_data(); + save_mean_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + double eps; + SavedVariable grad_out_; + SavedVariable input_; + SavedVariable running_mean_; + SavedVariable 
running_var_; + SavedVariable save_invstd_; + SavedVariable save_mean_; + bool train; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct NativeLayerNormBackward0 : public TraceableFunction { + TORCH_API NativeLayerNormBackward0() = default; +#else +struct TORCH_API NativeLayerNormBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NativeLayerNormBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + bias_.reset_data(); + input_.reset_data(); + weight_.reset_data(); + result1_.reset_data(); + result2_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable bias_; + SavedVariable input_; + std::vector normalized_shape; + SavedVariable weight_; + SavedVariable result1_; + SavedVariable result2_; + +}; +#ifdef _WIN32 +struct NativeLayerNormBackwardBackward0 : public TraceableFunction { + TORCH_API NativeLayerNormBackwardBackward0() = default; +#else +struct TORCH_API NativeLayerNormBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NativeLayerNormBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grad_out_.reset_data(); + input_.reset_data(); + mean_.reset_data(); + rstd_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable grad_out_; + SavedVariable input_; + SavedVariable mean_; + std::vector normalized_shape; + SavedVariable rstd_; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct NativeGroupNormBackward0 : 
public TraceableFunction { + TORCH_API NativeGroupNormBackward0() = default; +#else +struct TORCH_API NativeGroupNormBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NativeGroupNormBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + input_.reset_data(); + weight_.reset_data(); + result1_.reset_data(); + result2_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::SymInt C; + c10::SymInt HxW; + c10::SymInt N; + double eps; + int64_t group = 0; + SavedVariable input_; + SavedVariable weight_; + SavedVariable result1_; + SavedVariable result2_; + +}; +#ifdef _WIN32 +struct NeBackward0 : public TraceableFunction { + TORCH_API NeBackward0() = default; +#else +struct TORCH_API NeBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NeBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct NeBackward1 : public TraceableFunction { + TORCH_API NeBackward1() = default; +#else +struct TORCH_API NeBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NeBackward1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) 
override; + torch::autograd::generated::TypeAndSize other_info; + torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct NegBackward0 : public TraceableFunction { + TORCH_API NegBackward0() = default; +#else +struct TORCH_API NegBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NegBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct NextafterBackward0 : public TraceableFunction { + TORCH_API NextafterBackward0() = default; +#else +struct TORCH_API NextafterBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NextafterBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct NormBackward0 : public TraceableFunction { + TORCH_API NormBackward0() = default; +#else +struct TORCH_API NormBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NormBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar p; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct NormBackward1 : public 
TraceableFunction { + TORCH_API NormBackward1() = default; +#else +struct TORCH_API NormBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NormBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector dim; + bool keepdim; + c10::optional p; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct NormBackward2 : public TraceableFunction { + TORCH_API NormBackward2() = default; +#else +struct TORCH_API NormBackward2 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NormBackward2"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::optional p; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct NormBackward3 : public TraceableFunction { + TORCH_API NormBackward3() = default; +#else +struct TORCH_API NormBackward3 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NormBackward3"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) 
override; + std::vector dim; + bool keepdim; + c10::optional p; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct LinalgVectorNormBackward0 : public TraceableFunction { + TORCH_API LinalgVectorNormBackward0() = default; +#else +struct TORCH_API LinalgVectorNormBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LinalgVectorNormBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::OptionalArray dim; + bool keepdim; + at::Scalar ord; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct PdistBackward0 : public TraceableFunction { + TORCH_API PdistBackward0() = default; +#else +struct TORCH_API PdistBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "PdistBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + double p; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct PdistBackwardBackward0 : public TraceableFunction { + TORCH_API PdistBackwardBackward0() = default; +#else +struct TORCH_API PdistBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "PdistBackwardBackward0"; } + void 
release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct EuclideanDistBackward0 : public TraceableFunction { + TORCH_API EuclideanDistBackward0() = default; +#else +struct TORCH_API EuclideanDistBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "EuclideanDistBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + x1_.reset_data(); + x2_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable x1_; + SavedVariable x2_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct CdistBackward0 : public TraceableFunction { + TORCH_API CdistBackward0() = default; +#else +struct TORCH_API CdistBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CdistBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + x1_.reset_data(); + x2_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + double p; + SavedVariable x1_; + SavedVariable x2_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct CdistBackwardBackward0 : public TraceableFunction { + TORCH_API CdistBackwardBackward0() = default; +#else +struct TORCH_API CdistBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + 
std::string name() const override { return "CdistBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct NormalBackward0 : public TraceableFunction { + TORCH_API NormalBackward0() = default; +#else +struct TORCH_API NormalBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NormalBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct NormalBackward1 : public TraceableFunction { + TORCH_API NormalBackward1() = default; +#else +struct TORCH_API NormalBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NormalBackward1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector mean_sym_sizes; + +}; +#ifdef _WIN32 +struct NormalBackward2 : public TraceableFunction { + TORCH_API NormalBackward2() = default; +#else +struct TORCH_API NormalBackward2 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NormalBackward2"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + 
std::vector std_sym_sizes; + +}; +#ifdef _WIN32 +struct NormalBackward3 : public TraceableFunction { + TORCH_API NormalBackward3() = default; +#else +struct TORCH_API NormalBackward3 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NormalBackward3"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector mean_sym_sizes; + std::vector std_sym_sizes; + +}; +#ifdef _WIN32 +struct LinalgHouseholderProductBackward0 : public TraceableFunction { + TORCH_API LinalgHouseholderProductBackward0() = default; +#else +struct TORCH_API LinalgHouseholderProductBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LinalgHouseholderProductBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + input_.reset_data(); + tau_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable input_; + SavedVariable tau_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct OrmqrBackward0 : public TraceableFunction { + TORCH_API OrmqrBackward0() = default; +#else +struct TORCH_API OrmqrBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "OrmqrBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + input2_.reset_data(); + input3_.reset_data(); + self_.reset_data(); + result_.reset_data(); + } + + void 
compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable input2_; + SavedVariable input3_; + bool left; + SavedVariable self_; + bool transpose; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct PermuteBackward0 : public Node { + TORCH_API PermuteBackward0() = default; +#else +struct TORCH_API PermuteBackward0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "PermuteBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector dims; + +}; +#ifdef _WIN32 +struct PoissonBackward0 : public TraceableFunction { + TORCH_API PoissonBackward0() = default; +#else +struct TORCH_API PoissonBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "PoissonBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct PowBackward0 : public TraceableFunction { + TORCH_API PowBackward0() = default; +#else +struct TORCH_API PowBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "PowBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, 
SwapSavedVariables& saved) override; + at::Scalar exponent; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct PowBackward1 : public TraceableFunction { + TORCH_API PowBackward1() = default; +#else +struct TORCH_API PowBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "PowBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + exponent_.reset_data(); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable exponent_; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct PowBackward2 : public TraceableFunction { + TORCH_API PowBackward2() = default; +#else +struct TORCH_API PowBackward2 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "PowBackward2"; } + void release_variables() override { + std::lock_guard lock(mutex_); + exponent_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable exponent_; + at::Scalar self; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct ProdBackward0 : public TraceableFunction { + TORCH_API ProdBackward0() = default; +#else +struct TORCH_API ProdBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ProdBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void 
compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct ProdBackward1 : public TraceableFunction { + TORCH_API ProdBackward1() = default; +#else +struct TORCH_API ProdBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ProdBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + bool keepdim; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct PutBackward0 : public TraceableFunction { + TORCH_API PutBackward0() = default; +#else +struct TORCH_API PutBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "PutBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + index_.reset_data(); + source_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool accumulate; + SavedVariable index_; + SavedVariable source_; + torch::autograd::generated::TypeAndSize source_info; + +}; +#ifdef _WIN32 +struct LinalgQrBackward0 : public TraceableFunction { + TORCH_API LinalgQrBackward0() = default; +#else +struct TORCH_API LinalgQrBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return 
"LinalgQrBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + Q_.reset_data(); + R_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::string mode; + SavedVariable Q_; + SavedVariable R_; + +}; +#ifdef _WIN32 +struct Rad2DegBackward0 : public TraceableFunction { + TORCH_API Rad2DegBackward0() = default; +#else +struct TORCH_API Rad2DegBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "Rad2DegBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct RandomBackward0 : public TraceableFunction { + TORCH_API RandomBackward0() = default; +#else +struct TORCH_API RandomBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "RandomBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct RandomBackward1 : public TraceableFunction { + TORCH_API RandomBackward1() = default; +#else +struct TORCH_API RandomBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "RandomBackward1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& 
inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct RandomBackward2 : public TraceableFunction { + TORCH_API RandomBackward2() = default; +#else +struct TORCH_API RandomBackward2 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "RandomBackward2"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct ReciprocalBackward0 : public TraceableFunction { + TORCH_API ReciprocalBackward0() = default; +#else +struct TORCH_API ReciprocalBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ReciprocalBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct RemainderBackward0 : public TraceableFunction { + TORCH_API RemainderBackward0() = default; +#else +struct TORCH_API RemainderBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "RemainderBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct RemainderBackward1 : public TraceableFunction { + TORCH_API RemainderBackward1() = default; +#else +struct TORCH_API 
RemainderBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "RemainderBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable other_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct RenormBackward0 : public TraceableFunction { + TORCH_API RenormBackward0() = default; +#else +struct TORCH_API RenormBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "RenormBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + at::Scalar maxnorm; + at::Scalar p; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct RepeatBackward0 : public TraceableFunction { + TORCH_API RepeatBackward0() = default; +#else +struct TORCH_API RepeatBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "RepeatBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector repeats; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct SpecialEntrBackward0 : public TraceableFunction { + TORCH_API SpecialEntrBackward0() = default; +#else +struct 
TORCH_API SpecialEntrBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SpecialEntrBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct SpecialNdtriBackward0 : public TraceableFunction { + TORCH_API SpecialNdtriBackward0() = default; +#else +struct TORCH_API SpecialNdtriBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SpecialNdtriBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct SpecialLogNdtrBackward0 : public TraceableFunction { + TORCH_API SpecialLogNdtrBackward0() = default; +#else +struct TORCH_API SpecialLogNdtrBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SpecialLogNdtrBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct ReshapeAliasBackward0 : public Node { + TORCH_API ReshapeAliasBackward0() = 
default; +#else +struct TORCH_API ReshapeAliasBackward0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ReshapeAliasBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct RoundBackward0 : public TraceableFunction { + TORCH_API RoundBackward0() = default; +#else +struct TORCH_API RoundBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "RoundBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct RoundBackward1 : public TraceableFunction { + TORCH_API RoundBackward1() = default; +#else +struct TORCH_API RoundBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "RoundBackward1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct RsqrtBackward0 : public TraceableFunction { + TORCH_API RsqrtBackward0() = default; +#else +struct TORCH_API RsqrtBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "RsqrtBackward0"; } + void release_variables() override { + std::lock_guard 
lock(mutex_); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct ScatterBackward0 : public TraceableFunction { + TORCH_API ScatterBackward0() = default; +#else +struct TORCH_API ScatterBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ScatterBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + index_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + SavedVariable index_; + +}; +#ifdef _WIN32 +struct ScatterBackward1 : public TraceableFunction { + TORCH_API ScatterBackward1() = default; +#else +struct TORCH_API ScatterBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ScatterBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + index_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + SavedVariable index_; + +}; +#ifdef _WIN32 +struct ScatterAddBackward0 : public TraceableFunction { + TORCH_API ScatterAddBackward0() = default; +#else +struct TORCH_API ScatterAddBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ScatterAddBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + 
index_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + SavedVariable index_; + +}; +#ifdef _WIN32 +struct SelectBackward0 : public Node { + TORCH_API SelectBackward0() = default; +#else +struct TORCH_API SelectBackward0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SelectBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + c10::SymInt index; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct SelectBackwardAutogradNestedTensor0 : public Node { + TORCH_API SelectBackwardAutogradNestedTensor0() = default; +#else +struct TORCH_API SelectBackwardAutogradNestedTensor0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SelectBackwardAutogradNestedTensor0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + c10::SymInt index; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct SelectBackwardBackward0 : public TraceableFunction { + TORCH_API SelectBackwardBackward0() = default; +#else +struct TORCH_API SelectBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SelectBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) 
override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + c10::SymInt index; + +}; +#ifdef _WIN32 +struct SigmoidBackward0 : public TraceableFunction { + TORCH_API SigmoidBackward0() = default; +#else +struct TORCH_API SigmoidBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SigmoidBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct LogitBackward0 : public TraceableFunction { + TORCH_API LogitBackward0() = default; +#else +struct TORCH_API LogitBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LogitBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::optional eps; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct SignBackward0 : public TraceableFunction { + TORCH_API SignBackward0() = default; +#else +struct TORCH_API SignBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SignBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + 
+ +}; +#ifdef _WIN32 +struct SgnBackward0 : public TraceableFunction { + TORCH_API SgnBackward0() = default; +#else +struct TORCH_API SgnBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SgnBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct SinBackward0 : public TraceableFunction { + TORCH_API SinBackward0() = default; +#else +struct TORCH_API SinBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SinBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct SincBackward0 : public TraceableFunction { + TORCH_API SincBackward0() = default; +#else +struct TORCH_API SincBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SincBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct SinhBackward0 : public TraceableFunction { + 
TORCH_API SinhBackward0() = default; +#else +struct TORCH_API SinhBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SinhBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct SliceBackward0 : public Node { + TORCH_API SliceBackward0() = default; +#else +struct TORCH_API SliceBackward0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SliceBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + c10::optional end; + std::vector self_sym_sizes; + c10::optional start; + c10::SymInt step; + +}; +#ifdef _WIN32 +struct SliceBackwardBackward0 : public TraceableFunction { + TORCH_API SliceBackwardBackward0() = default; +#else +struct TORCH_API SliceBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SliceBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + c10::SymInt end; + c10::SymInt start; + c10::SymInt step; + +}; +#ifdef _WIN32 +struct SliceInverseBackward0 : public Node { + TORCH_API SliceInverseBackward0() = default; +#else +struct TORCH_API 
SliceInverseBackward0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SliceInverseBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + c10::optional end; + torch::autograd::generated::TypeAndSize self_info; + c10::optional start; + c10::SymInt step; + +}; +#ifdef _WIN32 +struct SliceScatterBackward0 : public TraceableFunction { + TORCH_API SliceScatterBackward0() = default; +#else +struct TORCH_API SliceScatterBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SliceScatterBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + c10::optional end; + torch::autograd::generated::TypeAndSize src_info; + c10::optional start; + c10::SymInt step; + +}; +#ifdef _WIN32 +struct SelectScatterBackward0 : public TraceableFunction { + TORCH_API SelectScatterBackward0() = default; +#else +struct TORCH_API SelectScatterBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SelectScatterBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + c10::SymInt index; + torch::autograd::generated::TypeAndSize src_info; + +}; +#ifdef _WIN32 +struct DiagonalScatterBackward0 : public 
TraceableFunction { + TORCH_API DiagonalScatterBackward0() = default; +#else +struct TORCH_API DiagonalScatterBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "DiagonalScatterBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim1 = 0; + int64_t dim2 = 0; + int64_t offset = 0; + torch::autograd::generated::TypeAndSize src_info; + +}; +#ifdef _WIN32 +struct AsStridedScatterBackward0 : public TraceableFunction { + TORCH_API AsStridedScatterBackward0() = default; +#else +struct TORCH_API AsStridedScatterBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AsStridedScatterBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::TensorGeometry self_geometry; + std::vector size; + at::TensorGeometry src_geometry; + c10::optional storage_offset; + std::vector stride; + +}; +#ifdef _WIN32 +struct LinalgSolveExBackward0 : public TraceableFunction { + TORCH_API LinalgSolveExBackward0() = default; +#else +struct TORCH_API LinalgSolveExBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LinalgSolveExBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + A_.reset_data(); + LU_.reset_data(); + pivots_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) 
override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable A_; + bool left; + SavedVariable LU_; + SavedVariable pivots_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct SortBackward0 : public TraceableFunction { + TORCH_API SortBackward0() = default; +#else +struct TORCH_API SortBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SortBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + indices_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + std::vector self_sym_sizes; + SavedVariable indices_; + +}; +#ifdef _WIN32 +struct SortBackward1 : public TraceableFunction { + TORCH_API SortBackward1() = default; +#else +struct TORCH_API SortBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SortBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + indices_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + std::vector self_sym_sizes; + SavedVariable indices_; + +}; +#ifdef _WIN32 +struct SplitBackward0 : public Node { + TORCH_API SplitBackward0() = default; +#else +struct TORCH_API SplitBackward0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SplitBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list 
apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + at::TensorOptions self_options; + std::vector self_sym_sizes; + c10::SymInt split_size; + +}; +#ifdef _WIN32 +struct UnsafeSplitBackward0 : public TraceableFunction { + TORCH_API UnsafeSplitBackward0() = default; +#else +struct TORCH_API UnsafeSplitBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UnsafeSplitBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + at::TensorOptions self_options; + std::vector self_sym_sizes; + c10::SymInt split_size; + +}; +#ifdef _WIN32 +struct SplitWithSizesBackward0 : public Node { + TORCH_API SplitWithSizesBackward0() = default; +#else +struct TORCH_API SplitWithSizesBackward0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SplitWithSizesBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + at::TensorOptions self_options; + std::vector self_sym_sizes; + std::vector split_sizes; + +}; +#ifdef _WIN32 +struct SplitWithSizesBackwardAutogradNestedTensor0 : public Node { + TORCH_API SplitWithSizesBackwardAutogradNestedTensor0() = default; +#else +struct TORCH_API SplitWithSizesBackwardAutogradNestedTensor0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SplitWithSizesBackwardAutogradNestedTensor0"; } + void release_variables() override { + 
std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + SavedVariable self_; + at::TensorOptions self_options; + std::vector split_sizes; + +}; +#ifdef _WIN32 +struct UnsafeSplitWithSizesBackward0 : public TraceableFunction { + TORCH_API UnsafeSplitWithSizesBackward0() = default; +#else +struct TORCH_API UnsafeSplitWithSizesBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UnsafeSplitWithSizesBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + at::TensorOptions self_options; + std::vector self_sym_sizes; + std::vector split_sizes; + +}; +#ifdef _WIN32 +struct SqrtBackward0 : public TraceableFunction { + TORCH_API SqrtBackward0() = default; +#else +struct TORCH_API SqrtBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SqrtBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct SqueezeBackward0 : public Node { + TORCH_API SqueezeBackward0() = default; +#else +struct TORCH_API SqueezeBackward0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SqueezeBackward0"; } + void 
release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct SqueezeBackward1 : public Node { + TORCH_API SqueezeBackward1() = default; +#else +struct TORCH_API SqueezeBackward1 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SqueezeBackward1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct SqueezeBackwardAutogradNestedTensor0 : public Node { + TORCH_API SqueezeBackwardAutogradNestedTensor0() = default; +#else +struct TORCH_API SqueezeBackwardAutogradNestedTensor0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SqueezeBackwardAutogradNestedTensor0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + +}; +#ifdef _WIN32 +struct SqueezeBackward2 : public Node { + TORCH_API SqueezeBackward2() = default; +#else +struct TORCH_API SqueezeBackward2 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SqueezeBackward2"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector dim; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct 
SqueezeBackwardAutogradNestedTensor1 : public Node { + TORCH_API SqueezeBackwardAutogradNestedTensor1() = default; +#else +struct TORCH_API SqueezeBackwardAutogradNestedTensor1 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SqueezeBackwardAutogradNestedTensor1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector dim; + int64_t self_dim = 0; + +}; +#ifdef _WIN32 +struct SqueezeBackward3 : public TraceableFunction { + TORCH_API SqueezeBackward3() = default; +#else +struct TORCH_API SqueezeBackward3 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SqueezeBackward3"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct SqueezeBackward4 : public TraceableFunction { + TORCH_API SqueezeBackward4() = default; +#else +struct TORCH_API SqueezeBackward4 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SqueezeBackward4"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct SqueezeBackward5 : public TraceableFunction { + TORCH_API SqueezeBackward5() = default; +#else +struct TORCH_API SqueezeBackward5 : public 
TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SqueezeBackward5"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector dim; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct StdBackward0 : public TraceableFunction { + TORCH_API StdBackward0() = default; +#else +struct TORCH_API StdBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "StdBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::optional correction; + c10::OptionalArray dim; + bool keepdim; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct StdMeanBackward0 : public TraceableFunction { + TORCH_API StdMeanBackward0() = default; +#else +struct TORCH_API StdMeanBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "StdMeanBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result0_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::optional correction; + c10::OptionalArray dim; + bool keepdim; + SavedVariable self_; + SavedVariable result0_; + +}; +#ifdef _WIN32 +struct SubBackward0 : public 
TraceableFunction { + TORCH_API SubBackward0() = default; +#else +struct TORCH_API SubBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SubBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar alpha; + at::ScalarType other_scalar_type; + at::ScalarType self_scalar_type; + +}; +#ifdef _WIN32 +struct SubBackward1 : public TraceableFunction { + TORCH_API SubBackward1() = default; +#else +struct TORCH_API SubBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SubBackward1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::ScalarType self_scalar_type; + +}; +#ifdef _WIN32 +struct RsubBackward0 : public TraceableFunction { + TORCH_API RsubBackward0() = default; +#else +struct TORCH_API RsubBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "RsubBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar alpha; + at::ScalarType other_scalar_type; + at::ScalarType self_scalar_type; + +}; +#ifdef _WIN32 +struct RsubBackward1 : public TraceableFunction { + TORCH_API RsubBackward1() = default; +#else +struct TORCH_API RsubBackward1 : public TraceableFunction { 
+#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "RsubBackward1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar alpha; + at::ScalarType self_scalar_type; + +}; +#ifdef _WIN32 +struct SumBackward0 : public TraceableFunction { + TORCH_API SumBackward0() = default; +#else +struct TORCH_API SumBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SumBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct SumBackward1 : public TraceableFunction { + TORCH_API SumBackward1() = default; +#else +struct TORCH_API SumBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SumBackward1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::OptionalArray dim; + bool keepdim; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct SumBackwardAutogradNestedTensor0 : public TraceableFunction { + TORCH_API SumBackwardAutogradNestedTensor0() = default; +#else +struct TORCH_API SumBackwardAutogradNestedTensor0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() 
const override { return "SumBackwardAutogradNestedTensor0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::OptionalArray dim; + bool keepdim; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct NansumBackward0 : public TraceableFunction { + TORCH_API NansumBackward0() = default; +#else +struct TORCH_API NansumBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NansumBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::OptionalArray dim; + bool keepdim; + SavedVariable self_; + at::ScalarType self_scalar_type; + +}; +#ifdef _WIN32 +struct LinalgSvdBackward0 : public TraceableFunction { + TORCH_API LinalgSvdBackward0() = default; +#else +struct TORCH_API LinalgSvdBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LinalgSvdBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + S_.reset_data(); + U_.reset_data(); + Vh_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool full_matrices; + SavedVariable S_; + c10::SymInt S_sym_argsize_minus_1; + SavedVariable U_; + SavedVariable Vh_; + +}; +#ifdef _WIN32 +struct LinalgEighBackward0 : public TraceableFunction { + TORCH_API LinalgEighBackward0() = default; 
+#else +struct TORCH_API LinalgEighBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LinalgEighBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + eigenvalues_.reset_data(); + eigenvectors_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable eigenvalues_; + SavedVariable eigenvectors_; + +}; +#ifdef _WIN32 +struct LinalgEigBackward0 : public TraceableFunction { + TORCH_API LinalgEigBackward0() = default; +#else +struct TORCH_API LinalgEigBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LinalgEigBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + eigenvalues_.reset_data(); + eigenvectors_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::ScalarType self_scalar_type; + SavedVariable eigenvalues_; + SavedVariable eigenvectors_; + +}; +#ifdef _WIN32 +struct TBackward0 : public Node { + TORCH_API TBackward0() = default; +#else +struct TORCH_API TBackward0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "TBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct TBackward1 : public TraceableFunction { + TORCH_API TBackward1() = default; +#else +struct TORCH_API TBackward1 : 
public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "TBackward1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct FlipBackward0 : public TraceableFunction { + TORCH_API FlipBackward0() = default; +#else +struct TORCH_API FlipBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "FlipBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector dims; + +}; +#ifdef _WIN32 +struct RollBackward0 : public TraceableFunction { + TORCH_API RollBackward0() = default; +#else +struct TORCH_API RollBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "RollBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector dims; + std::vector shifts; + +}; +#ifdef _WIN32 +struct Rot90Backward0 : public TraceableFunction { + TORCH_API Rot90Backward0() = default; +#else +struct TORCH_API Rot90Backward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "Rot90Backward0"; } + void release_variables() override { + + + } + + void 
compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector dims; + int64_t k = 0; + +}; +#ifdef _WIN32 +struct TakeBackward0 : public TraceableFunction { + TORCH_API TakeBackward0() = default; +#else +struct TORCH_API TakeBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "TakeBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + index_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable index_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct TanBackward0 : public TraceableFunction { + TORCH_API TanBackward0() = default; +#else +struct TORCH_API TanBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "TanBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct TanhBackward0 : public TraceableFunction { + TORCH_API TanhBackward0() = default; +#else +struct TORCH_API TanhBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "TanhBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + 
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct TopkBackward0 : public TraceableFunction { + TORCH_API TopkBackward0() = default; +#else +struct TORCH_API TopkBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "TopkBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + indices_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + std::vector self_sym_sizes; + SavedVariable indices_; + +}; +#ifdef _WIN32 +struct TraceBackward0 : public TraceableFunction { + TORCH_API TraceBackward0() = default; +#else +struct TORCH_API TraceBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "TraceBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct TransposeBackward0 : public Node { + TORCH_API TransposeBackward0() = default; +#else +struct TORCH_API TransposeBackward0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "TransposeBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim0 = 0; + int64_t dim1 = 0; + +}; +#ifdef _WIN32 +struct 
TransposeBackward1 : public TraceableFunction { + TORCH_API TransposeBackward1() = default; +#else +struct TORCH_API TransposeBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "TransposeBackward1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim0 = 0; + int64_t dim1 = 0; + +}; +#ifdef _WIN32 +struct TriangularSolveBackward0 : public TraceableFunction { + TORCH_API TriangularSolveBackward0() = default; +#else +struct TORCH_API TriangularSolveBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "TriangularSolveBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + A_.reset_data(); + self_.reset_data(); + solution_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable A_; + SavedVariable self_; + bool transpose; + bool unitriangular; + bool upper; + SavedVariable solution_; + +}; +#ifdef _WIN32 +struct LinalgSolveTriangularBackward0 : public TraceableFunction { + TORCH_API LinalgSolveTriangularBackward0() = default; +#else +struct TORCH_API LinalgSolveTriangularBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LinalgSolveTriangularBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; 
+ variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool left; + SavedVariable self_; + bool unitriangular; + bool upper; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct TrilBackward0 : public TraceableFunction { + TORCH_API TrilBackward0() = default; +#else +struct TORCH_API TrilBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "TrilBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t diagonal = 0; + +}; +#ifdef _WIN32 +struct TriuBackward0 : public TraceableFunction { + TORCH_API TriuBackward0() = default; +#else +struct TORCH_API TriuBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "TriuBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t diagonal = 0; + +}; +#ifdef _WIN32 +struct TruncBackward0 : public TraceableFunction { + TORCH_API TruncBackward0() = default; +#else +struct TORCH_API TruncBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "TruncBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct ToDenseBackward0 : public TraceableFunction { + TORCH_API 
ToDenseBackward0() = default; +#else +struct TORCH_API ToDenseBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ToDenseBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::optional masked_grad; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct ToSparseBackward0 : public TraceableFunction { + TORCH_API ToSparseBackward0() = default; +#else +struct TORCH_API ToSparseBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ToSparseBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Layout self_layout; + c10::OptionalArray self_self_sym_blocksize_opt; + +}; +#ifdef _WIN32 +struct ToSparseBackward1 : public TraceableFunction { + TORCH_API ToSparseBackward1() = default; +#else +struct TORCH_API ToSparseBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ToSparseBackward1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Layout self_layout; + c10::OptionalArray self_self_sym_blocksize_opt; + +}; +#ifdef _WIN32 +struct ToSparseCsrBackward0 : public TraceableFunction { + TORCH_API ToSparseCsrBackward0() = default; 
+#else +struct TORCH_API ToSparseCsrBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ToSparseCsrBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Layout self_layout; + c10::OptionalArray self_self_sym_blocksize_opt; + +}; +#ifdef _WIN32 +struct ToSparseCscBackward0 : public TraceableFunction { + TORCH_API ToSparseCscBackward0() = default; +#else +struct TORCH_API ToSparseCscBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ToSparseCscBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Layout self_layout; + c10::OptionalArray self_self_sym_blocksize_opt; + +}; +#ifdef _WIN32 +struct ToSparseBsrBackward0 : public TraceableFunction { + TORCH_API ToSparseBsrBackward0() = default; +#else +struct TORCH_API ToSparseBsrBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ToSparseBsrBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Layout self_layout; + c10::OptionalArray self_self_sym_blocksize_opt; + +}; +#ifdef _WIN32 +struct ToSparseBscBackward0 : public TraceableFunction { + TORCH_API ToSparseBscBackward0() = default; +#else +struct TORCH_API 
ToSparseBscBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ToSparseBscBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Layout self_layout; + c10::OptionalArray self_self_sym_blocksize_opt; + +}; +#ifdef _WIN32 +struct ToMkldnnBackward0 : public TraceableFunction { + TORCH_API ToMkldnnBackward0() = default; +#else +struct TORCH_API ToMkldnnBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ToMkldnnBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct UnfoldBackward0 : public Node { + TORCH_API UnfoldBackward0() = default; +#else +struct TORCH_API UnfoldBackward0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UnfoldBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dimension = 0; + std::vector self_sym_sizes; + int64_t size = 0; + int64_t step = 0; + +}; +#ifdef _WIN32 +struct UnfoldBackwardBackward0 : public TraceableFunction { + TORCH_API UnfoldBackwardBackward0() = default; +#else +struct TORCH_API UnfoldBackwardBackward0 : public TraceableFunction { +#endif + using 
TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UnfoldBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + int64_t size = 0; + int64_t step = 0; + +}; +#ifdef _WIN32 +struct UniformBackward0 : public TraceableFunction { + TORCH_API UniformBackward0() = default; +#else +struct TORCH_API UniformBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UniformBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct UniqueBackward0 : public TraceableFunction { + TORCH_API UniqueBackward0() = default; +#else +struct TORCH_API UniqueBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UniqueBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct UniqueDimBackward0 : public TraceableFunction { + TORCH_API UniqueDimBackward0() = default; +#else +struct TORCH_API UniqueDimBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UniqueDimBackward0"; } + void release_variables() override { + + + } + + void 
compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct UniqueConsecutiveBackward0 : public TraceableFunction { + TORCH_API UniqueConsecutiveBackward0() = default; +#else +struct TORCH_API UniqueConsecutiveBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UniqueConsecutiveBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct UniqueDimConsecutiveBackward0 : public TraceableFunction { + TORCH_API UniqueDimConsecutiveBackward0() = default; +#else +struct TORCH_API UniqueDimConsecutiveBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UniqueDimConsecutiveBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct Unique2Backward0 : public TraceableFunction { + TORCH_API Unique2Backward0() = default; +#else +struct TORCH_API Unique2Backward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "Unique2Backward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct UnsafeViewBackward0 : 
public TraceableFunction { + TORCH_API UnsafeViewBackward0() = default; +#else +struct TORCH_API UnsafeViewBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UnsafeViewBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct LiftBackward0 : public TraceableFunction { + TORCH_API LiftBackward0() = default; +#else +struct TORCH_API LiftBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LiftBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct LiftFreshBackward0 : public TraceableFunction { + TORCH_API LiftFreshBackward0() = default; +#else +struct TORCH_API LiftFreshBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LiftFreshBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct UnsqueezeBackward0 : public Node { + TORCH_API UnsqueezeBackward0() = default; +#else +struct TORCH_API UnsqueezeBackward0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return 
"UnsqueezeBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + +}; +#ifdef _WIN32 +struct UnsqueezeBackward1 : public TraceableFunction { + TORCH_API UnsqueezeBackward1() = default; +#else +struct TORCH_API UnsqueezeBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UnsqueezeBackward1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + +}; +#ifdef _WIN32 +struct VarBackward0 : public TraceableFunction { + TORCH_API VarBackward0() = default; +#else +struct TORCH_API VarBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "VarBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::optional correction; + c10::OptionalArray dim; + bool keepdim; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct VarMeanBackward0 : public TraceableFunction { + TORCH_API VarMeanBackward0() = default; +#else +struct TORCH_API VarMeanBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "VarMeanBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void 
compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::optional correction; + c10::OptionalArray dim; + bool keepdim; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct ViewBackward0 : public Node { + TORCH_API ViewBackward0() = default; +#else +struct TORCH_API ViewBackward0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ViewBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct ViewBackwardAutogradNestedTensor0 : public Node { + TORCH_API ViewBackwardAutogradNestedTensor0() = default; +#else +struct TORCH_API ViewBackwardAutogradNestedTensor0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ViewBackwardAutogradNestedTensor0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct ViewAsRealBackward0 : public Node { + TORCH_API ViewAsRealBackward0() = default; +#else +struct TORCH_API ViewAsRealBackward0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ViewAsRealBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct 
ViewAsComplexBackward0 : public Node { + TORCH_API ViewAsComplexBackward0() = default; +#else +struct TORCH_API ViewAsComplexBackward0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ViewAsComplexBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct WhereBackward0 : public TraceableFunction { + TORCH_API WhereBackward0() = default; +#else +struct TORCH_API WhereBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "WhereBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + condition_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable condition_; + +}; +#ifdef _WIN32 +struct WeightNormInterfaceBackward0 : public TraceableFunction { + TORCH_API WeightNormInterfaceBackward0() = default; +#else +struct TORCH_API WeightNormInterfaceBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "WeightNormInterfaceBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + g_.reset_data(); + v_.reset_data(); + result1_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + SavedVariable g_; + SavedVariable v_; + SavedVariable result1_; + +}; +#ifdef _WIN32 +struct ZeroBackward0 : public 
TraceableFunction { + TORCH_API ZeroBackward0() = default; +#else +struct TORCH_API ZeroBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ZeroBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct SparseMaskBackward0 : public TraceableFunction { + TORCH_API SparseMaskBackward0() = default; +#else +struct TORCH_API SparseMaskBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SparseMaskBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + mask_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable mask_; + at::Layout self_layout; + +}; +#ifdef _WIN32 +struct SparseCooTensorWithDimsAndTensorsBackward0 : public TraceableFunction { + TORCH_API SparseCooTensorWithDimsAndTensorsBackward0() = default; +#else +struct TORCH_API SparseCooTensorWithDimsAndTensorsBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SparseCooTensorWithDimsAndTensorsBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct SparseCompressedTensorBackward0 : public 
TraceableFunction { + TORCH_API SparseCompressedTensorBackward0() = default; +#else +struct TORCH_API SparseCompressedTensorBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SparseCompressedTensorBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + values_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable values_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct SparseSumBackward0 : public TraceableFunction { + TORCH_API SparseSumBackward0() = default; +#else +struct TORCH_API SparseSumBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SparseSumBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector dim; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct StandardGammaBackward0 : public TraceableFunction { + TORCH_API StandardGammaBackward0() = default; +#else +struct TORCH_API StandardGammaBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "StandardGammaBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& 
saved) override; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct StandardGammaGradBackward0 : public TraceableFunction { + TORCH_API StandardGammaGradBackward0() = default; +#else +struct TORCH_API StandardGammaGradBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "StandardGammaGradBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct ValuesBackward0 : public Node { + TORCH_API ValuesBackward0() = default; +#else +struct TORCH_API ValuesBackward0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ValuesBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct ValuesBackwardAutogradNestedTensor0 : public Node { + TORCH_API ValuesBackwardAutogradNestedTensor0() = default; +#else +struct TORCH_API ValuesBackwardAutogradNestedTensor0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ValuesBackwardAutogradNestedTensor0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct TrilinearBackward0 : public TraceableFunction { + 
TORCH_API TrilinearBackward0() = default; +#else +struct TORCH_API TrilinearBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "TrilinearBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + i1_.reset_data(); + i2_.reset_data(); + i3_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector expand1; + std::vector expand2; + std::vector expand3; + SavedVariable i1_; + SavedVariable i2_; + SavedVariable i3_; + std::vector sumdim; + +}; +#ifdef _WIN32 +struct ConstantPadNdBackward0 : public TraceableFunction { + TORCH_API ConstantPadNdBackward0() = default; +#else +struct TORCH_API ConstantPadNdBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ConstantPadNdBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector pad; + +}; +#ifdef _WIN32 +struct BinaryCrossEntropyBackward0 : public TraceableFunction { + TORCH_API BinaryCrossEntropyBackward0() = default; +#else +struct TORCH_API BinaryCrossEntropyBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "BinaryCrossEntropyBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + target_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const 
variable_list& inputs, SwapSavedVariables& saved) override; + int64_t reduction = 0; + SavedVariable self_; + SavedVariable target_; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct BinaryCrossEntropyBackwardBackward0 : public TraceableFunction { + TORCH_API BinaryCrossEntropyBackwardBackward0() = default; +#else +struct TORCH_API BinaryCrossEntropyBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "BinaryCrossEntropyBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grad_output_.reset_data(); + self_.reset_data(); + target_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable grad_output_; + int64_t reduction = 0; + SavedVariable self_; + SavedVariable target_; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct BinaryCrossEntropyWithLogitsBackward0 : public TraceableFunction { + TORCH_API BinaryCrossEntropyWithLogitsBackward0() = default; +#else +struct TORCH_API BinaryCrossEntropyWithLogitsBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "BinaryCrossEntropyWithLogitsBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + pos_weight_.reset_data(); + self_.reset_data(); + target_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable pos_weight_; + int64_t reduction = 0; + SavedVariable self_; + SavedVariable target_; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct 
EmbeddingBackward0 : public TraceableFunction { + TORCH_API EmbeddingBackward0() = default; +#else +struct TORCH_API EmbeddingBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "EmbeddingBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + indices_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable indices_; + c10::SymInt padding_idx; + bool scale_grad_by_freq; + bool sparse; + c10::SymInt weight_sym_argsize_0; + +}; +#ifdef _WIN32 +struct EmbeddingDenseBackwardBackward0 : public TraceableFunction { + TORCH_API EmbeddingDenseBackwardBackward0() = default; +#else +struct TORCH_API EmbeddingDenseBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "EmbeddingDenseBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + indices_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable indices_; + c10::SymInt padding_idx; + +}; +#ifdef _WIN32 +struct EmbeddingBagBackward0 : public TraceableFunction { + TORCH_API EmbeddingBagBackward0() = default; +#else +struct TORCH_API EmbeddingBagBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "EmbeddingBagBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + indices_.reset_data(); + offsets_.reset_data(); + 
per_sample_weights_.reset_data(); + weight_.reset_data(); + result1_.reset_data(); + result2_.reset_data(); + result3_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable indices_; + int64_t mode = 0; + SavedVariable offsets_; + int64_t padding_idx = 0; + SavedVariable per_sample_weights_; + bool scale_grad_by_freq; + bool sparse; + SavedVariable weight_; + c10::SymInt weight_sym_argsize_0; + SavedVariable result1_; + SavedVariable result2_; + SavedVariable result3_; + +}; +#ifdef _WIN32 +struct EmbeddingRenormBackward0 : public TraceableFunction { + TORCH_API EmbeddingRenormBackward0() = default; +#else +struct TORCH_API EmbeddingRenormBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "EmbeddingRenormBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct MseLossBackward0 : public TraceableFunction { + TORCH_API MseLossBackward0() = default; +#else +struct TORCH_API MseLossBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MseLossBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + target_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t reduction = 0; + SavedVariable self_; + SavedVariable target_; + +}; +#ifdef _WIN32 +struct MultiMarginLossBackward0 : public 
TraceableFunction { + TORCH_API MultiMarginLossBackward0() = default; +#else +struct TORCH_API MultiMarginLossBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MultiMarginLossBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + target_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar margin; + at::Scalar p; + int64_t reduction = 0; + SavedVariable self_; + SavedVariable target_; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct MultilabelMarginLossBackward0 : public TraceableFunction { + TORCH_API MultilabelMarginLossBackward0() = default; +#else +struct TORCH_API MultilabelMarginLossBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MultilabelMarginLossBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + target_.reset_data(); + is_target_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t reduction = 0; + SavedVariable self_; + SavedVariable target_; + SavedVariable is_target_; + +}; +#ifdef _WIN32 +struct NllLossBackward0 : public TraceableFunction { + TORCH_API NllLossBackward0() = default; +#else +struct TORCH_API NllLossBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NllLossBackward0"; } + void release_variables() override { + 
std::lock_guard lock(mutex_); + self_.reset_data(); + target_.reset_data(); + weight_.reset_data(); + total_weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::SymInt ignore_index; + int64_t reduction = 0; + SavedVariable self_; + SavedVariable target_; + SavedVariable weight_; + SavedVariable total_weight_; + +}; +#ifdef _WIN32 +struct NllLoss2DBackward0 : public TraceableFunction { + TORCH_API NllLoss2DBackward0() = default; +#else +struct TORCH_API NllLoss2DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NllLoss2DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + target_.reset_data(); + weight_.reset_data(); + total_weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::SymInt ignore_index; + int64_t reduction = 0; + SavedVariable self_; + SavedVariable target_; + SavedVariable weight_; + SavedVariable total_weight_; + +}; +#ifdef _WIN32 +struct SmoothL1LossBackward0 : public TraceableFunction { + TORCH_API SmoothL1LossBackward0() = default; +#else +struct TORCH_API SmoothL1LossBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SmoothL1LossBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + target_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + double beta; + int64_t 
reduction = 0; + SavedVariable self_; + SavedVariable target_; + +}; +#ifdef _WIN32 +struct HuberLossBackward0 : public TraceableFunction { + TORCH_API HuberLossBackward0() = default; +#else +struct TORCH_API HuberLossBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "HuberLossBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + target_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + double delta; + int64_t reduction = 0; + SavedVariable self_; + SavedVariable target_; + +}; +#ifdef _WIN32 +struct SoftMarginLossBackward0 : public TraceableFunction { + TORCH_API SoftMarginLossBackward0() = default; +#else +struct TORCH_API SoftMarginLossBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SoftMarginLossBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + target_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t reduction = 0; + SavedVariable self_; + SavedVariable target_; + +}; +#ifdef _WIN32 +struct ReluBackward0 : public TraceableFunction { + TORCH_API ReluBackward0() = default; +#else +struct TORCH_API ReluBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ReluBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result_.reset_data(); + } + + void 
compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct SiluBackward0 : public TraceableFunction { + TORCH_API SiluBackward0() = default; +#else +struct TORCH_API SiluBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SiluBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct MishBackward0 : public TraceableFunction { + TORCH_API MishBackward0() = default; +#else +struct TORCH_API MishBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MishBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct EluBackward0 : public TraceableFunction { + TORCH_API EluBackward0() = default; +#else +struct TORCH_API EluBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "EluBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, 
SwapSavedVariables& saved) override; + at::Scalar alpha; + at::Scalar input_scale; + at::Scalar scale; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct EluBackward1 : public TraceableFunction { + TORCH_API EluBackward1() = default; +#else +struct TORCH_API EluBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "EluBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar alpha; + at::Scalar input_scale; + at::Scalar scale; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct CeluBackward0 : public TraceableFunction { + TORCH_API CeluBackward0() = default; +#else +struct TORCH_API CeluBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CeluBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar alpha; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct CeluBackward1 : public TraceableFunction { + TORCH_API CeluBackward1() = default; +#else +struct TORCH_API CeluBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CeluBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list 
apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar alpha; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct GeluBackward0 : public TraceableFunction { + TORCH_API GeluBackward0() = default; +#else +struct TORCH_API GeluBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "GeluBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::string approximate; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct GeluBackwardBackward0 : public TraceableFunction { + TORCH_API GeluBackwardBackward0() = default; +#else +struct TORCH_API GeluBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "GeluBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grad_output_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::string approximate; + SavedVariable grad_output_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct GluBackward0 : public TraceableFunction { + TORCH_API GluBackward0() = default; +#else +struct TORCH_API GluBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "GluBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void 
compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct HardshrinkBackward0 : public TraceableFunction { + TORCH_API HardshrinkBackward0() = default; +#else +struct TORCH_API HardshrinkBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "HardshrinkBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar lambd; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct HardshrinkBackwardBackward0 : public TraceableFunction { + TORCH_API HardshrinkBackwardBackward0() = default; +#else +struct TORCH_API HardshrinkBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "HardshrinkBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar lambd; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct HardtanhBackward0 : public TraceableFunction { + TORCH_API HardtanhBackward0() = default; +#else +struct TORCH_API HardtanhBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "HardtanhBackward0"; } + void release_variables() override { + std::lock_guard 
lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar max_val; + at::Scalar min_val; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct LeakyReluBackward0 : public TraceableFunction { + TORCH_API LeakyReluBackward0() = default; +#else +struct TORCH_API LeakyReluBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LeakyReluBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar negative_slope; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct LeakyReluBackward1 : public TraceableFunction { + TORCH_API LeakyReluBackward1() = default; +#else +struct TORCH_API LeakyReluBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LeakyReluBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar negative_slope; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct LogSigmoidBackward0 : public TraceableFunction { + TORCH_API LogSigmoidBackward0() = default; +#else +struct TORCH_API LogSigmoidBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LogSigmoidBackward0"; } + 
void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + buffer_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + SavedVariable buffer_; + +}; +#ifdef _WIN32 +struct LogSoftmaxBackward0 : public TraceableFunction { + TORCH_API LogSoftmaxBackward0() = default; +#else +struct TORCH_API LogSoftmaxBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LogSoftmaxBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + at::ScalarType self_scalar_type; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct SparseLogSoftmaxBackward0 : public TraceableFunction { + TORCH_API SparseLogSoftmaxBackward0() = default; +#else +struct TORCH_API SparseLogSoftmaxBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SparseLogSoftmaxBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct MaskedSoftmaxBackward0 : public TraceableFunction { + TORCH_API MaskedSoftmaxBackward0() = default; +#else +struct TORCH_API MaskedSoftmaxBackward0 : public TraceableFunction { +#endif + using 
TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MaskedSoftmaxBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + mask_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::optional dim; + SavedVariable mask_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct PreluKernelBackward0 : public TraceableFunction { + TORCH_API PreluKernelBackward0() = default; +#else +struct TORCH_API PreluKernelBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "PreluKernelBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct PreluKernelBackwardBackward0 : public TraceableFunction { + TORCH_API PreluKernelBackwardBackward0() = default; +#else +struct TORCH_API PreluKernelBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "PreluKernelBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grad_output_.reset_data(); + self_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable grad_output_; + at::TensorOptions 
grad_output_options; + SavedVariable self_; + torch::autograd::generated::TypeAndSize self_info; + at::TensorOptions self_options; + SavedVariable weight_; + at::TensorOptions weight_options; + +}; +#ifdef _WIN32 +struct RreluWithNoiseBackward0 : public TraceableFunction { + TORCH_API RreluWithNoiseBackward0() = default; +#else +struct TORCH_API RreluWithNoiseBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "RreluWithNoiseBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + noise_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar lower; + SavedVariable noise_; + SavedVariable self_; + bool training; + at::Scalar upper; + +}; +#ifdef _WIN32 +struct RreluWithNoiseBackward1 : public TraceableFunction { + TORCH_API RreluWithNoiseBackward1() = default; +#else +struct TORCH_API RreluWithNoiseBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "RreluWithNoiseBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + noise_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar lower; + SavedVariable noise_; + bool training; + at::Scalar upper; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct SoftmaxBackward0 : public TraceableFunction { + TORCH_API SoftmaxBackward0() = default; +#else +struct TORCH_API SoftmaxBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list 
apply(variable_list&& grads) override; + std::string name() const override { return "SoftmaxBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + at::ScalarType self_scalar_type; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct SparseSoftmaxBackward0 : public TraceableFunction { + TORCH_API SparseSoftmaxBackward0() = default; +#else +struct TORCH_API SparseSoftmaxBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SparseSoftmaxBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct SparseSparseMatmulBackward0 : public TraceableFunction { + TORCH_API SparseSparseMatmulBackward0() = default; +#else +struct TORCH_API SparseSparseMatmulBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SparseSparseMatmulBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable other_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct SoftplusBackward0 : public TraceableFunction { + TORCH_API 
SoftplusBackward0() = default; +#else +struct TORCH_API SoftplusBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SoftplusBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar beta; + SavedVariable self_; + at::Scalar threshold; + +}; +#ifdef _WIN32 +struct SoftshrinkBackward0 : public TraceableFunction { + TORCH_API SoftshrinkBackward0() = default; +#else +struct TORCH_API SoftshrinkBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SoftshrinkBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar lambd; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct ThresholdBackward0 : public TraceableFunction { + TORCH_API ThresholdBackward0() = default; +#else +struct TORCH_API ThresholdBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ThresholdBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + at::Scalar threshold; + +}; +#ifdef _WIN32 +struct ThresholdBackward1 : public 
TraceableFunction { + TORCH_API ThresholdBackward1() = default; +#else +struct TORCH_API ThresholdBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ThresholdBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + at::Scalar threshold; + +}; +#ifdef _WIN32 +struct ReflectionPad1DBackward0 : public TraceableFunction { + TORCH_API ReflectionPad1DBackward0() = default; +#else +struct TORCH_API ReflectionPad1DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ReflectionPad1DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector padding; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct ReflectionPad2DBackward0 : public TraceableFunction { + TORCH_API ReflectionPad2DBackward0() = default; +#else +struct TORCH_API ReflectionPad2DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ReflectionPad2DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector padding; + SavedVariable 
self_; + +}; +#ifdef _WIN32 +struct ReflectionPad3DBackward0 : public TraceableFunction { + TORCH_API ReflectionPad3DBackward0() = default; +#else +struct TORCH_API ReflectionPad3DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ReflectionPad3DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector padding; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct ReplicationPad1DBackward0 : public TraceableFunction { + TORCH_API ReplicationPad1DBackward0() = default; +#else +struct TORCH_API ReplicationPad1DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ReplicationPad1DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector padding; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct ReplicationPad2DBackward0 : public TraceableFunction { + TORCH_API ReplicationPad2DBackward0() = default; +#else +struct TORCH_API ReplicationPad2DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ReplicationPad2DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const 
variable_list& inputs, SwapSavedVariables& saved) override; + std::vector padding; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct ReplicationPad3DBackward0 : public TraceableFunction { + TORCH_API ReplicationPad3DBackward0() = default; +#else +struct TORCH_API ReplicationPad3DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ReplicationPad3DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector padding; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct UpsampleLinear1DBackward0 : public TraceableFunction { + TORCH_API UpsampleLinear1DBackward0() = default; +#else +struct TORCH_API UpsampleLinear1DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UpsampleLinear1DBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool align_corners; + std::vector output_size; + c10::optional scales; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct UpsampleBilinear2DBackward0 : public TraceableFunction { + TORCH_API UpsampleBilinear2DBackward0() = default; +#else +struct TORCH_API UpsampleBilinear2DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UpsampleBilinear2DBackward0"; } + void release_variables() override { + + + } + + void 
compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool align_corners; + std::vector output_size; + c10::optional scales_h; + c10::optional scales_w; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct UpsampleBilinear2DAaBackward0 : public TraceableFunction { + TORCH_API UpsampleBilinear2DAaBackward0() = default; +#else +struct TORCH_API UpsampleBilinear2DAaBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UpsampleBilinear2DAaBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool align_corners; + std::vector output_size; + c10::optional scales_h; + c10::optional scales_w; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct UpsampleBicubic2DBackward0 : public TraceableFunction { + TORCH_API UpsampleBicubic2DBackward0() = default; +#else +struct TORCH_API UpsampleBicubic2DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UpsampleBicubic2DBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool align_corners; + std::vector output_size; + c10::optional scales_h; + c10::optional scales_w; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct UpsampleBicubic2DAaBackward0 : public TraceableFunction { + TORCH_API UpsampleBicubic2DAaBackward0() = default; +#else +struct TORCH_API UpsampleBicubic2DAaBackward0 : public TraceableFunction { +#endif + 
using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UpsampleBicubic2DAaBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool align_corners; + std::vector output_size; + c10::optional scales_h; + c10::optional scales_w; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct UpsampleTrilinear3DBackward0 : public TraceableFunction { + TORCH_API UpsampleTrilinear3DBackward0() = default; +#else +struct TORCH_API UpsampleTrilinear3DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UpsampleTrilinear3DBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool align_corners; + std::vector output_size; + c10::optional scales_d; + c10::optional scales_h; + c10::optional scales_w; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct UpsampleNearest1DBackward0 : public TraceableFunction { + TORCH_API UpsampleNearest1DBackward0() = default; +#else +struct TORCH_API UpsampleNearest1DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UpsampleNearest1DBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector output_size; + c10::optional scales; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct 
UpsampleNearestExact1DBackward0 : public TraceableFunction { + TORCH_API UpsampleNearestExact1DBackward0() = default; +#else +struct TORCH_API UpsampleNearestExact1DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UpsampleNearestExact1DBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector output_size; + c10::optional scales; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct UpsampleNearest2DBackward0 : public TraceableFunction { + TORCH_API UpsampleNearest2DBackward0() = default; +#else +struct TORCH_API UpsampleNearest2DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UpsampleNearest2DBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector output_size; + c10::optional scales_h; + c10::optional scales_w; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct UpsampleNearestExact2DBackward0 : public TraceableFunction { + TORCH_API UpsampleNearestExact2DBackward0() = default; +#else +struct TORCH_API UpsampleNearestExact2DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UpsampleNearestExact2DBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, 
SwapSavedVariables& saved) override; + std::vector output_size; + c10::optional scales_h; + c10::optional scales_w; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct UpsampleNearest3DBackward0 : public TraceableFunction { + TORCH_API UpsampleNearest3DBackward0() = default; +#else +struct TORCH_API UpsampleNearest3DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UpsampleNearest3DBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector output_size; + c10::optional scales_d; + c10::optional scales_h; + c10::optional scales_w; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct UpsampleNearestExact3DBackward0 : public TraceableFunction { + TORCH_API UpsampleNearestExact3DBackward0() = default; +#else +struct TORCH_API UpsampleNearestExact3DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UpsampleNearestExact3DBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector output_size; + c10::optional scales_d; + c10::optional scales_h; + c10::optional scales_w; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct PixelShuffleBackward0 : public TraceableFunction { + TORCH_API PixelShuffleBackward0() = default; +#else +struct TORCH_API PixelShuffleBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { 
return "PixelShuffleBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t upscale_factor = 0; + +}; +#ifdef _WIN32 +struct PixelUnshuffleBackward0 : public TraceableFunction { + TORCH_API PixelUnshuffleBackward0() = default; +#else +struct TORCH_API PixelUnshuffleBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "PixelUnshuffleBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t downscale_factor = 0; + +}; +#ifdef _WIN32 +struct AdaptiveAvgPool2DBackward0 : public TraceableFunction { + TORCH_API AdaptiveAvgPool2DBackward0() = default; +#else +struct TORCH_API AdaptiveAvgPool2DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AdaptiveAvgPool2DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct AdaptiveAvgPool3DBackward0 : public TraceableFunction { + TORCH_API AdaptiveAvgPool3DBackward0() = default; +#else +struct TORCH_API AdaptiveAvgPool3DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AdaptiveAvgPool3DBackward0"; } + void 
release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct AdaptiveMaxPool2DBackward0 : public TraceableFunction { + TORCH_API AdaptiveMaxPool2DBackward0() = default; +#else +struct TORCH_API AdaptiveMaxPool2DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AdaptiveMaxPool2DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result1_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + SavedVariable result1_; + +}; +#ifdef _WIN32 +struct AdaptiveMaxPool3DBackward0 : public TraceableFunction { + TORCH_API AdaptiveMaxPool3DBackward0() = default; +#else +struct TORCH_API AdaptiveMaxPool3DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AdaptiveMaxPool3DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result1_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + SavedVariable result1_; + +}; +#ifdef _WIN32 +struct AvgPool2DBackward0 : public TraceableFunction { + TORCH_API AvgPool2DBackward0() = default; +#else +struct TORCH_API AvgPool2DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list 
apply(variable_list&& grads) override; + std::string name() const override { return "AvgPool2DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool ceil_mode; + bool count_include_pad; + c10::optional divisor_override; + std::vector kernel_size; + std::vector padding; + SavedVariable self_; + std::vector stride; + +}; +#ifdef _WIN32 +struct AvgPool3DBackward0 : public TraceableFunction { + TORCH_API AvgPool3DBackward0() = default; +#else +struct TORCH_API AvgPool3DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AvgPool3DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool ceil_mode; + bool count_include_pad; + c10::optional divisor_override; + std::vector kernel_size; + std::vector padding; + SavedVariable self_; + std::vector stride; + +}; +#ifdef _WIN32 +struct FractionalMaxPool2DBackward0 : public TraceableFunction { + TORCH_API FractionalMaxPool2DBackward0() = default; +#else +struct TORCH_API FractionalMaxPool2DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "FractionalMaxPool2DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result1_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, 
SwapSavedVariables& saved) override; + std::vector kernel_size; + std::vector output_size; + SavedVariable self_; + SavedVariable result1_; + +}; +#ifdef _WIN32 +struct FractionalMaxPool3DBackward0 : public TraceableFunction { + TORCH_API FractionalMaxPool3DBackward0() = default; +#else +struct TORCH_API FractionalMaxPool3DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "FractionalMaxPool3DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result1_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector kernel_size; + std::vector output_size; + SavedVariable self_; + SavedVariable result1_; + +}; +#ifdef _WIN32 +struct LinearBackward0 : public TraceableFunction { + TORCH_API LinearBackward0() = default; +#else +struct TORCH_API LinearBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LinearBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + input_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable input_; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct LinearBackwardBackward0 : public TraceableFunction { + TORCH_API LinearBackwardBackward0() = default; +#else +struct TORCH_API LinearBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return 
"LinearBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grad_output_.reset_data(); + self_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable grad_output_; + SavedVariable self_; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct MaxPool2DBackward0 : public TraceableFunction { + TORCH_API MaxPool2DBackward0() = default; +#else +struct TORCH_API MaxPool2DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MaxPool2DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool ceil_mode; + std::vector dilation; + std::vector kernel_size; + std::vector padding; + SavedVariable self_; + std::vector stride; + +}; +#ifdef _WIN32 +struct MpsConvolutionBackward0 : public TraceableFunction { + TORCH_API MpsConvolutionBackward0() = default; +#else +struct TORCH_API MpsConvolutionBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MpsConvolutionBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector dilation; + c10::SymInt groups; + std::vector padding; + SavedVariable self_; + std::vector stride; + SavedVariable weight_; + +}; +#ifdef 
_WIN32 +struct MpsConvolutionBackwardBackward0 : public TraceableFunction { + TORCH_API MpsConvolutionBackwardBackward0() = default; +#else +struct TORCH_API MpsConvolutionBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MpsConvolutionBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grad_output_.reset_data(); + self_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector dilation; + SavedVariable grad_output_; + c10::SymInt groups; + std::vector padding; + SavedVariable self_; + std::vector stride; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct MaxPool2DWithIndicesBackward0 : public TraceableFunction { + TORCH_API MaxPool2DWithIndicesBackward0() = default; +#else +struct TORCH_API MaxPool2DWithIndicesBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MaxPool2DWithIndicesBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result1_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool ceil_mode; + std::vector dilation; + std::vector kernel_size; + std::vector padding; + SavedVariable self_; + std::vector stride; + SavedVariable result1_; + +}; +#ifdef _WIN32 +struct MaxPool3DWithIndicesBackward0 : public TraceableFunction { + TORCH_API MaxPool3DWithIndicesBackward0() = default; +#else +struct TORCH_API MaxPool3DWithIndicesBackward0 : public TraceableFunction { +#endif + using 
TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MaxPool3DWithIndicesBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result1_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool ceil_mode; + std::vector dilation; + std::vector kernel_size; + std::vector padding; + SavedVariable self_; + std::vector stride; + SavedVariable result1_; + +}; +#ifdef _WIN32 +struct MaxUnpool2DBackward0 : public TraceableFunction { + TORCH_API MaxUnpool2DBackward0() = default; +#else +struct TORCH_API MaxUnpool2DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MaxUnpool2DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + indices_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable indices_; + +}; +#ifdef _WIN32 +struct MaxUnpool3DBackward0 : public TraceableFunction { + TORCH_API MaxUnpool3DBackward0() = default; +#else +struct TORCH_API MaxUnpool3DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MaxUnpool3DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + indices_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable indices_; + +}; +#ifdef _WIN32 +struct ConvolutionBackward0 : public 
TraceableFunction { + TORCH_API ConvolutionBackward0() = default; +#else +struct TORCH_API ConvolutionBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ConvolutionBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + input_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::OptionalArray bias_sym_sizes_opt; + std::vector dilation; + c10::SymInt groups; + SavedVariable input_; + std::vector output_padding; + std::vector padding; + std::vector stride; + bool transposed; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct ConvolutionBackward1 : public TraceableFunction { + TORCH_API ConvolutionBackward1() = default; +#else +struct TORCH_API ConvolutionBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ConvolutionBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + input_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::OptionalArray bias_sym_sizes_opt; + std::vector dilation; + c10::SymInt groups; + SavedVariable input_; + std::vector output_padding; + std::vector padding; + std::vector stride; + bool transposed; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct ConvolutionBackwardBackward0 : public TraceableFunction { + TORCH_API ConvolutionBackwardBackward0() = default; +#else +struct TORCH_API ConvolutionBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + 
variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ConvolutionBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grad_output_.reset_data(); + input_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector dilation; + SavedVariable grad_output_; + c10::SymInt groups; + SavedVariable input_; + std::vector output_padding; + std::vector padding; + std::vector stride; + bool transposed; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct ConvolutionOverrideableBackward0 : public TraceableFunction { + TORCH_API ConvolutionOverrideableBackward0() = default; +#else +struct TORCH_API ConvolutionOverrideableBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ConvolutionOverrideableBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + input_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector dilation; + c10::SymInt groups; + SavedVariable input_; + std::vector output_padding; + std::vector padding; + std::vector stride; + bool transposed; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct ConvolutionBackwardOverrideableBackward0 : public TraceableFunction { + TORCH_API ConvolutionBackwardOverrideableBackward0() = default; +#else +struct TORCH_API ConvolutionBackwardOverrideableBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return 
"ConvolutionBackwardOverrideableBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grad_output_.reset_data(); + input_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector dilation; + SavedVariable grad_output_; + c10::SymInt groups; + SavedVariable input_; + std::vector output_padding; + std::vector padding; + std::vector stride; + bool transposed; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct SlowConvTranspose2DBackward0 : public TraceableFunction { + TORCH_API SlowConvTranspose2DBackward0() = default; +#else +struct TORCH_API SlowConvTranspose2DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SlowConvTranspose2DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::OptionalArray bias_sym_sizes_opt; + std::vector dilation; + std::vector output_padding; + std::vector padding; + SavedVariable self_; + std::vector stride; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct SlowConvTranspose3DBackward0 : public TraceableFunction { + TORCH_API SlowConvTranspose3DBackward0() = default; +#else +struct TORCH_API SlowConvTranspose3DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SlowConvTranspose3DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + weight_.reset_data(); + } + + void 
compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::OptionalArray bias_sym_sizes_opt; + std::vector dilation; + std::vector output_padding; + std::vector padding; + SavedVariable self_; + std::vector stride; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct SlowConv2DBackward0 : public TraceableFunction { + TORCH_API SlowConv2DBackward0() = default; +#else +struct TORCH_API SlowConv2DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SlowConv2DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector kernel_size; + std::vector padding; + SavedVariable self_; + std::vector stride; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct SlowConv2DBackwardBackward0 : public TraceableFunction { + TORCH_API SlowConv2DBackwardBackward0() = default; +#else +struct TORCH_API SlowConv2DBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SlowConv2DBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grad_output_.reset_data(); + self_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable grad_output_; + std::vector padding; + SavedVariable self_; + std::vector stride; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct ConvDepthwise2DBackward0 : 
public TraceableFunction { + TORCH_API ConvDepthwise2DBackward0() = default; +#else +struct TORCH_API ConvDepthwise2DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ConvDepthwise2DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::OptionalArray bias_sym_sizes_opt; + std::vector dilation; + std::vector padding; + SavedVariable self_; + std::vector stride; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct ConvDepthwise3DBackward0 : public TraceableFunction { + TORCH_API ConvDepthwise3DBackward0() = default; +#else +struct TORCH_API ConvDepthwise3DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ConvDepthwise3DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::OptionalArray bias_sym_sizes_opt; + std::vector dilation; + std::vector padding; + SavedVariable self_; + std::vector stride; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct SlowConv3DBackward0 : public TraceableFunction { + TORCH_API SlowConv3DBackward0() = default; +#else +struct TORCH_API SlowConv3DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SlowConv3DBackward0"; } + void 
release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::OptionalArray bias_sym_sizes_opt; + std::vector padding; + SavedVariable self_; + std::vector stride; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct SlowConvDilated2DBackward0 : public TraceableFunction { + TORCH_API SlowConvDilated2DBackward0() = default; +#else +struct TORCH_API SlowConvDilated2DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SlowConvDilated2DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::OptionalArray bias_sym_sizes_opt; + std::vector dilation; + std::vector padding; + SavedVariable self_; + std::vector stride; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct SlowConvDilated3DBackward0 : public TraceableFunction { + TORCH_API SlowConvDilated3DBackward0() = default; +#else +struct TORCH_API SlowConvDilated3DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SlowConvDilated3DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::OptionalArray bias_sym_sizes_opt; + std::vector dilation; + std::vector 
padding; + SavedVariable self_; + std::vector stride; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct Col2ImBackward0 : public TraceableFunction { + TORCH_API Col2ImBackward0() = default; +#else +struct TORCH_API Col2ImBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "Col2ImBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector dilation; + std::vector kernel_size; + std::vector padding; + std::vector stride; + +}; +#ifdef _WIN32 +struct Im2ColBackward0 : public TraceableFunction { + TORCH_API Im2ColBackward0() = default; +#else +struct TORCH_API Im2ColBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "Im2ColBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector dilation; + std::vector kernel_size; + std::vector padding; + c10::SymInt self_sym_argsize_minus_1; + c10::SymInt self_sym_argsize_minus_2; + std::vector stride; + +}; +#ifdef _WIN32 +struct AdaptiveAvgPool2DBackwardBackward0 : public TraceableFunction { + TORCH_API AdaptiveAvgPool2DBackwardBackward0() = default; +#else +struct TORCH_API AdaptiveAvgPool2DBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AdaptiveAvgPool2DBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& 
args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::SymInt grad_output_sym_argsize_minus_1; + c10::SymInt grad_output_sym_argsize_minus_2; + torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct AdaptiveAvgPool3DBackwardBackward0 : public TraceableFunction { + TORCH_API AdaptiveAvgPool3DBackwardBackward0() = default; +#else +struct TORCH_API AdaptiveAvgPool3DBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AdaptiveAvgPool3DBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::SymInt grad_output_sym_argsize_minus_1; + c10::SymInt grad_output_sym_argsize_minus_2; + c10::SymInt grad_output_sym_argsize_minus_3; + torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct AdaptiveMaxPool2DBackwardBackward0 : public TraceableFunction { + TORCH_API AdaptiveMaxPool2DBackwardBackward0() = default; +#else +struct TORCH_API AdaptiveMaxPool2DBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AdaptiveMaxPool2DBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + indices_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable indices_; + torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct AdaptiveMaxPool3DBackwardBackward0 : public TraceableFunction { + TORCH_API 
AdaptiveMaxPool3DBackwardBackward0() = default; +#else +struct TORCH_API AdaptiveMaxPool3DBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AdaptiveMaxPool3DBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + indices_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable indices_; + torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct AvgPool2DBackwardBackward0 : public TraceableFunction { + TORCH_API AvgPool2DBackwardBackward0() = default; +#else +struct TORCH_API AvgPool2DBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AvgPool2DBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool ceil_mode; + bool count_include_pad; + c10::optional divisor_override; + std::vector kernel_size; + std::vector padding; + torch::autograd::generated::TypeAndSize self_info; + std::vector stride; + +}; +#ifdef _WIN32 +struct AvgPool3DBackwardBackward0 : public TraceableFunction { + TORCH_API AvgPool3DBackwardBackward0() = default; +#else +struct TORCH_API AvgPool3DBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AvgPool3DBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list 
apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool ceil_mode; + bool count_include_pad; + c10::optional divisor_override; + std::vector kernel_size; + std::vector padding; + torch::autograd::generated::TypeAndSize self_info; + std::vector stride; + +}; +#ifdef _WIN32 +struct EluBackwardBackward0 : public TraceableFunction { + TORCH_API EluBackwardBackward0() = default; +#else +struct TORCH_API EluBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "EluBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grad_output_.reset_data(); + self_or_result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar alpha; + SavedVariable grad_output_; + at::Scalar input_scale; + bool is_result; + at::Scalar scale; + SavedVariable self_or_result_; + +}; +#ifdef _WIN32 +struct FractionalMaxPool2DBackwardBackward0 : public TraceableFunction { + TORCH_API FractionalMaxPool2DBackwardBackward0() = default; +#else +struct TORCH_API FractionalMaxPool2DBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "FractionalMaxPool2DBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + indices_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable indices_; + torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct FractionalMaxPool3DBackwardBackward0 : public TraceableFunction { + TORCH_API 
FractionalMaxPool3DBackwardBackward0() = default; +#else +struct TORCH_API FractionalMaxPool3DBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "FractionalMaxPool3DBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + indices_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable indices_; + torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct GluBackwardBackward0 : public TraceableFunction { + TORCH_API GluBackwardBackward0() = default; +#else +struct TORCH_API GluBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "GluBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grad_output_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + SavedVariable grad_output_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct HardtanhBackwardBackward0 : public TraceableFunction { + TORCH_API HardtanhBackwardBackward0() = default; +#else +struct TORCH_API HardtanhBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "HardtanhBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const 
variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar max_val; + at::Scalar min_val; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct LogSigmoidBackwardBackward0 : public TraceableFunction { + TORCH_API LogSigmoidBackwardBackward0() = default; +#else +struct TORCH_API LogSigmoidBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LogSigmoidBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + buffer_.reset_data(); + grad_output_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable buffer_; + SavedVariable grad_output_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct LogSoftmaxBackwardDataBackward0 : public TraceableFunction { + TORCH_API LogSoftmaxBackwardDataBackward0() = default; +#else +struct TORCH_API LogSoftmaxBackwardDataBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LogSoftmaxBackwardDataBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grad_output_.reset_data(); + output_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + SavedVariable grad_output_; + SavedVariable output_; + +}; +#ifdef _WIN32 +struct LeakyReluBackwardBackward0 : public TraceableFunction { + TORCH_API LeakyReluBackwardBackward0() = default; +#else +struct TORCH_API LeakyReluBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list 
apply(variable_list&& grads) override; + std::string name() const override { return "LeakyReluBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar negative_slope; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct MaxPool2DBackwardBackward0 : public TraceableFunction { + TORCH_API MaxPool2DBackwardBackward0() = default; +#else +struct TORCH_API MaxPool2DBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MaxPool2DBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct MaxPool2DWithIndicesBackwardBackward0 : public TraceableFunction { + TORCH_API MaxPool2DWithIndicesBackwardBackward0() = default; +#else +struct TORCH_API MaxPool2DWithIndicesBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MaxPool2DWithIndicesBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + indices_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable indices_; + torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct MaxPool3DWithIndicesBackwardBackward0 : public TraceableFunction { + TORCH_API 
MaxPool3DWithIndicesBackwardBackward0() = default; +#else +struct TORCH_API MaxPool3DWithIndicesBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MaxPool3DWithIndicesBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + indices_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable indices_; + torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct MseLossBackwardBackward0 : public TraceableFunction { + TORCH_API MseLossBackwardBackward0() = default; +#else +struct TORCH_API MseLossBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MseLossBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grad_output_.reset_data(); + self_.reset_data(); + target_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable grad_output_; + int64_t reduction = 0; + SavedVariable self_; + SavedVariable target_; + +}; +#ifdef _WIN32 +struct NllLossBackwardBackward0 : public TraceableFunction { + TORCH_API NllLossBackwardBackward0() = default; +#else +struct TORCH_API NllLossBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NllLossBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + target_.reset_data(); + weight_.reset_data(); + } + + 
void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::SymInt ignore_index; + int64_t reduction = 0; + SavedVariable target_; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct NllLoss2DBackwardBackward0 : public TraceableFunction { + TORCH_API NllLoss2DBackwardBackward0() = default; +#else +struct TORCH_API NllLoss2DBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NllLoss2DBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + target_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::SymInt ignore_index; + int64_t reduction = 0; + SavedVariable target_; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct RreluWithNoiseBackwardBackward0 : public TraceableFunction { + TORCH_API RreluWithNoiseBackwardBackward0() = default; +#else +struct TORCH_API RreluWithNoiseBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "RreluWithNoiseBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + noise_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar lower; + SavedVariable noise_; + SavedVariable self_; + bool training; + at::Scalar upper; + +}; +#ifdef _WIN32 +struct ReflectionPad1DBackwardBackward0 : public TraceableFunction { + TORCH_API ReflectionPad1DBackwardBackward0() = default; +#else 
+struct TORCH_API ReflectionPad1DBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ReflectionPad1DBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector padding; + torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct ReflectionPad2DBackwardBackward0 : public TraceableFunction { + TORCH_API ReflectionPad2DBackwardBackward0() = default; +#else +struct TORCH_API ReflectionPad2DBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ReflectionPad2DBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector padding; + torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct ReflectionPad3DBackwardBackward0 : public TraceableFunction { + TORCH_API ReflectionPad3DBackwardBackward0() = default; +#else +struct TORCH_API ReflectionPad3DBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ReflectionPad3DBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector padding; + torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct 
ReplicationPad1DBackwardBackward0 : public TraceableFunction { + TORCH_API ReplicationPad1DBackwardBackward0() = default; +#else +struct TORCH_API ReplicationPad1DBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ReplicationPad1DBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector padding; + torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct ReplicationPad2DBackwardBackward0 : public TraceableFunction { + TORCH_API ReplicationPad2DBackwardBackward0() = default; +#else +struct TORCH_API ReplicationPad2DBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ReplicationPad2DBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector padding; + torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct ReplicationPad3DBackwardBackward0 : public TraceableFunction { + TORCH_API ReplicationPad3DBackwardBackward0() = default; +#else +struct TORCH_API ReplicationPad3DBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ReplicationPad3DBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, 
SwapSavedVariables& saved) override; + std::vector padding; + torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct SparseSampledAddmmBackward0 : public TraceableFunction { + TORCH_API SparseSampledAddmmBackward0() = default; +#else +struct TORCH_API SparseSampledAddmmBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SparseSampledAddmmBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + mat1_.reset_data(); + mat2_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar alpha; + at::Scalar beta; + SavedVariable mat1_; + SavedVariable mat2_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct SparseMmReduceImplBackward0 : public TraceableFunction { + TORCH_API SparseMmReduceImplBackward0() = default; +#else +struct TORCH_API SparseMmReduceImplBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SparseMmReduceImplBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + self_.reset_data(); + result1_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable other_; + std::string reduce; + SavedVariable self_; + SavedVariable result1_; + +}; +#ifdef _WIN32 +struct SmoothL1LossBackwardBackward0 : public TraceableFunction { + TORCH_API SmoothL1LossBackwardBackward0() = default; +#else +struct TORCH_API SmoothL1LossBackwardBackward0 : public TraceableFunction { +#endif + using 
TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SmoothL1LossBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grad_output_.reset_data(); + self_.reset_data(); + target_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + double beta; + SavedVariable grad_output_; + int64_t reduction = 0; + SavedVariable self_; + SavedVariable target_; + +}; +#ifdef _WIN32 +struct HuberLossBackwardBackward0 : public TraceableFunction { + TORCH_API HuberLossBackwardBackward0() = default; +#else +struct TORCH_API HuberLossBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "HuberLossBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grad_output_.reset_data(); + self_.reset_data(); + target_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + double delta; + SavedVariable grad_output_; + int64_t reduction = 0; + SavedVariable self_; + SavedVariable target_; + +}; +#ifdef _WIN32 +struct SoftplusBackwardBackward0 : public TraceableFunction { + TORCH_API SoftplusBackwardBackward0() = default; +#else +struct TORCH_API SoftplusBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SoftplusBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grad_output_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) 
override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar beta; + SavedVariable grad_output_; + SavedVariable self_; + at::Scalar threshold; + +}; +#ifdef _WIN32 +struct SoftmaxBackwardDataBackward0 : public TraceableFunction { + TORCH_API SoftmaxBackwardDataBackward0() = default; +#else +struct TORCH_API SoftmaxBackwardDataBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SoftmaxBackwardDataBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grad_output_.reset_data(); + output_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + SavedVariable grad_output_; + at::ScalarType input_dtype; + SavedVariable output_; + +}; +#ifdef _WIN32 +struct SoftMarginLossBackwardBackward0 : public TraceableFunction { + TORCH_API SoftMarginLossBackwardBackward0() = default; +#else +struct TORCH_API SoftMarginLossBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SoftMarginLossBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grad_output_.reset_data(); + self_.reset_data(); + target_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable grad_output_; + int64_t reduction = 0; + SavedVariable self_; + SavedVariable target_; + +}; +#ifdef _WIN32 +struct SoftshrinkBackwardBackward0 : public TraceableFunction { + TORCH_API SoftshrinkBackwardBackward0() = default; +#else +struct TORCH_API 
SoftshrinkBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SoftshrinkBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar lambd; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct ThresholdBackwardBackward0 : public TraceableFunction { + TORCH_API ThresholdBackwardBackward0() = default; +#else +struct TORCH_API ThresholdBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ThresholdBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + at::Scalar threshold; + +}; +#ifdef _WIN32 +struct UpsampleLinear1DBackwardBackward0 : public TraceableFunction { + TORCH_API UpsampleLinear1DBackwardBackward0() = default; +#else +struct TORCH_API UpsampleLinear1DBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UpsampleLinear1DBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool align_corners; + std::vector output_size; + c10::optional scales; + +}; +#ifdef _WIN32 +struct 
UpsampleBilinear2DBackwardBackward0 : public TraceableFunction { + TORCH_API UpsampleBilinear2DBackwardBackward0() = default; +#else +struct TORCH_API UpsampleBilinear2DBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UpsampleBilinear2DBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool align_corners; + std::vector output_size; + c10::optional scales_h; + c10::optional scales_w; + +}; +#ifdef _WIN32 +struct UpsampleBilinear2DAaBackwardBackward0 : public TraceableFunction { + TORCH_API UpsampleBilinear2DAaBackwardBackward0() = default; +#else +struct TORCH_API UpsampleBilinear2DAaBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UpsampleBilinear2DAaBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool align_corners; + std::vector output_size; + c10::optional scales_h; + c10::optional scales_w; + +}; +#ifdef _WIN32 +struct UpsampleBicubic2DBackwardBackward0 : public TraceableFunction { + TORCH_API UpsampleBicubic2DBackwardBackward0() = default; +#else +struct TORCH_API UpsampleBicubic2DBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UpsampleBicubic2DBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) 
override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool align_corners; + std::vector output_size; + c10::optional scales_h; + c10::optional scales_w; + +}; +#ifdef _WIN32 +struct UpsampleBicubic2DAaBackwardBackward0 : public TraceableFunction { + TORCH_API UpsampleBicubic2DAaBackwardBackward0() = default; +#else +struct TORCH_API UpsampleBicubic2DAaBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UpsampleBicubic2DAaBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool align_corners; + std::vector output_size; + c10::optional scales_h; + c10::optional scales_w; + +}; +#ifdef _WIN32 +struct UpsampleTrilinear3DBackwardBackward0 : public TraceableFunction { + TORCH_API UpsampleTrilinear3DBackwardBackward0() = default; +#else +struct TORCH_API UpsampleTrilinear3DBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UpsampleTrilinear3DBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool align_corners; + std::vector output_size; + c10::optional scales_d; + c10::optional scales_h; + c10::optional scales_w; + +}; +#ifdef _WIN32 +struct UpsampleNearest1DBackwardBackward0 : public TraceableFunction { + TORCH_API UpsampleNearest1DBackwardBackward0() = default; +#else +struct TORCH_API UpsampleNearest1DBackwardBackward0 : public TraceableFunction { +#endif + using 
TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UpsampleNearest1DBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector output_size; + c10::optional scales; + +}; +#ifdef _WIN32 +struct UpsampleNearestExact1DBackwardBackward0 : public TraceableFunction { + TORCH_API UpsampleNearestExact1DBackwardBackward0() = default; +#else +struct TORCH_API UpsampleNearestExact1DBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UpsampleNearestExact1DBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector output_size; + c10::optional scales; + +}; +#ifdef _WIN32 +struct UpsampleNearest2DBackwardBackward0 : public TraceableFunction { + TORCH_API UpsampleNearest2DBackwardBackward0() = default; +#else +struct TORCH_API UpsampleNearest2DBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UpsampleNearest2DBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector output_size; + c10::optional scales_h; + c10::optional scales_w; + +}; +#ifdef _WIN32 +struct UpsampleNearestExact2DBackwardBackward0 : public TraceableFunction { + TORCH_API 
UpsampleNearestExact2DBackwardBackward0() = default; +#else +struct TORCH_API UpsampleNearestExact2DBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UpsampleNearestExact2DBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector output_size; + c10::optional scales_h; + c10::optional scales_w; + +}; +#ifdef _WIN32 +struct UpsampleNearest3DBackwardBackward0 : public TraceableFunction { + TORCH_API UpsampleNearest3DBackwardBackward0() = default; +#else +struct TORCH_API UpsampleNearest3DBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UpsampleNearest3DBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector output_size; + c10::optional scales_d; + c10::optional scales_h; + c10::optional scales_w; + +}; +#ifdef _WIN32 +struct UpsampleNearestExact3DBackwardBackward0 : public TraceableFunction { + TORCH_API UpsampleNearestExact3DBackwardBackward0() = default; +#else +struct TORCH_API UpsampleNearestExact3DBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UpsampleNearestExact3DBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, 
SwapSavedVariables& saved) override; + std::vector output_size; + c10::optional scales_d; + c10::optional scales_h; + c10::optional scales_w; + +}; +#ifdef _WIN32 +struct SigmoidBackwardBackward0 : public TraceableFunction { + TORCH_API SigmoidBackwardBackward0() = default; +#else +struct TORCH_API SigmoidBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SigmoidBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grad_output_.reset_data(); + output_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable grad_output_; + SavedVariable output_; + +}; +#ifdef _WIN32 +struct TanhBackwardBackward0 : public TraceableFunction { + TORCH_API TanhBackwardBackward0() = default; +#else +struct TORCH_API TanhBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "TanhBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grad_output_.reset_data(); + output_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable grad_output_; + SavedVariable output_; + +}; +#ifdef _WIN32 +struct CudnnCtcLossBackward0 : public TraceableFunction { + TORCH_API CudnnCtcLossBackward0() = default; +#else +struct TORCH_API CudnnCtcLossBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CudnnCtcLossBackward0"; } + void 
release_variables() override { + std::lock_guard lock(mutex_); + result0_.reset_data(); + result1_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool zero_infinity; + SavedVariable result0_; + SavedVariable result1_; + +}; +#ifdef _WIN32 +struct CudnnCtcLossBackward1 : public TraceableFunction { + TORCH_API CudnnCtcLossBackward1() = default; +#else +struct TORCH_API CudnnCtcLossBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CudnnCtcLossBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result0_.reset_data(); + result1_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool zero_infinity; + SavedVariable result0_; + SavedVariable result1_; + +}; +#ifdef _WIN32 +struct CudnnConvolutionTransposeBackward0 : public TraceableFunction { + TORCH_API CudnnConvolutionTransposeBackward0() = default; +#else +struct TORCH_API CudnnConvolutionTransposeBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CudnnConvolutionTransposeBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector dilation; + c10::SymInt groups; + std::vector output_padding; + std::vector padding; + SavedVariable self_; + std::vector stride; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct 
MpsConvolutionTransposeBackward0 : public TraceableFunction { + TORCH_API MpsConvolutionTransposeBackward0() = default; +#else +struct TORCH_API MpsConvolutionTransposeBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MpsConvolutionTransposeBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector dilation; + c10::SymInt groups; + std::vector output_padding; + std::vector padding; + SavedVariable self_; + std::vector stride; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct CudnnConvolutionBackward0 : public TraceableFunction { + TORCH_API CudnnConvolutionBackward0() = default; +#else +struct TORCH_API CudnnConvolutionBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CudnnConvolutionBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector dilation; + c10::SymInt groups; + std::vector padding; + SavedVariable self_; + std::vector stride; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct CudnnGridSamplerBackward0 : public TraceableFunction { + TORCH_API CudnnGridSamplerBackward0() = default; +#else +struct TORCH_API CudnnGridSamplerBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string 
name() const override { return "CudnnGridSamplerBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grid_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable grid_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct CudnnAffineGridGeneratorBackward0 : public TraceableFunction { + TORCH_API CudnnAffineGridGeneratorBackward0() = default; +#else +struct TORCH_API CudnnAffineGridGeneratorBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CudnnAffineGridGeneratorBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t C = 0; + int64_t H = 0; + int64_t N = 0; + int64_t W = 0; + +}; +#ifdef _WIN32 +struct CudnnBatchNormBackward0 : public TraceableFunction { + TORCH_API CudnnBatchNormBackward0() = default; +#else +struct TORCH_API CudnnBatchNormBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CudnnBatchNormBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + input_.reset_data(); + running_mean_.reset_data(); + running_var_.reset_data(); + weight_.reset_data(); + result1_.reset_data(); + result2_.reset_data(); + result3_.reset_data(); + } + bool retain_variables = true; + void will_release_variables() override { + retain_variables = false; + } + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + 
double epsilon; + SavedVariable input_; + SavedVariable running_mean_; + SavedVariable running_var_; + bool training; + SavedVariable weight_; + SavedVariable result1_; + SavedVariable result2_; + SavedVariable result3_; + +}; +#ifdef _WIN32 +struct CudnnBatchNormBackwardBackward0 : public TraceableFunction { + TORCH_API CudnnBatchNormBackwardBackward0() = default; +#else +struct TORCH_API CudnnBatchNormBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CudnnBatchNormBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grad_output_.reset_data(); + input_.reset_data(); + reserveSpace_.reset_data(); + running_mean_.reset_data(); + running_var_.reset_data(); + save_mean_.reset_data(); + save_var_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + double epsilon; + SavedVariable grad_output_; + SavedVariable input_; + SavedVariable reserveSpace_; + SavedVariable running_mean_; + SavedVariable running_var_; + SavedVariable save_mean_; + SavedVariable save_var_; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct NnpackSpatialConvolutionBackward0 : public TraceableFunction { + TORCH_API NnpackSpatialConvolutionBackward0() = default; +#else +struct TORCH_API NnpackSpatialConvolutionBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NnpackSpatialConvolutionBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + input_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const 
variable_list& inputs, SwapSavedVariables& saved) override; + c10::OptionalArray bias_sym_sizes_opt; + SavedVariable input_; + std::vector padding; + std::vector stride; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct LstmMpsBackward0 : public TraceableFunction { + TORCH_API LstmMpsBackward0() = default; +#else +struct TORCH_API LstmMpsBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LstmMpsBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + hx_.clear(); + hx_released_ = true; + input_.reset_data(); + params_.clear(); + params_released_ = true; + result3_.reset_data(); + result4_.reset_data(); + result5_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool batch_first; + bool bidirectional; + double dropout; + bool has_biases; + std::vector hx_; + bool hx_released_ = false; + SavedVariable input_; + int64_t num_layers = 0; + std::vector params_; + bool params_released_ = false; + bool train; + SavedVariable result3_; + SavedVariable result4_; + SavedVariable result5_; + size_t hx_size_; + size_t params_size_; +}; +#ifdef _WIN32 +struct CudnnRnnBackward0 : public TraceableFunction { + TORCH_API CudnnRnnBackward0() = default; +#else +struct TORCH_API CudnnRnnBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CudnnRnnBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + cx_.reset_data(); + dropout_state_.reset_data(); + hx_.reset_data(); + input_.reset_data(); + weight_.clear(); + weight_released_ = true; + result0_.reset_data(); + result3_.reset_data(); + result4_.reset_data(); + } + bool 
retain_variables = true; + void will_release_variables() override { + retain_variables = false; + } + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool batch_first; + std::vector batch_sizes; + bool bidirectional; + SavedVariable cx_; + double dropout; + SavedVariable dropout_state_; + c10::SymInt hidden_size; + SavedVariable hx_; + SavedVariable input_; + int64_t mode = 0; + int64_t num_layers = 0; + c10::SymInt proj_size; + bool train; + std::vector weight_; + bool weight_released_ = false; + int64_t weight_stride0 = 0; + SavedVariable result0_; + SavedVariable result3_; + SavedVariable result4_; + size_t weight_size_; +}; +#ifdef _WIN32 +struct CudnnRnnBackwardBackward0 : public TraceableFunction { + TORCH_API CudnnRnnBackwardBackward0() = default; +#else +struct TORCH_API CudnnRnnBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CudnnRnnBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + size_t weight_size_; +}; +#ifdef _WIN32 +struct MiopenConvolutionTransposeBackward0 : public TraceableFunction { + TORCH_API MiopenConvolutionTransposeBackward0() = default; +#else +struct TORCH_API MiopenConvolutionTransposeBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MiopenConvolutionTransposeBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list 
apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::OptionalArray bias_sym_sizes_opt; + std::vector dilation; + c10::SymInt groups; + std::vector output_padding; + std::vector padding; + SavedVariable self_; + std::vector stride; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct MiopenConvolutionBackward0 : public TraceableFunction { + TORCH_API MiopenConvolutionBackward0() = default; +#else +struct TORCH_API MiopenConvolutionBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MiopenConvolutionBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::OptionalArray bias_sym_sizes_opt; + std::vector dilation; + c10::SymInt groups; + std::vector padding; + SavedVariable self_; + std::vector stride; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct MiopenDepthwiseConvolutionBackward0 : public TraceableFunction { + TORCH_API MiopenDepthwiseConvolutionBackward0() = default; +#else +struct TORCH_API MiopenDepthwiseConvolutionBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MiopenDepthwiseConvolutionBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::OptionalArray bias_sym_sizes_opt; + std::vector dilation; + c10::SymInt groups; + std::vector padding; + SavedVariable 
self_; + std::vector stride; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct MiopenBatchNormBackward0 : public TraceableFunction { + TORCH_API MiopenBatchNormBackward0() = default; +#else +struct TORCH_API MiopenBatchNormBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MiopenBatchNormBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + input_.reset_data(); + running_mean_.reset_data(); + running_var_.reset_data(); + weight_.reset_data(); + result1_.reset_data(); + result2_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + double epsilon; + SavedVariable input_; + SavedVariable running_mean_; + SavedVariable running_var_; + bool training; + SavedVariable weight_; + SavedVariable result1_; + SavedVariable result2_; + +}; +#ifdef _WIN32 +struct MiopenBatchNormBackwardBackward0 : public TraceableFunction { + TORCH_API MiopenBatchNormBackwardBackward0() = default; +#else +struct TORCH_API MiopenBatchNormBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MiopenBatchNormBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grad_output_.reset_data(); + input_.reset_data(); + running_mean_.reset_data(); + running_var_.reset_data(); + save_mean_.reset_data(); + save_var_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + double epsilon; + SavedVariable grad_output_; + SavedVariable input_; + SavedVariable running_mean_; + SavedVariable 
running_var_; + SavedVariable save_mean_; + SavedVariable save_var_; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct MiopenRnnBackward0 : public TraceableFunction { + TORCH_API MiopenRnnBackward0() = default; +#else +struct TORCH_API MiopenRnnBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MiopenRnnBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + cx_.reset_data(); + dropout_state_.reset_data(); + hx_.reset_data(); + input_.reset_data(); + weight_.clear(); + weight_released_ = true; + result0_.reset_data(); + result3_.reset_data(); + result4_.reset_data(); + } + bool retain_variables = true; + void will_release_variables() override { + retain_variables = false; + } + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool batch_first; + std::vector batch_sizes; + bool bidirectional; + SavedVariable cx_; + double dropout; + SavedVariable dropout_state_; + int64_t hidden_size = 0; + SavedVariable hx_; + SavedVariable input_; + int64_t mode = 0; + int64_t num_layers = 0; + bool train; + std::vector weight_; + bool weight_released_ = false; + int64_t weight_stride0 = 0; + SavedVariable result0_; + SavedVariable result3_; + SavedVariable result4_; + size_t weight_size_; +}; +#ifdef _WIN32 +struct MkldnnRnnLayerBackward0 : public TraceableFunction { + TORCH_API MkldnnRnnLayerBackward0() = default; +#else +struct TORCH_API MkldnnRnnLayerBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MkldnnRnnLayerBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + cx__.reset_data(); + hx__.reset_data(); + input_.reset_data(); 
+ weight0_.reset_data(); + weight1_.reset_data(); + weight2_.reset_data(); + weight3_.reset_data(); + result0_.reset_data(); + result1_.reset_data(); + result2_.reset_data(); + result3_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool batch_first; + std::vector batch_sizes; + bool bidirectional; + SavedVariable cx__; + bool has_biases; + int64_t hidden_size = 0; + SavedVariable hx__; + SavedVariable input_; + int64_t mode = 0; + int64_t num_layers = 0; + bool reverse; + bool train; + SavedVariable weight0_; + SavedVariable weight1_; + SavedVariable weight2_; + SavedVariable weight3_; + SavedVariable result0_; + SavedVariable result1_; + SavedVariable result2_; + SavedVariable result3_; + +}; +#ifdef _WIN32 +struct MkldnnConvolutionBackward0 : public TraceableFunction { + TORCH_API MkldnnConvolutionBackward0() = default; +#else +struct TORCH_API MkldnnConvolutionBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MkldnnConvolutionBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::OptionalArray bias_sym_sizes_opt; + std::vector dilation; + c10::SymInt groups; + std::vector padding; + SavedVariable self_; + std::vector stride; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct MkldnnLinearBackward0 : public TraceableFunction { + TORCH_API MkldnnLinearBackward0() = default; +#else +struct TORCH_API MkldnnLinearBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; 
+ std::string name() const override { return "MkldnnLinearBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct MkldnnMaxPool2DBackward0 : public TraceableFunction { + TORCH_API MkldnnMaxPool2DBackward0() = default; +#else +struct TORCH_API MkldnnMaxPool2DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MkldnnMaxPool2DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool ceil_mode; + std::vector dilation; + std::vector kernel_size; + std::vector padding; + SavedVariable self_; + std::vector stride; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct MkldnnMaxPool3DBackward0 : public TraceableFunction { + TORCH_API MkldnnMaxPool3DBackward0() = default; +#else +struct TORCH_API MkldnnMaxPool3DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MkldnnMaxPool3DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool ceil_mode; + std::vector dilation; + std::vector kernel_size; + std::vector padding; + 
SavedVariable self_; + std::vector stride; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct MkldnnAdaptiveAvgPool2DBackward0 : public TraceableFunction { + TORCH_API MkldnnAdaptiveAvgPool2DBackward0() = default; +#else +struct TORCH_API MkldnnAdaptiveAvgPool2DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MkldnnAdaptiveAvgPool2DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct MkldnnReshapeBackward0 : public TraceableFunction { + TORCH_API MkldnnReshapeBackward0() = default; +#else +struct TORCH_API MkldnnReshapeBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MkldnnReshapeBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct NestedTensorFromTensorListBackward0 : public TraceableFunction { + TORCH_API NestedTensorFromTensorListBackward0() = default; +#else +struct TORCH_API NestedTensorFromTensorListBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NestedTensorFromTensorListBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + list_.clear(); + list_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) 
override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector list_; + bool list_released_ = false; + size_t list_size_; +}; +#ifdef _WIN32 +struct NestedTensorFromMaskBackward0 : public TraceableFunction { + TORCH_API NestedTensorFromMaskBackward0() = default; +#else +struct TORCH_API NestedTensorFromMaskBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NestedTensorFromMaskBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector t_sym_sizes; + +}; +#ifdef _WIN32 +struct NestedFromPaddedBackward0 : public TraceableFunction { + TORCH_API NestedFromPaddedBackward0() = default; +#else +struct TORCH_API NestedFromPaddedBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NestedFromPaddedBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + padded_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool fuse_transform_0213; + SavedVariable padded_; + +}; +#ifdef _WIN32 +struct ToPaddedTensorBackward0 : public TraceableFunction { + TORCH_API ToPaddedTensorBackward0() = default; +#else +struct TORCH_API ToPaddedTensorBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ToPaddedTensorBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + 
self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct NestedViewFromBufferBackward0 : public Node { + TORCH_API NestedViewFromBufferBackward0() = default; +#else +struct TORCH_API NestedViewFromBufferBackward0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NestedViewFromBufferBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct NestedViewFromJaggedBackward0 : public Node { + TORCH_API NestedViewFromJaggedBackward0() = default; +#else +struct TORCH_API NestedViewFromJaggedBackward0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NestedViewFromJaggedBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct NestedGetValuesBackward0 : public Node { + TORCH_API NestedGetValuesBackward0() = default; +#else +struct TORCH_API NestedGetValuesBackward0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NestedGetValuesBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct 
ScaledDotProductEfficientAttentionBackward0 : public TraceableFunction { + TORCH_API ScaledDotProductEfficientAttentionBackward0() = default; +#else +struct TORCH_API ScaledDotProductEfficientAttentionBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ScaledDotProductEfficientAttentionBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + attn_bias_.reset_data(); + key_.reset_data(); + query_.reset_data(); + value_.reset_data(); + log_sumexp_.reset_data(); + output_.reset_data(); + philox_offset_.reset_data(); + philox_seed_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable attn_bias_; + double dropout_p; + bool is_causal; + SavedVariable key_; + SavedVariable query_; + c10::optional scale; + SavedVariable value_; + SavedVariable log_sumexp_; + SavedVariable output_; + SavedVariable philox_offset_; + SavedVariable philox_seed_; + +}; +#ifdef _WIN32 +struct ScaledDotProductFlashAttentionBackward0 : public TraceableFunction { + TORCH_API ScaledDotProductFlashAttentionBackward0() = default; +#else +struct TORCH_API ScaledDotProductFlashAttentionBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ScaledDotProductFlashAttentionBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + key_.reset_data(); + query_.reset_data(); + value_.reset_data(); + cum_seq_k_.reset_data(); + cum_seq_q_.reset_data(); + logsumexp_.reset_data(); + output_.reset_data(); + philox_offset_.reset_data(); + philox_seed_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list 
apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + double dropout_p; + bool is_causal; + SavedVariable key_; + SavedVariable query_; + c10::optional scale; + SavedVariable value_; + SavedVariable cum_seq_k_; + SavedVariable cum_seq_q_; + SavedVariable logsumexp_; + c10::SymInt max_k; + c10::SymInt max_q; + SavedVariable output_; + SavedVariable philox_offset_; + SavedVariable philox_seed_; + +}; +#ifdef _WIN32 +struct ScaledDotProductFlashAttentionForCpuBackward0 : public TraceableFunction { + TORCH_API ScaledDotProductFlashAttentionForCpuBackward0() = default; +#else +struct TORCH_API ScaledDotProductFlashAttentionForCpuBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ScaledDotProductFlashAttentionForCpuBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + attn_mask_.reset_data(); + key_.reset_data(); + query_.reset_data(); + value_.reset_data(); + logsumexp_.reset_data(); + output_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable attn_mask_; + double dropout_p; + bool is_causal; + SavedVariable key_; + SavedVariable query_; + c10::optional scale; + SavedVariable value_; + SavedVariable logsumexp_; + SavedVariable output_; + +}; +#ifdef _WIN32 +struct FlashAttentionBackward0 : public TraceableFunction { + TORCH_API FlashAttentionBackward0() = default; +#else +struct TORCH_API FlashAttentionBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "FlashAttentionBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + cum_seq_k_.reset_data(); + 
cum_seq_q_.reset_data(); + key_.reset_data(); + query_.reset_data(); + value_.reset_data(); + output_.reset_data(); + philox_offset_.reset_data(); + philox_seed_.reset_data(); + softmax_logsumexp_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable cum_seq_k_; + SavedVariable cum_seq_q_; + double dropout_p; + bool is_causal; + SavedVariable key_; + c10::SymInt max_k; + c10::SymInt max_q; + SavedVariable query_; + c10::optional scale; + SavedVariable value_; + SavedVariable output_; + SavedVariable philox_offset_; + SavedVariable philox_seed_; + SavedVariable softmax_logsumexp_; + +}; +#ifdef _WIN32 +struct EfficientAttentionBackward0 : public TraceableFunction { + TORCH_API EfficientAttentionBackward0() = default; +#else +struct TORCH_API EfficientAttentionBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "EfficientAttentionBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + bias_.reset_data(); + cu_seqlens_k_.reset_data(); + cu_seqlens_q_.reset_data(); + key_.reset_data(); + query_.reset_data(); + value_.reset_data(); + logsumexp_.reset_data(); + output_.reset_data(); + philox_offset_.reset_data(); + philox_seed_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable bias_; + SavedVariable cu_seqlens_k_; + SavedVariable cu_seqlens_q_; + int64_t custom_mask_type = 0; + double dropout_p; + SavedVariable key_; + SavedVariable query_; + c10::optional scale; + SavedVariable value_; + SavedVariable logsumexp_; + c10::SymInt max_seqlen_batch_k; + c10::SymInt max_seqlen_batch_q; + SavedVariable output_; + SavedVariable 
philox_offset_; + SavedVariable philox_seed_; + +}; +#ifdef _WIN32 +struct FftR2CBackward0 : public TraceableFunction { + TORCH_API FftR2CBackward0() = default; +#else +struct TORCH_API FftR2CBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "FftR2CBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector dim; + int64_t normalization = 0; + bool onesided; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct FftC2RBackward0 : public TraceableFunction { + TORCH_API FftC2RBackward0() = default; +#else +struct TORCH_API FftC2RBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "FftC2RBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector dim; + int64_t normalization = 0; + +}; +#ifdef _WIN32 +struct FftC2CBackward0 : public TraceableFunction { + TORCH_API FftC2CBackward0() = default; +#else +struct TORCH_API FftC2CBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "FftC2CBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector dim; + bool forward; + int64_t normalization = 0; + +}; +#ifdef 
_WIN32 +struct UnbindBackward0 : public Node { + TORCH_API UnbindBackward0() = default; +#else +struct TORCH_API UnbindBackward0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UnbindBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + +}; +#ifdef _WIN32 +struct UnbindBackwardAutogradNestedTensor0 : public Node { + TORCH_API UnbindBackwardAutogradNestedTensor0() = default; +#else +struct TORCH_API UnbindBackwardAutogradNestedTensor0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UnbindBackwardAutogradNestedTensor0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + SavedVariable self_; + at::TensorOptions self_options; + +}; +#ifdef _WIN32 +struct StackBackward0 : public TraceableFunction { + TORCH_API StackBackward0() = default; +#else +struct TORCH_API StackBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "StackBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + ::std::vector tensors_args_scalartypes; + size_t tensors_size_; +}; +#ifdef _WIN32 +struct ThnnFusedLstmCellBackward0 : public TraceableFunction { + TORCH_API ThnnFusedLstmCellBackward0() = 
default; +#else +struct TORCH_API ThnnFusedLstmCellBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ThnnFusedLstmCellBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + cx_.reset_data(); + hidden_bias_.reset_data(); + hidden_gates_.reset_data(); + input_bias_.reset_data(); + input_gates_.reset_data(); + result1_.reset_data(); + result2_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable cx_; + SavedVariable hidden_bias_; + SavedVariable hidden_gates_; + SavedVariable input_bias_; + SavedVariable input_gates_; + SavedVariable result1_; + SavedVariable result2_; + +}; +#ifdef _WIN32 +struct ThnnFusedGruCellBackward0 : public TraceableFunction { + TORCH_API ThnnFusedGruCellBackward0() = default; +#else +struct TORCH_API ThnnFusedGruCellBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ThnnFusedGruCellBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + hidden_bias_.reset_data(); + hidden_gates_.reset_data(); + hx_.reset_data(); + input_bias_.reset_data(); + input_gates_.reset_data(); + result1_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable hidden_bias_; + SavedVariable hidden_gates_; + SavedVariable hx_; + SavedVariable input_bias_; + SavedVariable input_gates_; + SavedVariable result1_; + +}; +#ifdef _WIN32 +struct PackPaddedSequenceBackward0 : public TraceableFunction { + TORCH_API PackPaddedSequenceBackward0() = default; +#else +struct 
TORCH_API PackPaddedSequenceBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "PackPaddedSequenceBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result1_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool batch_first; + std::vector input_sym_sizes; + SavedVariable result1_; + +}; +#ifdef _WIN32 +struct SegmentReduceBackward0 : public TraceableFunction { + TORCH_API SegmentReduceBackward0() = default; +#else +struct TORCH_API SegmentReduceBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SegmentReduceBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + data_.reset_data(); + lengths_.reset_data(); + offsets_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t axis = 0; + SavedVariable data_; + c10::optional initial; + SavedVariable lengths_; + SavedVariable offsets_; + std::string reduce; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct PinMemoryBackward0 : public TraceableFunction { + TORCH_API PinMemoryBackward0() = default; +#else +struct TORCH_API PinMemoryBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "PinMemoryBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& 
inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct TestWarnInAutogradBackward0 : public TraceableFunction { + TORCH_API TestWarnInAutogradBackward0() = default; +#else +struct TORCH_API TestWarnInAutogradBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "TestWarnInAutogradBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct TestAutogradMultipleDispatchBackward0 : public TraceableFunction { + TORCH_API TestAutogradMultipleDispatchBackward0() = default; +#else +struct TORCH_API TestAutogradMultipleDispatchBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "TestAutogradMultipleDispatchBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct TestAutogradMultipleDispatchBackwardAutogradNestedTensor0 : public TraceableFunction { + TORCH_API TestAutogradMultipleDispatchBackwardAutogradNestedTensor0() = default; +#else +struct TORCH_API TestAutogradMultipleDispatchBackwardAutogradNestedTensor0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "TestAutogradMultipleDispatchBackwardAutogradNestedTensor0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list 
apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct TestAutogradMultipleDispatchBackwardAutogradCUDA0 : public TraceableFunction { + TORCH_API TestAutogradMultipleDispatchBackwardAutogradCUDA0() = default; +#else +struct TORCH_API TestAutogradMultipleDispatchBackwardAutogradCUDA0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "TestAutogradMultipleDispatchBackwardAutogradCUDA0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct TestAutogradMultipleDispatchBackwardAutogradNestedTensor1 : public TraceableFunction { + TORCH_API TestAutogradMultipleDispatchBackwardAutogradNestedTensor1() = default; +#else +struct TORCH_API TestAutogradMultipleDispatchBackwardAutogradNestedTensor1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "TestAutogradMultipleDispatchBackwardAutogradNestedTensor1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct TestAutogradMultipleDispatchViewBackward0 : public Node { + TORCH_API TestAutogradMultipleDispatchViewBackward0() = default; +#else +struct TORCH_API TestAutogradMultipleDispatchViewBackward0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "TestAutogradMultipleDispatchViewBackward0"; } + void release_variables() override { + 
std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct TestAutogradMultipleDispatchViewBackwardAutogradCUDA0 : public Node { + TORCH_API TestAutogradMultipleDispatchViewBackwardAutogradCUDA0() = default; +#else +struct TORCH_API TestAutogradMultipleDispatchViewBackwardAutogradCUDA0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "TestAutogradMultipleDispatchViewBackwardAutogradCUDA0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct ScatterReduceBackward0 : public TraceableFunction { + TORCH_API ScatterReduceBackward0() = default; +#else +struct TORCH_API ScatterReduceBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ScatterReduceBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + index_.reset_data(); + self_.reset_data(); + src_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + bool include_self; + SavedVariable index_; + std::string reduce; + SavedVariable self_; + SavedVariable src_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct ReshapeCopyBackward0 : public TraceableFunction { + TORCH_API ReshapeCopyBackward0() = default; +#else +struct TORCH_API 
ReshapeCopyBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ReshapeCopyBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct ForeachDivBackward0 : public TraceableFunction { + TORCH_API ForeachDivBackward0() = default; +#else +struct TORCH_API ForeachDivBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachDivBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.clear(); + other_released_ = true; + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector other_; + bool other_released_ = false; + std::vector self_; + bool self_released_ = false; + size_t self_size_; + size_t other_size_; +}; +#ifdef _WIN32 +struct ForeachPowBackward0 : public TraceableFunction { + TORCH_API ForeachPowBackward0() = default; +#else +struct TORCH_API ForeachPowBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachPowBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + exponent_.clear(); + exponent_released_ = true; + self_.clear(); + self_released_ = true; + result_.clear(); + result_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& 
inputs, SwapSavedVariables& saved) override; + std::vector exponent_; + bool exponent_released_ = false; + std::vector self_; + bool self_released_ = false; + std::vector result_; + bool result_released_ = false; + size_t self_size_; + size_t exponent_size_; +}; +#ifdef _WIN32 +struct ForeachPowBackward1 : public TraceableFunction { + TORCH_API ForeachPowBackward1() = default; +#else +struct TORCH_API ForeachPowBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachPowBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + exponent.clear(); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector exponent; + bool exponent_released_ = false; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachPowBackward2 : public TraceableFunction { + TORCH_API ForeachPowBackward2() = default; +#else +struct TORCH_API ForeachPowBackward2 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachPowBackward2"; } + void release_variables() override { + std::lock_guard lock(mutex_); + exponent_.clear(); + exponent_released_ = true; + result_.clear(); + result_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector exponent_; + bool exponent_released_ = false; + at::Scalar self; + std::vector result_; + bool result_released_ = false; + size_t exponent_size_; +}; +#ifdef _WIN32 +struct ForeachMinimumBackward0 : public TraceableFunction { + 
TORCH_API ForeachMinimumBackward0() = default; +#else +struct TORCH_API ForeachMinimumBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachMinimumBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar scalar; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachMinimumBackward1 : public TraceableFunction { + TORCH_API ForeachMinimumBackward1() = default; +#else +struct TORCH_API ForeachMinimumBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachMinimumBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + scalars.clear(); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector scalars; + bool scalars_released_ = false; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachMaximumBackward0 : public TraceableFunction { + TORCH_API ForeachMaximumBackward0() = default; +#else +struct TORCH_API ForeachMaximumBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachMaximumBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + } + + void 
compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar scalar; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachMaximumBackward1 : public TraceableFunction { + TORCH_API ForeachMaximumBackward1() = default; +#else +struct TORCH_API ForeachMaximumBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachMaximumBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + scalars.clear(); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector scalars; + bool scalars_released_ = false; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachNormBackward0 : public TraceableFunction { + TORCH_API ForeachNormBackward0() = default; +#else +struct TORCH_API ForeachNormBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachNormBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + result_.clear(); + result_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar ord; + std::vector self_; + bool self_released_ = false; + std::vector result_; + bool result_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct AliasBackward0_copy : public TraceableFunction { + TORCH_API 
AliasBackward0_copy() = default; +#else +struct TORCH_API AliasBackward0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AliasBackward0_copy"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct AsStridedBackward0_copy : public TraceableFunction { + TORCH_API AsStridedBackward0_copy() = default; +#else +struct TORCH_API AsStridedBackward0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AsStridedBackward0_copy"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::TensorGeometry self_geometry; + std::vector size; + c10::optional storage_offset; + std::vector stride; + +}; +#ifdef _WIN32 +struct ConjBackward0_copy : public TraceableFunction { + TORCH_API ConjBackward0_copy() = default; +#else +struct TORCH_API ConjBackward0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ConjBackward0_copy"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct NegViewBackward0_copy : public TraceableFunction { + TORCH_API NegViewBackward0_copy() = default; +#else +struct TORCH_API NegViewBackward0_copy : public TraceableFunction { +#endif + using 
TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NegViewBackward0_copy"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct DiagonalBackward0_copy : public TraceableFunction { + TORCH_API DiagonalBackward0_copy() = default; +#else +struct TORCH_API DiagonalBackward0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "DiagonalBackward0_copy"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim1 = 0; + int64_t dim2 = 0; + int64_t offset = 0; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct ExpandBackward0_copy : public TraceableFunction { + TORCH_API ExpandBackward0_copy() = default; +#else +struct TORCH_API ExpandBackward0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ExpandBackward0_copy"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct PermuteBackward0_copy : public TraceableFunction { + TORCH_API PermuteBackward0_copy() = default; +#else +struct TORCH_API PermuteBackward0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() 
const override { return "PermuteBackward0_copy"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector dims; + +}; +#ifdef _WIN32 +struct ReshapeAliasBackward0_copy : public TraceableFunction { + TORCH_API ReshapeAliasBackward0_copy() = default; +#else +struct TORCH_API ReshapeAliasBackward0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ReshapeAliasBackward0_copy"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct SelectBackward0_copy : public TraceableFunction { + TORCH_API SelectBackward0_copy() = default; +#else +struct TORCH_API SelectBackward0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SelectBackward0_copy"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + c10::SymInt index; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct SelectBackwardAutogradNestedTensor0_copy : public TraceableFunction { + TORCH_API SelectBackwardAutogradNestedTensor0_copy() = default; +#else +struct TORCH_API SelectBackwardAutogradNestedTensor0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return 
"SelectBackwardAutogradNestedTensor0_copy"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + c10::SymInt index; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct SliceBackward0_copy : public TraceableFunction { + TORCH_API SliceBackward0_copy() = default; +#else +struct TORCH_API SliceBackward0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SliceBackward0_copy"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + c10::optional end; + std::vector self_sym_sizes; + c10::optional start; + c10::SymInt step; + +}; +#ifdef _WIN32 +struct SplitBackward0_copy : public TraceableFunction { + TORCH_API SplitBackward0_copy() = default; +#else +struct TORCH_API SplitBackward0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SplitBackward0_copy"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + at::TensorOptions self_options; + std::vector self_sym_sizes; + c10::SymInt split_size; + +}; +#ifdef _WIN32 +struct SplitWithSizesBackward0_copy : public TraceableFunction { + TORCH_API SplitWithSizesBackward0_copy() = default; +#else +struct TORCH_API SplitWithSizesBackward0_copy : public TraceableFunction { +#endif + using 
TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SplitWithSizesBackward0_copy"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + at::TensorOptions self_options; + std::vector self_sym_sizes; + std::vector split_sizes; + +}; +#ifdef _WIN32 +struct SplitWithSizesBackwardAutogradNestedTensor0_copy : public TraceableFunction { + TORCH_API SplitWithSizesBackwardAutogradNestedTensor0_copy() = default; +#else +struct TORCH_API SplitWithSizesBackwardAutogradNestedTensor0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SplitWithSizesBackwardAutogradNestedTensor0_copy"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + SavedVariable self_; + at::TensorOptions self_options; + std::vector split_sizes; + +}; +#ifdef _WIN32 +struct SqueezeBackward0_copy : public TraceableFunction { + TORCH_API SqueezeBackward0_copy() = default; +#else +struct TORCH_API SqueezeBackward0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SqueezeBackward0_copy"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct SqueezeBackward1_copy : 
public TraceableFunction { + TORCH_API SqueezeBackward1_copy() = default; +#else +struct TORCH_API SqueezeBackward1_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SqueezeBackward1_copy"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct SqueezeBackwardAutogradNestedTensor0_copy : public TraceableFunction { + TORCH_API SqueezeBackwardAutogradNestedTensor0_copy() = default; +#else +struct TORCH_API SqueezeBackwardAutogradNestedTensor0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SqueezeBackwardAutogradNestedTensor0_copy"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + +}; +#ifdef _WIN32 +struct SqueezeBackward2_copy : public TraceableFunction { + TORCH_API SqueezeBackward2_copy() = default; +#else +struct TORCH_API SqueezeBackward2_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SqueezeBackward2_copy"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector dim; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct SqueezeBackwardAutogradNestedTensor1_copy : public TraceableFunction 
{ + TORCH_API SqueezeBackwardAutogradNestedTensor1_copy() = default; +#else +struct TORCH_API SqueezeBackwardAutogradNestedTensor1_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SqueezeBackwardAutogradNestedTensor1_copy"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector dim; + int64_t self_dim = 0; + +}; +#ifdef _WIN32 +struct TBackward0_copy : public TraceableFunction { + TORCH_API TBackward0_copy() = default; +#else +struct TORCH_API TBackward0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "TBackward0_copy"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct TransposeBackward0_copy : public TraceableFunction { + TORCH_API TransposeBackward0_copy() = default; +#else +struct TORCH_API TransposeBackward0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "TransposeBackward0_copy"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim0 = 0; + int64_t dim1 = 0; + +}; +#ifdef _WIN32 +struct UnfoldBackward0_copy : public TraceableFunction { + TORCH_API UnfoldBackward0_copy() = default; +#else +struct TORCH_API UnfoldBackward0_copy : public 
TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UnfoldBackward0_copy"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dimension = 0; + std::vector self_sym_sizes; + int64_t size = 0; + int64_t step = 0; + +}; +#ifdef _WIN32 +struct LiftFreshBackward0_copy : public TraceableFunction { + TORCH_API LiftFreshBackward0_copy() = default; +#else +struct TORCH_API LiftFreshBackward0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LiftFreshBackward0_copy"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct UnsqueezeBackward0_copy : public TraceableFunction { + TORCH_API UnsqueezeBackward0_copy() = default; +#else +struct TORCH_API UnsqueezeBackward0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UnsqueezeBackward0_copy"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + +}; +#ifdef _WIN32 +struct ViewBackward0_copy : public TraceableFunction { + TORCH_API ViewBackward0_copy() = default; +#else +struct TORCH_API ViewBackward0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) 
override; + std::string name() const override { return "ViewBackward0_copy"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct ViewBackwardAutogradNestedTensor0_copy : public TraceableFunction { + TORCH_API ViewBackwardAutogradNestedTensor0_copy() = default; +#else +struct TORCH_API ViewBackwardAutogradNestedTensor0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ViewBackwardAutogradNestedTensor0_copy"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct ViewAsRealBackward0_copy : public TraceableFunction { + TORCH_API ViewAsRealBackward0_copy() = default; +#else +struct TORCH_API ViewAsRealBackward0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ViewAsRealBackward0_copy"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct ViewAsComplexBackward0_copy : public TraceableFunction { + TORCH_API ViewAsComplexBackward0_copy() = default; +#else +struct TORCH_API ViewAsComplexBackward0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const 
override { return "ViewAsComplexBackward0_copy"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct ValuesBackward0_copy : public TraceableFunction { + TORCH_API ValuesBackward0_copy() = default; +#else +struct TORCH_API ValuesBackward0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ValuesBackward0_copy"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct ValuesBackwardAutogradNestedTensor0_copy : public TraceableFunction { + TORCH_API ValuesBackwardAutogradNestedTensor0_copy() = default; +#else +struct TORCH_API ValuesBackwardAutogradNestedTensor0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ValuesBackwardAutogradNestedTensor0_copy"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct NestedViewFromBufferBackward0_copy : public TraceableFunction { + TORCH_API NestedViewFromBufferBackward0_copy() = default; +#else +struct TORCH_API NestedViewFromBufferBackward0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + 
std::string name() const override { return "NestedViewFromBufferBackward0_copy"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct NestedViewFromJaggedBackward0_copy : public TraceableFunction { + TORCH_API NestedViewFromJaggedBackward0_copy() = default; +#else +struct TORCH_API NestedViewFromJaggedBackward0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NestedViewFromJaggedBackward0_copy"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct NestedGetValuesBackward0_copy : public TraceableFunction { + TORCH_API NestedGetValuesBackward0_copy() = default; +#else +struct TORCH_API NestedGetValuesBackward0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NestedGetValuesBackward0_copy"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct UnbindBackward0_copy : public TraceableFunction { + TORCH_API UnbindBackward0_copy() = default; +#else +struct TORCH_API UnbindBackward0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UnbindBackward0_copy"; } + 
void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + +}; +#ifdef _WIN32 +struct UnbindBackwardAutogradNestedTensor0_copy : public TraceableFunction { + TORCH_API UnbindBackwardAutogradNestedTensor0_copy() = default; +#else +struct TORCH_API UnbindBackwardAutogradNestedTensor0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UnbindBackwardAutogradNestedTensor0_copy"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + SavedVariable self_; + at::TensorOptions self_options; + +}; +#ifdef _WIN32 +struct TestAutogradMultipleDispatchViewBackward0_copy : public TraceableFunction { + TORCH_API TestAutogradMultipleDispatchViewBackward0_copy() = default; +#else +struct TORCH_API TestAutogradMultipleDispatchViewBackward0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "TestAutogradMultipleDispatchViewBackward0_copy"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct TestAutogradMultipleDispatchViewBackwardAutogradCUDA0_copy : public TraceableFunction { + TORCH_API TestAutogradMultipleDispatchViewBackwardAutogradCUDA0_copy() = default; +#else +struct TORCH_API 
TestAutogradMultipleDispatchViewBackwardAutogradCUDA0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "TestAutogradMultipleDispatchViewBackwardAutogradCUDA0_copy"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct ForeachAbsBackward0 : public TraceableFunction { + TORCH_API ForeachAbsBackward0() = default; +#else +struct TORCH_API ForeachAbsBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachAbsBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachAcosBackward0 : public TraceableFunction { + TORCH_API ForeachAcosBackward0() = default; +#else +struct TORCH_API ForeachAcosBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachAcosBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_; + bool self_released_ = 
false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachAddBackward1Scalar : public TraceableFunction { + TORCH_API ForeachAddBackward1Scalar() = default; +#else +struct TORCH_API ForeachAddBackward1Scalar : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachAddBackward1Scalar"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachAddBackward0List : public TraceableFunction { + TORCH_API ForeachAddBackward0List() = default; +#else +struct TORCH_API ForeachAddBackward0List : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachAddBackward0List"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.clear(); + other_released_ = true; + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar alpha; + std::vector other_; + bool other_released_ = false; + std::vector self_; + bool self_released_ = false; + size_t self_size_; + size_t other_size_; +}; +#ifdef _WIN32 +struct ForeachAddBackward1ScalarList : public TraceableFunction { + TORCH_API ForeachAddBackward1ScalarList() = default; +#else +struct TORCH_API ForeachAddBackward1ScalarList : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string 
name() const override { return "ForeachAddBackward1ScalarList"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachAddBackward0Tensor : public TraceableFunction { + TORCH_API ForeachAddBackward0Tensor() = default; +#else +struct TORCH_API ForeachAddBackward0Tensor : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachAddBackward0Tensor"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar alpha; + SavedVariable other_; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachAddcdivBackward0Scalar : public TraceableFunction { + TORCH_API ForeachAddcdivBackward0Scalar() = default; +#else +struct TORCH_API ForeachAddcdivBackward0Scalar : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachAddcdivBackward0Scalar"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + tensor1_.clear(); + tensor1_released_ = true; + tensor2_.clear(); + tensor2_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) 
override; + std::vector self_; + bool self_released_ = false; + std::vector tensor1_; + bool tensor1_released_ = false; + std::vector tensor2_; + bool tensor2_released_ = false; + at::Scalar value; + size_t self_size_; + size_t tensor1_size_; + size_t tensor2_size_; +}; +#ifdef _WIN32 +struct ForeachAddcdivBackward0ScalarList : public TraceableFunction { + TORCH_API ForeachAddcdivBackward0ScalarList() = default; +#else +struct TORCH_API ForeachAddcdivBackward0ScalarList : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachAddcdivBackward0ScalarList"; } + void release_variables() override { + std::lock_guard lock(mutex_); + scalars.clear(); + self_.clear(); + self_released_ = true; + tensor1_.clear(); + tensor1_released_ = true; + tensor2_.clear(); + tensor2_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector scalars; + bool scalars_released_ = false; + std::vector self_; + bool self_released_ = false; + std::vector tensor1_; + bool tensor1_released_ = false; + std::vector tensor2_; + bool tensor2_released_ = false; + size_t self_size_; + size_t tensor1_size_; + size_t tensor2_size_; +}; +#ifdef _WIN32 +struct ForeachAddcmulBackward0Scalar : public TraceableFunction { + TORCH_API ForeachAddcmulBackward0Scalar() = default; +#else +struct TORCH_API ForeachAddcmulBackward0Scalar : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachAddcmulBackward0Scalar"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + tensor1_.clear(); + tensor1_released_ = true; + tensor2_.clear(); + tensor2_released_ = true; 
+ } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_; + bool self_released_ = false; + std::vector tensor1_; + bool tensor1_released_ = false; + std::vector tensor2_; + bool tensor2_released_ = false; + at::Scalar value; + size_t self_size_; + size_t tensor1_size_; + size_t tensor2_size_; +}; +#ifdef _WIN32 +struct ForeachAddcmulBackward0ScalarList : public TraceableFunction { + TORCH_API ForeachAddcmulBackward0ScalarList() = default; +#else +struct TORCH_API ForeachAddcmulBackward0ScalarList : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachAddcmulBackward0ScalarList"; } + void release_variables() override { + std::lock_guard lock(mutex_); + scalars.clear(); + self_.clear(); + self_released_ = true; + tensor1_.clear(); + tensor1_released_ = true; + tensor2_.clear(); + tensor2_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector scalars; + bool scalars_released_ = false; + std::vector self_; + bool self_released_ = false; + std::vector tensor1_; + bool tensor1_released_ = false; + std::vector tensor2_; + bool tensor2_released_ = false; + size_t self_size_; + size_t tensor1_size_; + size_t tensor2_size_; +}; +#ifdef _WIN32 +struct ForeachAsinBackward0 : public TraceableFunction { + TORCH_API ForeachAsinBackward0() = default; +#else +struct TORCH_API ForeachAsinBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachAsinBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + 
self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachAtanBackward0 : public TraceableFunction { + TORCH_API ForeachAtanBackward0() = default; +#else +struct TORCH_API ForeachAtanBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachAtanBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachCeilBackward0 : public TraceableFunction { + TORCH_API ForeachCeilBackward0() = default; +#else +struct TORCH_API ForeachCeilBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachCeilBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachClampMaxBackward0Scalar : public TraceableFunction { + TORCH_API ForeachClampMaxBackward0Scalar() = default; +#else +struct TORCH_API ForeachClampMaxBackward0Scalar : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachClampMaxBackward0Scalar"; } 
+ void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar scalar; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachClampMaxBackward1List : public TraceableFunction { + TORCH_API ForeachClampMaxBackward1List() = default; +#else +struct TORCH_API ForeachClampMaxBackward1List : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachClampMaxBackward1List"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.clear(); + other_released_ = true; + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector other_; + bool other_released_ = false; + std::vector self_; + bool self_released_ = false; + size_t self_size_; + size_t other_size_; +}; +#ifdef _WIN32 +struct ForeachClampMaxBackward0ScalarList : public TraceableFunction { + TORCH_API ForeachClampMaxBackward0ScalarList() = default; +#else +struct TORCH_API ForeachClampMaxBackward0ScalarList : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachClampMaxBackward0ScalarList"; } + void release_variables() override { + std::lock_guard lock(mutex_); + scalars.clear(); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector scalars; + bool 
scalars_released_ = false; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachClampMinBackward0Scalar : public TraceableFunction { + TORCH_API ForeachClampMinBackward0Scalar() = default; +#else +struct TORCH_API ForeachClampMinBackward0Scalar : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachClampMinBackward0Scalar"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar scalar; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachClampMinBackward1List : public TraceableFunction { + TORCH_API ForeachClampMinBackward1List() = default; +#else +struct TORCH_API ForeachClampMinBackward1List : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachClampMinBackward1List"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.clear(); + other_released_ = true; + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector other_; + bool other_released_ = false; + std::vector self_; + bool self_released_ = false; + size_t self_size_; + size_t other_size_; +}; +#ifdef _WIN32 +struct ForeachClampMinBackward0ScalarList : public TraceableFunction { + TORCH_API ForeachClampMinBackward0ScalarList() = default; +#else +struct TORCH_API ForeachClampMinBackward0ScalarList : public TraceableFunction { 
+#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachClampMinBackward0ScalarList"; } + void release_variables() override { + std::lock_guard lock(mutex_); + scalars.clear(); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector scalars; + bool scalars_released_ = false; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachCosBackward0 : public TraceableFunction { + TORCH_API ForeachCosBackward0() = default; +#else +struct TORCH_API ForeachCosBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachCosBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachCoshBackward0 : public TraceableFunction { + TORCH_API ForeachCoshBackward0() = default; +#else +struct TORCH_API ForeachCoshBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachCoshBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector 
self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachDivBackward1Scalar : public TraceableFunction { + TORCH_API ForeachDivBackward1Scalar() = default; +#else +struct TORCH_API ForeachDivBackward1Scalar : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachDivBackward1Scalar"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar scalar; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachDivBackward1ScalarList : public TraceableFunction { + TORCH_API ForeachDivBackward1ScalarList() = default; +#else +struct TORCH_API ForeachDivBackward1ScalarList : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachDivBackward1ScalarList"; } + void release_variables() override { + std::lock_guard lock(mutex_); + scalars.clear(); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector scalars; + bool scalars_released_ = false; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachDivBackward0Tensor : public TraceableFunction { + TORCH_API ForeachDivBackward0Tensor() = default; +#else +struct TORCH_API ForeachDivBackward0Tensor : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string 
name() const override { return "ForeachDivBackward0Tensor"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable other_; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachErfBackward0 : public TraceableFunction { + TORCH_API ForeachErfBackward0() = default; +#else +struct TORCH_API ForeachErfBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachErfBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachErfcBackward0 : public TraceableFunction { + TORCH_API ForeachErfcBackward0() = default; +#else +struct TORCH_API ForeachErfcBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachErfcBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachExpBackward0 : public TraceableFunction { + TORCH_API 
ForeachExpBackward0() = default; +#else +struct TORCH_API ForeachExpBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachExpBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result_.clear(); + result_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector result_; + bool result_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachExpm1Backward0 : public TraceableFunction { + TORCH_API ForeachExpm1Backward0() = default; +#else +struct TORCH_API ForeachExpm1Backward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachExpm1Backward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result_.clear(); + result_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector result_; + bool result_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachFloorBackward0 : public TraceableFunction { + TORCH_API ForeachFloorBackward0() = default; +#else +struct TORCH_API ForeachFloorBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachFloorBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + size_t self_size_; +}; +#ifdef _WIN32 
+struct ForeachFracBackward0 : public TraceableFunction { + TORCH_API ForeachFracBackward0() = default; +#else +struct TORCH_API ForeachFracBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachFracBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachLerpBackward1List : public TraceableFunction { + TORCH_API ForeachLerpBackward1List() = default; +#else +struct TORCH_API ForeachLerpBackward1List : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachLerpBackward1List"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + tensors1_.clear(); + tensors1_released_ = true; + weights_.clear(); + weights_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_; + bool self_released_ = false; + std::vector tensors1_; + bool tensors1_released_ = false; + std::vector weights_; + bool weights_released_ = false; + size_t self_size_; + size_t tensors1_size_; + size_t weights_size_; +}; +#ifdef _WIN32 +struct ForeachLerpBackward0Scalar : public TraceableFunction { + TORCH_API ForeachLerpBackward0Scalar() = default; +#else +struct TORCH_API ForeachLerpBackward0Scalar : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachLerpBackward0Scalar"; } + void 
release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar weight; + size_t self_size_; + size_t tensors1_size_; +}; +#ifdef _WIN32 +struct ForeachLgammaBackward0 : public TraceableFunction { + TORCH_API ForeachLgammaBackward0() = default; +#else +struct TORCH_API ForeachLgammaBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachLgammaBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachLogBackward0 : public TraceableFunction { + TORCH_API ForeachLogBackward0() = default; +#else +struct TORCH_API ForeachLogBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachLogBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachLog10Backward0 : public TraceableFunction { + TORCH_API ForeachLog10Backward0() = default; +#else +struct TORCH_API ForeachLog10Backward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list 
apply(variable_list&& grads) override; + std::string name() const override { return "ForeachLog10Backward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachLog1PBackward0 : public TraceableFunction { + TORCH_API ForeachLog1PBackward0() = default; +#else +struct TORCH_API ForeachLog1PBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachLog1PBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachLog2Backward0 : public TraceableFunction { + TORCH_API ForeachLog2Backward0() = default; +#else +struct TORCH_API ForeachLog2Backward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachLog2Backward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachMaximumBackward0List : public TraceableFunction { + 
TORCH_API ForeachMaximumBackward0List() = default; +#else +struct TORCH_API ForeachMaximumBackward0List : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachMaximumBackward0List"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.clear(); + other_released_ = true; + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector other_; + bool other_released_ = false; + std::vector self_; + bool self_released_ = false; + size_t self_size_; + size_t other_size_; +}; +#ifdef _WIN32 +struct ForeachMinimumBackward0List : public TraceableFunction { + TORCH_API ForeachMinimumBackward0List() = default; +#else +struct TORCH_API ForeachMinimumBackward0List : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachMinimumBackward0List"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.clear(); + other_released_ = true; + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector other_; + bool other_released_ = false; + std::vector self_; + bool self_released_ = false; + size_t self_size_; + size_t other_size_; +}; +#ifdef _WIN32 +struct ForeachMulBackward1Scalar : public TraceableFunction { + TORCH_API ForeachMulBackward1Scalar() = default; +#else +struct TORCH_API ForeachMulBackward1Scalar : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() 
const override { return "ForeachMulBackward1Scalar"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar scalar; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachMulBackward0List : public TraceableFunction { + TORCH_API ForeachMulBackward0List() = default; +#else +struct TORCH_API ForeachMulBackward0List : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachMulBackward0List"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.clear(); + other_released_ = true; + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector other_; + bool other_released_ = false; + std::vector self_; + bool self_released_ = false; + size_t self_size_; + size_t other_size_; +}; +#ifdef _WIN32 +struct ForeachMulBackward1ScalarList : public TraceableFunction { + TORCH_API ForeachMulBackward1ScalarList() = default; +#else +struct TORCH_API ForeachMulBackward1ScalarList : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachMulBackward1ScalarList"; } + void release_variables() override { + std::lock_guard lock(mutex_); + scalars.clear(); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector 
scalars; + bool scalars_released_ = false; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachMulBackward0Tensor : public TraceableFunction { + TORCH_API ForeachMulBackward0Tensor() = default; +#else +struct TORCH_API ForeachMulBackward0Tensor : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachMulBackward0Tensor"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable other_; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachNegBackward0 : public TraceableFunction { + TORCH_API ForeachNegBackward0() = default; +#else +struct TORCH_API ForeachNegBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachNegBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachPowBackward0Scalar : public TraceableFunction { + TORCH_API ForeachPowBackward0Scalar() = default; +#else +struct TORCH_API ForeachPowBackward0Scalar : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachPowBackward0Scalar"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + 
self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar exponent; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachReciprocalBackward0 : public TraceableFunction { + TORCH_API ForeachReciprocalBackward0() = default; +#else +struct TORCH_API ForeachReciprocalBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachReciprocalBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result_.clear(); + result_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector result_; + bool result_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachRoundBackward0 : public TraceableFunction { + TORCH_API ForeachRoundBackward0() = default; +#else +struct TORCH_API ForeachRoundBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachRoundBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachSigmoidBackward0 : public TraceableFunction { + TORCH_API ForeachSigmoidBackward0() = default; +#else +struct TORCH_API ForeachSigmoidBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { 
return "ForeachSigmoidBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result_.clear(); + result_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector result_; + bool result_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachSignBackward0 : public TraceableFunction { + TORCH_API ForeachSignBackward0() = default; +#else +struct TORCH_API ForeachSignBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachSignBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachSinBackward0 : public TraceableFunction { + TORCH_API ForeachSinBackward0() = default; +#else +struct TORCH_API ForeachSinBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachSinBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachSinhBackward0 : public TraceableFunction { + TORCH_API ForeachSinhBackward0() = default; +#else +struct TORCH_API ForeachSinhBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list 
apply(variable_list&& grads) override; + std::string name() const override { return "ForeachSinhBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachSqrtBackward0 : public TraceableFunction { + TORCH_API ForeachSqrtBackward0() = default; +#else +struct TORCH_API ForeachSqrtBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachSqrtBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result_.clear(); + result_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector result_; + bool result_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachSubBackward1Scalar : public TraceableFunction { + TORCH_API ForeachSubBackward1Scalar() = default; +#else +struct TORCH_API ForeachSubBackward1Scalar : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachSubBackward1Scalar"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachSubBackward0List : public 
TraceableFunction { + TORCH_API ForeachSubBackward0List() = default; +#else +struct TORCH_API ForeachSubBackward0List : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachSubBackward0List"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.clear(); + other_released_ = true; + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar alpha; + std::vector other_; + bool other_released_ = false; + std::vector self_; + bool self_released_ = false; + size_t self_size_; + size_t other_size_; +}; +#ifdef _WIN32 +struct ForeachSubBackward1ScalarList : public TraceableFunction { + TORCH_API ForeachSubBackward1ScalarList() = default; +#else +struct TORCH_API ForeachSubBackward1ScalarList : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachSubBackward1ScalarList"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachTanBackward0 : public TraceableFunction { + TORCH_API ForeachTanBackward0() = default; +#else +struct TORCH_API ForeachTanBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachTanBackward0"; } + void release_variables() override { + 
std::lock_guard lock(mutex_); + result_.clear(); + result_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector result_; + bool result_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachTanhBackward0 : public TraceableFunction { + TORCH_API ForeachTanhBackward0() = default; +#else +struct TORCH_API ForeachTanhBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachTanhBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result_.clear(); + result_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector result_; + bool result_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachTruncBackward0 : public TraceableFunction { + TORCH_API ForeachTruncBackward0() = default; +#else +struct TORCH_API ForeachTruncBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachTruncBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + size_t self_size_; +}; + +}}} // namespace torch::autograd::generated diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/VariableType.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/VariableType.h new file mode 100644 index 
0000000000000000000000000000000000000000..30b3721649d77e169d50ba1a04eef6680fb7d2ad --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/VariableType.h @@ -0,0 +1,59 @@ +#pragma once + +// @generated from ../tools/autograd/templates/VariableType.h + +#include +#include + +#include + +#include +#include + +#include // for size_t +#include // for function +#include // for unique_ptr +#include +#include + +namespace at { + struct Quantizer; +}; + +namespace torch { namespace autograd { + +using Variable = at::Tensor; +using at::Context; +using at::Device; +using at::Dimname; +using at::DimnameList; +using at::Generator; +using at::IntArrayRef; +using at::MemoryFormat; +using at::QScheme; +using at::Scalar; +using at::ScalarType; +using at::Storage; +using at::Tensor; +using at::TensorList; +using at::TensorOptions; +using at::Quantizer; +// This is temporary typedef to enable Quantizer in aten native function API +// we'll remove them when we are actually exposing Quantizer class +// to frontend +using ConstQuantizerPtr = const c10::intrusive_ptr&; +using c10::optional; + +namespace VariableType { + TORCH_API std::vector allCUDATypes(); + TORCH_API std::vector allXPUTypes(); + TORCH_API std::vector allCPUTypes(); + TORCH_API std::vector allPrivateUser1Types(); + + at::Tensor & unpack(Tensor & t, const char * name, int pos); + const at::Tensor & unpack(const Tensor & t, const char * name, int pos); + at::Tensor unpack_opt(const Tensor & t, const char * name, int pos); + std::vector unpack(const at::ITensorListRef& tl, const char *name, int pos); +}; + +}} // namespace torch::autograd diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/ViewFuncs.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/ViewFuncs.h new file mode 100644 index 0000000000000000000000000000000000000000..7f422749d3ef1710fe63aeeff344d219154041b2 --- /dev/null +++ 
b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/ViewFuncs.h @@ -0,0 +1,953 @@ +#pragma once + +// @generated from ../tools/autograd/templates/ViewFuncs.h + +#include +#include +#include + +#ifndef AT_PER_OPERATOR_HEADERS +#include +#else +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#endif + +namespace torch::autograd::generated { + +using at::Scalar; +using at::Tensor; +using at::IntArrayRef; +using at::ArrayRef; +using at::Type; +using at::ScalarType; +using c10::optional; +using c10::fmap; + +#define _CONJ_VIEW_FUNC_AVAILABLE +struct _ConjViewFunc : public torch::autograd::ViewFunc { + _ConjViewFunc() + {}; + virtual ~_ConjViewFunc() override {}; + virtual std::vector get_symints() const override; + virtual size_t num_symints() const override; + virtual std::vector get_tensors() const override; + virtual size_t num_tensors() const override; + virtual at::Tensor operator()(const at::Tensor&) const override; + virtual std::unique_ptr clone_and_set( + std::optional> = c10::nullopt, + std::optional> = c10::nullopt) const override; + +protected: + virtual void set_symints(std::vector) override; + virtual void set_tensors(std::vector) override; + +private: + +}; + +#define _INDICES_VIEW_FUNC_AVAILABLE +struct _IndicesViewFunc : public torch::autograd::ViewFunc { + _IndicesViewFunc() + {}; + virtual ~_IndicesViewFunc() override {}; + virtual std::vector get_symints() const override; + virtual size_t num_symints() const override; + virtual std::vector get_tensors() const override; + virtual size_t num_tensors() const override; + virtual at::Tensor operator()(const at::Tensor&) const override; 
+ virtual std::unique_ptr clone_and_set( + std::optional> = c10::nullopt, + std::optional> = c10::nullopt) const override; + +protected: + virtual void set_symints(std::vector) override; + virtual void set_tensors(std::vector) override; + +private: + +}; + +#define _NEG_VIEW_VIEW_FUNC_AVAILABLE +struct _NegViewViewFunc : public torch::autograd::ViewFunc { + _NegViewViewFunc() + {}; + virtual ~_NegViewViewFunc() override {}; + virtual std::vector get_symints() const override; + virtual size_t num_symints() const override; + virtual std::vector get_tensors() const override; + virtual size_t num_tensors() const override; + virtual at::Tensor operator()(const at::Tensor&) const override; + virtual std::unique_ptr clone_and_set( + std::optional> = c10::nullopt, + std::optional> = c10::nullopt) const override; + +protected: + virtual void set_symints(std::vector) override; + virtual void set_tensors(std::vector) override; + +private: + +}; + +#define _NESTED_GET_VALUES_VIEW_FUNC_AVAILABLE +struct _NestedGetValuesViewFunc : public torch::autograd::ViewFunc { + _NestedGetValuesViewFunc() + {}; + virtual ~_NestedGetValuesViewFunc() override {}; + virtual std::vector get_symints() const override; + virtual size_t num_symints() const override; + virtual std::vector get_tensors() const override; + virtual size_t num_tensors() const override; + virtual at::Tensor operator()(const at::Tensor&) const override; + virtual std::unique_ptr clone_and_set( + std::optional> = c10::nullopt, + std::optional> = c10::nullopt) const override; + +protected: + virtual void set_symints(std::vector) override; + virtual void set_tensors(std::vector) override; + +private: + +}; + +#define _NESTED_VIEW_FROM_BUFFER_VIEW_FUNC_AVAILABLE +struct _NestedViewFromBufferViewFunc : public torch::autograd::ViewFunc { + _NestedViewFromBufferViewFunc(const at::Tensor & nested_size, const at::Tensor & nested_strides, const at::Tensor & offsets) : nested_size(nested_size), nested_strides(nested_strides), 
offsets(offsets) + {}; + virtual ~_NestedViewFromBufferViewFunc() override {}; + virtual std::vector get_symints() const override; + virtual size_t num_symints() const override; + virtual std::vector get_tensors() const override; + virtual size_t num_tensors() const override; + virtual at::Tensor operator()(const at::Tensor&) const override; + virtual std::unique_ptr clone_and_set( + std::optional> = c10::nullopt, + std::optional> = c10::nullopt) const override; + +protected: + virtual void set_symints(std::vector) override; + virtual void set_tensors(std::vector) override; + +private: + at::Tensor nested_size; + at::Tensor nested_strides; + at::Tensor offsets; +}; + +#define _NESTED_VIEW_FROM_JAGGED_VIEW_FUNC_AVAILABLE +struct _NestedViewFromJaggedViewFunc : public torch::autograd::ViewFunc { + _NestedViewFromJaggedViewFunc(const at::Tensor & offsets, const at::Tensor & dummy, const c10::optional & lengths, int64_t ragged_idx) : offsets(offsets), dummy(dummy), lengths(lengths), ragged_idx(ragged_idx) + {}; + virtual ~_NestedViewFromJaggedViewFunc() override {}; + virtual std::vector get_symints() const override; + virtual size_t num_symints() const override; + virtual std::vector get_tensors() const override; + virtual size_t num_tensors() const override; + virtual at::Tensor operator()(const at::Tensor&) const override; + virtual std::unique_ptr clone_and_set( + std::optional> = c10::nullopt, + std::optional> = c10::nullopt) const override; + +protected: + virtual void set_symints(std::vector) override; + virtual void set_tensors(std::vector) override; + +private: + at::Tensor offsets; + at::Tensor dummy; + c10::optional lengths; + int64_t ragged_idx; +}; + +#define _RESHAPE_ALIAS_VIEW_FUNC_AVAILABLE +struct _ReshapeAliasViewFunc : public torch::autograd::ViewFunc { + _ReshapeAliasViewFunc(c10::SymIntArrayRef size, c10::SymIntArrayRef stride) : size(size.vec()), stride(stride.vec()) + {}; + virtual ~_ReshapeAliasViewFunc() override {}; + virtual std::vector 
get_symints() const override; + virtual size_t num_symints() const override; + virtual std::vector get_tensors() const override; + virtual size_t num_tensors() const override; + virtual at::Tensor operator()(const at::Tensor&) const override; + virtual std::unique_ptr clone_and_set( + std::optional> = c10::nullopt, + std::optional> = c10::nullopt) const override; + +protected: + virtual void set_symints(std::vector) override; + virtual void set_tensors(std::vector) override; + +private: + ::std::vector size; + ::std::vector stride; +}; + +#define _TEST_AUTOGRAD_MULTIPLE_DISPATCH_VIEW_VIEW_FUNC_AVAILABLE +struct _TestAutogradMultipleDispatchViewViewFunc : public torch::autograd::ViewFunc { + _TestAutogradMultipleDispatchViewViewFunc() + {}; + virtual ~_TestAutogradMultipleDispatchViewViewFunc() override {}; + virtual std::vector get_symints() const override; + virtual size_t num_symints() const override; + virtual std::vector get_tensors() const override; + virtual size_t num_tensors() const override; + virtual at::Tensor operator()(const at::Tensor&) const override; + virtual std::unique_ptr clone_and_set( + std::optional> = c10::nullopt, + std::optional> = c10::nullopt) const override; + +protected: + virtual void set_symints(std::vector) override; + virtual void set_tensors(std::vector) override; + +private: + +}; + +#define _VALUES_VIEW_FUNC_AVAILABLE +struct _ValuesViewFunc : public torch::autograd::ViewFunc { + _ValuesViewFunc() + {}; + virtual ~_ValuesViewFunc() override {}; + virtual std::vector get_symints() const override; + virtual size_t num_symints() const override; + virtual std::vector get_tensors() const override; + virtual size_t num_tensors() const override; + virtual at::Tensor operator()(const at::Tensor&) const override; + virtual std::unique_ptr clone_and_set( + std::optional> = c10::nullopt, + std::optional> = c10::nullopt) const override; + +protected: + virtual void set_symints(std::vector) override; + virtual void set_tensors(std::vector) 
override; + +private: + +}; + +#define ALIAS_VIEW_FUNC_AVAILABLE +struct AliasViewFunc : public torch::autograd::ViewFunc { + AliasViewFunc() + {}; + virtual ~AliasViewFunc() override {}; + virtual std::vector get_symints() const override; + virtual size_t num_symints() const override; + virtual std::vector get_tensors() const override; + virtual size_t num_tensors() const override; + virtual at::Tensor operator()(const at::Tensor&) const override; + virtual std::unique_ptr clone_and_set( + std::optional> = c10::nullopt, + std::optional> = c10::nullopt) const override; + +protected: + virtual void set_symints(std::vector) override; + virtual void set_tensors(std::vector) override; + +private: + +}; + +#define AS_STRIDED_VIEW_FUNC_AVAILABLE +struct AsStridedViewFunc : public torch::autograd::ViewFunc { + AsStridedViewFunc(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional storage_offset) : size(size.vec()), stride(stride.vec()), storage_offset(storage_offset) + {}; + virtual ~AsStridedViewFunc() override {}; + virtual std::vector get_symints() const override; + virtual size_t num_symints() const override; + virtual std::vector get_tensors() const override; + virtual size_t num_tensors() const override; + virtual at::Tensor operator()(const at::Tensor&) const override; + virtual std::unique_ptr clone_and_set( + std::optional> = c10::nullopt, + std::optional> = c10::nullopt) const override; + +protected: + virtual void set_symints(std::vector) override; + virtual void set_tensors(std::vector) override; + +private: + ::std::vector size; + ::std::vector stride; + c10::optional storage_offset; +}; + +#define CCOL_INDICES_VIEW_FUNC_AVAILABLE +struct CcolIndicesViewFunc : public torch::autograd::ViewFunc { + CcolIndicesViewFunc() + {}; + virtual ~CcolIndicesViewFunc() override {}; + virtual std::vector get_symints() const override; + virtual size_t num_symints() const override; + virtual std::vector get_tensors() const override; + virtual size_t 
num_tensors() const override; + virtual at::Tensor operator()(const at::Tensor&) const override; + virtual std::unique_ptr clone_and_set( + std::optional> = c10::nullopt, + std::optional> = c10::nullopt) const override; + +protected: + virtual void set_symints(std::vector) override; + virtual void set_tensors(std::vector) override; + +private: + +}; + +#define CHUNK_VIEW_FUNC_AVAILABLE +struct ChunkViewFunc : public torch::autograd::ViewFunc { + ChunkViewFunc(int64_t chunks, int64_t dim, int64_t view_idx) : chunks(chunks), dim(dim), view_idx(view_idx) + {}; + virtual ~ChunkViewFunc() override {}; + virtual std::vector get_symints() const override; + virtual size_t num_symints() const override; + virtual std::vector get_tensors() const override; + virtual size_t num_tensors() const override; + virtual at::Tensor operator()(const at::Tensor&) const override; + virtual std::unique_ptr clone_and_set( + std::optional> = c10::nullopt, + std::optional> = c10::nullopt) const override; + +protected: + virtual void set_symints(std::vector) override; + virtual void set_tensors(std::vector) override; + +private: + int64_t chunks; + int64_t dim; + int64_t view_idx; +}; + +#define COL_INDICES_VIEW_FUNC_AVAILABLE +struct ColIndicesViewFunc : public torch::autograd::ViewFunc { + ColIndicesViewFunc() + {}; + virtual ~ColIndicesViewFunc() override {}; + virtual std::vector get_symints() const override; + virtual size_t num_symints() const override; + virtual std::vector get_tensors() const override; + virtual size_t num_tensors() const override; + virtual at::Tensor operator()(const at::Tensor&) const override; + virtual std::unique_ptr clone_and_set( + std::optional> = c10::nullopt, + std::optional> = c10::nullopt) const override; + +protected: + virtual void set_symints(std::vector) override; + virtual void set_tensors(std::vector) override; + +private: + +}; + +#define CROW_INDICES_VIEW_FUNC_AVAILABLE +struct CrowIndicesViewFunc : public torch::autograd::ViewFunc { + 
CrowIndicesViewFunc() + {}; + virtual ~CrowIndicesViewFunc() override {}; + virtual std::vector get_symints() const override; + virtual size_t num_symints() const override; + virtual std::vector get_tensors() const override; + virtual size_t num_tensors() const override; + virtual at::Tensor operator()(const at::Tensor&) const override; + virtual std::unique_ptr clone_and_set( + std::optional> = c10::nullopt, + std::optional> = c10::nullopt) const override; + +protected: + virtual void set_symints(std::vector) override; + virtual void set_tensors(std::vector) override; + +private: + +}; + +#define DIAGONAL_VIEW_FUNC_AVAILABLE +struct DiagonalViewFunc : public torch::autograd::ViewFunc { + DiagonalViewFunc(int64_t offset, int64_t dim1, int64_t dim2) : offset(offset), dim1(dim1), dim2(dim2) + {}; + virtual ~DiagonalViewFunc() override {}; + virtual std::vector get_symints() const override; + virtual size_t num_symints() const override; + virtual std::vector get_tensors() const override; + virtual size_t num_tensors() const override; + virtual at::Tensor operator()(const at::Tensor&) const override; + virtual std::unique_ptr clone_and_set( + std::optional> = c10::nullopt, + std::optional> = c10::nullopt) const override; + +protected: + virtual void set_symints(std::vector) override; + virtual void set_tensors(std::vector) override; + +private: + int64_t offset; + int64_t dim1; + int64_t dim2; +}; + +#define EXPAND_VIEW_FUNC_AVAILABLE +struct ExpandViewFunc : public torch::autograd::ViewFunc { + ExpandViewFunc(c10::SymIntArrayRef size, bool implicit) : size(size.vec()), implicit(implicit) + {}; + virtual ~ExpandViewFunc() override {}; + virtual std::vector get_symints() const override; + virtual size_t num_symints() const override; + virtual std::vector get_tensors() const override; + virtual size_t num_tensors() const override; + virtual at::Tensor operator()(const at::Tensor&) const override; + virtual std::unique_ptr clone_and_set( + std::optional> = c10::nullopt, + 
std::optional> = c10::nullopt) const override; + +protected: + virtual void set_symints(std::vector) override; + virtual void set_tensors(std::vector) override; + +private: + ::std::vector size; + bool implicit; +}; + +#define INDICES_VIEW_FUNC_AVAILABLE +struct IndicesViewFunc : public torch::autograd::ViewFunc { + IndicesViewFunc() + {}; + virtual ~IndicesViewFunc() override {}; + virtual std::vector get_symints() const override; + virtual size_t num_symints() const override; + virtual std::vector get_tensors() const override; + virtual size_t num_tensors() const override; + virtual at::Tensor operator()(const at::Tensor&) const override; + virtual std::unique_ptr clone_and_set( + std::optional> = c10::nullopt, + std::optional> = c10::nullopt) const override; + +protected: + virtual void set_symints(std::vector) override; + virtual void set_tensors(std::vector) override; + +private: + +}; + +#define NARROW_VIEW_FUNC_AVAILABLE +struct NarrowViewFunc : public torch::autograd::ViewFunc { + NarrowViewFunc(int64_t dim, c10::SymInt start, c10::SymInt length) : dim(dim), start(start), length(length) + {}; + virtual ~NarrowViewFunc() override {}; + virtual std::vector get_symints() const override; + virtual size_t num_symints() const override; + virtual std::vector get_tensors() const override; + virtual size_t num_tensors() const override; + virtual at::Tensor operator()(const at::Tensor&) const override; + virtual std::unique_ptr clone_and_set( + std::optional> = c10::nullopt, + std::optional> = c10::nullopt) const override; + +protected: + virtual void set_symints(std::vector) override; + virtual void set_tensors(std::vector) override; + +private: + int64_t dim; + c10::SymInt start; + c10::SymInt length; +}; + +#define PERMUTE_VIEW_FUNC_AVAILABLE +struct PermuteViewFunc : public torch::autograd::ViewFunc { + PermuteViewFunc(at::IntArrayRef dims) : dims(dims.vec()) + {}; + virtual ~PermuteViewFunc() override {}; + virtual std::vector get_symints() const override; + 
virtual size_t num_symints() const override; + virtual std::vector get_tensors() const override; + virtual size_t num_tensors() const override; + virtual at::Tensor operator()(const at::Tensor&) const override; + virtual std::unique_ptr clone_and_set( + std::optional> = c10::nullopt, + std::optional> = c10::nullopt) const override; + +protected: + virtual void set_symints(std::vector) override; + virtual void set_tensors(std::vector) override; + +private: + ::std::vector dims; +}; + +#define ROW_INDICES_VIEW_FUNC_AVAILABLE +struct RowIndicesViewFunc : public torch::autograd::ViewFunc { + RowIndicesViewFunc() + {}; + virtual ~RowIndicesViewFunc() override {}; + virtual std::vector get_symints() const override; + virtual size_t num_symints() const override; + virtual std::vector get_tensors() const override; + virtual size_t num_tensors() const override; + virtual at::Tensor operator()(const at::Tensor&) const override; + virtual std::unique_ptr clone_and_set( + std::optional> = c10::nullopt, + std::optional> = c10::nullopt) const override; + +protected: + virtual void set_symints(std::vector) override; + virtual void set_tensors(std::vector) override; + +private: + +}; + +#define SELECT_INT_VIEW_FUNC_AVAILABLE +struct SelectIntViewFunc : public torch::autograd::ViewFunc { + SelectIntViewFunc(int64_t dim, c10::SymInt index) : dim(dim), index(index) + {}; + virtual ~SelectIntViewFunc() override {}; + virtual std::vector get_symints() const override; + virtual size_t num_symints() const override; + virtual std::vector get_tensors() const override; + virtual size_t num_tensors() const override; + virtual at::Tensor operator()(const at::Tensor&) const override; + virtual std::unique_ptr clone_and_set( + std::optional> = c10::nullopt, + std::optional> = c10::nullopt) const override; + +protected: + virtual void set_symints(std::vector) override; + virtual void set_tensors(std::vector) override; + +private: + int64_t dim; + c10::SymInt index; +}; + +#define 
SLICE_TENSOR_VIEW_FUNC_AVAILABLE +struct SliceTensorViewFunc : public torch::autograd::ViewFunc { + SliceTensorViewFunc(int64_t dim, c10::optional start, c10::optional end, c10::SymInt step) : dim(dim), start(start), end(end), step(step) + {}; + virtual ~SliceTensorViewFunc() override {}; + virtual std::vector get_symints() const override; + virtual size_t num_symints() const override; + virtual std::vector get_tensors() const override; + virtual size_t num_tensors() const override; + virtual at::Tensor operator()(const at::Tensor&) const override; + virtual std::unique_ptr clone_and_set( + std::optional> = c10::nullopt, + std::optional> = c10::nullopt) const override; + +protected: + virtual void set_symints(std::vector) override; + virtual void set_tensors(std::vector) override; + +private: + int64_t dim; + c10::optional start; + c10::optional end; + c10::SymInt step; +}; + +#define SLICE_INVERSE_VIEW_FUNC_AVAILABLE +struct SliceInverseViewFunc : public torch::autograd::ViewFunc { + SliceInverseViewFunc(const at::Tensor & src, int64_t dim, c10::optional start, c10::optional end, c10::SymInt step) : src(src), dim(dim), start(start), end(end), step(step) + {}; + virtual ~SliceInverseViewFunc() override {}; + virtual std::vector get_symints() const override; + virtual size_t num_symints() const override; + virtual std::vector get_tensors() const override; + virtual size_t num_tensors() const override; + virtual at::Tensor operator()(const at::Tensor&) const override; + virtual std::unique_ptr clone_and_set( + std::optional> = c10::nullopt, + std::optional> = c10::nullopt) const override; + +protected: + virtual void set_symints(std::vector) override; + virtual void set_tensors(std::vector) override; + +private: + at::Tensor src; + int64_t dim; + c10::optional start; + c10::optional end; + c10::SymInt step; +}; + +#define SPLIT_TENSOR_VIEW_FUNC_AVAILABLE +struct SplitTensorViewFunc : public torch::autograd::ViewFunc { + SplitTensorViewFunc(c10::SymInt split_size, 
int64_t dim, int64_t view_idx) : split_size(split_size), dim(dim), view_idx(view_idx) + {}; + virtual ~SplitTensorViewFunc() override {}; + virtual std::vector get_symints() const override; + virtual size_t num_symints() const override; + virtual std::vector get_tensors() const override; + virtual size_t num_tensors() const override; + virtual at::Tensor operator()(const at::Tensor&) const override; + virtual std::unique_ptr clone_and_set( + std::optional> = c10::nullopt, + std::optional> = c10::nullopt) const override; + +protected: + virtual void set_symints(std::vector) override; + virtual void set_tensors(std::vector) override; + +private: + c10::SymInt split_size; + int64_t dim; + int64_t view_idx; +}; + +#define SPLIT_WITH_SIZES_VIEW_FUNC_AVAILABLE +struct SplitWithSizesViewFunc : public torch::autograd::ViewFunc { + SplitWithSizesViewFunc(c10::SymIntArrayRef split_sizes, int64_t dim, int64_t view_idx) : split_sizes(split_sizes.vec()), dim(dim), view_idx(view_idx) + {}; + virtual ~SplitWithSizesViewFunc() override {}; + virtual std::vector get_symints() const override; + virtual size_t num_symints() const override; + virtual std::vector get_tensors() const override; + virtual size_t num_tensors() const override; + virtual at::Tensor operator()(const at::Tensor&) const override; + virtual std::unique_ptr clone_and_set( + std::optional> = c10::nullopt, + std::optional> = c10::nullopt) const override; + +protected: + virtual void set_symints(std::vector) override; + virtual void set_tensors(std::vector) override; + +private: + ::std::vector split_sizes; + int64_t dim; + int64_t view_idx; +}; + +#define SQUEEZE_VIEW_FUNC_AVAILABLE +struct SqueezeViewFunc : public torch::autograd::ViewFunc { + SqueezeViewFunc() + {}; + virtual ~SqueezeViewFunc() override {}; + virtual std::vector get_symints() const override; + virtual size_t num_symints() const override; + virtual std::vector get_tensors() const override; + virtual size_t num_tensors() const override; + virtual 
at::Tensor operator()(const at::Tensor&) const override; + virtual std::unique_ptr clone_and_set( + std::optional> = c10::nullopt, + std::optional> = c10::nullopt) const override; + +protected: + virtual void set_symints(std::vector) override; + virtual void set_tensors(std::vector) override; + +private: + +}; + +#define SQUEEZE_DIM_VIEW_FUNC_AVAILABLE +struct SqueezeDimViewFunc : public torch::autograd::ViewFunc { + SqueezeDimViewFunc(int64_t dim) : dim(dim) + {}; + virtual ~SqueezeDimViewFunc() override {}; + virtual std::vector get_symints() const override; + virtual size_t num_symints() const override; + virtual std::vector get_tensors() const override; + virtual size_t num_tensors() const override; + virtual at::Tensor operator()(const at::Tensor&) const override; + virtual std::unique_ptr clone_and_set( + std::optional> = c10::nullopt, + std::optional> = c10::nullopt) const override; + +protected: + virtual void set_symints(std::vector) override; + virtual void set_tensors(std::vector) override; + +private: + int64_t dim; +}; + +#define SQUEEZE_DIMS_VIEW_FUNC_AVAILABLE +struct SqueezeDimsViewFunc : public torch::autograd::ViewFunc { + SqueezeDimsViewFunc(at::IntArrayRef dim) : dim(dim.vec()) + {}; + virtual ~SqueezeDimsViewFunc() override {}; + virtual std::vector get_symints() const override; + virtual size_t num_symints() const override; + virtual std::vector get_tensors() const override; + virtual size_t num_tensors() const override; + virtual at::Tensor operator()(const at::Tensor&) const override; + virtual std::unique_ptr clone_and_set( + std::optional> = c10::nullopt, + std::optional> = c10::nullopt) const override; + +protected: + virtual void set_symints(std::vector) override; + virtual void set_tensors(std::vector) override; + +private: + ::std::vector dim; +}; + +#define T_VIEW_FUNC_AVAILABLE +struct TViewFunc : public torch::autograd::ViewFunc { + TViewFunc() + {}; + virtual ~TViewFunc() override {}; + virtual std::vector get_symints() const 
override; + virtual size_t num_symints() const override; + virtual std::vector get_tensors() const override; + virtual size_t num_tensors() const override; + virtual at::Tensor operator()(const at::Tensor&) const override; + virtual std::unique_ptr clone_and_set( + std::optional> = c10::nullopt, + std::optional> = c10::nullopt) const override; + +protected: + virtual void set_symints(std::vector) override; + virtual void set_tensors(std::vector) override; + +private: + +}; + +#define TRANSPOSE_INT_VIEW_FUNC_AVAILABLE +struct TransposeIntViewFunc : public torch::autograd::ViewFunc { + TransposeIntViewFunc(int64_t dim0, int64_t dim1) : dim0(dim0), dim1(dim1) + {}; + virtual ~TransposeIntViewFunc() override {}; + virtual std::vector get_symints() const override; + virtual size_t num_symints() const override; + virtual std::vector get_tensors() const override; + virtual size_t num_tensors() const override; + virtual at::Tensor operator()(const at::Tensor&) const override; + virtual std::unique_ptr clone_and_set( + std::optional> = c10::nullopt, + std::optional> = c10::nullopt) const override; + +protected: + virtual void set_symints(std::vector) override; + virtual void set_tensors(std::vector) override; + +private: + int64_t dim0; + int64_t dim1; +}; + +#define UNBIND_INT_VIEW_FUNC_AVAILABLE +struct UnbindIntViewFunc : public torch::autograd::ViewFunc { + UnbindIntViewFunc(int64_t dim, int64_t view_idx) : dim(dim), view_idx(view_idx) + {}; + virtual ~UnbindIntViewFunc() override {}; + virtual std::vector get_symints() const override; + virtual size_t num_symints() const override; + virtual std::vector get_tensors() const override; + virtual size_t num_tensors() const override; + virtual at::Tensor operator()(const at::Tensor&) const override; + virtual std::unique_ptr clone_and_set( + std::optional> = c10::nullopt, + std::optional> = c10::nullopt) const override; + +protected: + virtual void set_symints(std::vector) override; + virtual void set_tensors(std::vector) 
override; + +private: + int64_t dim; + int64_t view_idx; +}; + +#define UNFOLD_VIEW_FUNC_AVAILABLE +struct UnfoldViewFunc : public torch::autograd::ViewFunc { + UnfoldViewFunc(int64_t dimension, int64_t size, int64_t step) : dimension(dimension), size(size), step(step) + {}; + virtual ~UnfoldViewFunc() override {}; + virtual std::vector get_symints() const override; + virtual size_t num_symints() const override; + virtual std::vector get_tensors() const override; + virtual size_t num_tensors() const override; + virtual at::Tensor operator()(const at::Tensor&) const override; + virtual std::unique_ptr clone_and_set( + std::optional> = c10::nullopt, + std::optional> = c10::nullopt) const override; + +protected: + virtual void set_symints(std::vector) override; + virtual void set_tensors(std::vector) override; + +private: + int64_t dimension; + int64_t size; + int64_t step; +}; + +#define UNSQUEEZE_VIEW_FUNC_AVAILABLE +struct UnsqueezeViewFunc : public torch::autograd::ViewFunc { + UnsqueezeViewFunc(int64_t dim) : dim(dim) + {}; + virtual ~UnsqueezeViewFunc() override {}; + virtual std::vector get_symints() const override; + virtual size_t num_symints() const override; + virtual std::vector get_tensors() const override; + virtual size_t num_tensors() const override; + virtual at::Tensor operator()(const at::Tensor&) const override; + virtual std::unique_ptr clone_and_set( + std::optional> = c10::nullopt, + std::optional> = c10::nullopt) const override; + +protected: + virtual void set_symints(std::vector) override; + virtual void set_tensors(std::vector) override; + +private: + int64_t dim; +}; + +#define VALUES_VIEW_FUNC_AVAILABLE +struct ValuesViewFunc : public torch::autograd::ViewFunc { + ValuesViewFunc() + {}; + virtual ~ValuesViewFunc() override {}; + virtual std::vector get_symints() const override; + virtual size_t num_symints() const override; + virtual std::vector get_tensors() const override; + virtual size_t num_tensors() const override; + virtual 
at::Tensor operator()(const at::Tensor&) const override; + virtual std::unique_ptr clone_and_set( + std::optional> = c10::nullopt, + std::optional> = c10::nullopt) const override; + +protected: + virtual void set_symints(std::vector) override; + virtual void set_tensors(std::vector) override; + +private: + +}; + +#define VIEW_VIEW_FUNC_AVAILABLE +struct ViewViewFunc : public torch::autograd::ViewFunc { + ViewViewFunc(c10::SymIntArrayRef size) : size(size.vec()) + {}; + virtual ~ViewViewFunc() override {}; + virtual std::vector get_symints() const override; + virtual size_t num_symints() const override; + virtual std::vector get_tensors() const override; + virtual size_t num_tensors() const override; + virtual at::Tensor operator()(const at::Tensor&) const override; + virtual std::unique_ptr clone_and_set( + std::optional> = c10::nullopt, + std::optional> = c10::nullopt) const override; + +protected: + virtual void set_symints(std::vector) override; + virtual void set_tensors(std::vector) override; + +private: + ::std::vector size; +}; + +#define VIEW_DTYPE_VIEW_FUNC_AVAILABLE +struct ViewDtypeViewFunc : public torch::autograd::ViewFunc { + ViewDtypeViewFunc(at::ScalarType dtype) : dtype(dtype) + {}; + virtual ~ViewDtypeViewFunc() override {}; + virtual std::vector get_symints() const override; + virtual size_t num_symints() const override; + virtual std::vector get_tensors() const override; + virtual size_t num_tensors() const override; + virtual at::Tensor operator()(const at::Tensor&) const override; + virtual std::unique_ptr clone_and_set( + std::optional> = c10::nullopt, + std::optional> = c10::nullopt) const override; + +protected: + virtual void set_symints(std::vector) override; + virtual void set_tensors(std::vector) override; + +private: + at::ScalarType dtype; +}; + +#define VIEW_AS_COMPLEX_VIEW_FUNC_AVAILABLE +struct ViewAsComplexViewFunc : public torch::autograd::ViewFunc { + ViewAsComplexViewFunc() + {}; + virtual ~ViewAsComplexViewFunc() override {}; 
+ virtual std::vector get_symints() const override; + virtual size_t num_symints() const override; + virtual std::vector get_tensors() const override; + virtual size_t num_tensors() const override; + virtual at::Tensor operator()(const at::Tensor&) const override; + virtual std::unique_ptr clone_and_set( + std::optional> = c10::nullopt, + std::optional> = c10::nullopt) const override; + +protected: + virtual void set_symints(std::vector) override; + virtual void set_tensors(std::vector) override; + +private: + +}; + +#define VIEW_AS_REAL_VIEW_FUNC_AVAILABLE +struct ViewAsRealViewFunc : public torch::autograd::ViewFunc { + ViewAsRealViewFunc() + {}; + virtual ~ViewAsRealViewFunc() override {}; + virtual std::vector get_symints() const override; + virtual size_t num_symints() const override; + virtual std::vector get_tensors() const override; + virtual size_t num_tensors() const override; + virtual at::Tensor operator()(const at::Tensor&) const override; + virtual std::unique_ptr clone_and_set( + std::optional> = c10::nullopt, + std::optional> = c10::nullopt) const override; + +protected: + virtual void set_symints(std::vector) override; + virtual void set_tensors(std::vector) override; + +private: + +}; + +} // namespace torch::autograd::generated diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/python_functions.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/python_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..90e4910e96a347215d587d879bdcee0f52ffbb9f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/python_functions.h @@ -0,0 +1,25 @@ +#pragma once + +#include + +// @generated from ../tools/autograd/templates/python_functions.h + +// Python bindings for automatically generated autograd functions + +namespace torch { namespace autograd { namespace generated { + +void 
initialize_autogenerated_functions_0(PyObject* module); +void initialize_autogenerated_functions_1(PyObject* module); +void initialize_autogenerated_functions_2(PyObject* module); +void initialize_autogenerated_functions_3(PyObject* module); +void initialize_autogenerated_functions_4(PyObject* module); + +inline void initialize_autogenerated_functions(PyObject* module) { + initialize_autogenerated_functions_0(module); + initialize_autogenerated_functions_1(module); + initialize_autogenerated_functions_2(module); + initialize_autogenerated_functions_3(module); + initialize_autogenerated_functions_4(module); +} + +}}} // namespace torch::autograd::generated diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/python_return_types.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/python_return_types.h new file mode 100644 index 0000000000000000000000000000000000000000..81fa1cb708d38382e05048942671c451bf4e04e3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/python_return_types.h @@ -0,0 +1,98 @@ +#pragma once + +namespace torch { +namespace autograd { +namespace generated { + +PyTypeObject* get__fake_quantize_per_tensor_affine_cachemask_tensor_qparams_structseq(); +PyTypeObject* get__fused_moving_avg_obs_fq_helper_structseq(); +PyTypeObject* get__linalg_det_structseq(); +PyTypeObject* get__linalg_det_out_structseq(); +PyTypeObject* get__linalg_eigh_structseq(); +PyTypeObject* get__linalg_eigh_out_structseq(); +PyTypeObject* get__linalg_slogdet_structseq(); +PyTypeObject* get__linalg_slogdet_out_structseq(); +PyTypeObject* get__linalg_solve_ex_structseq(); +PyTypeObject* get__linalg_solve_ex_out_structseq(); +PyTypeObject* get__linalg_svd_structseq(); +PyTypeObject* get__linalg_svd_out_structseq(); +PyTypeObject* get__lu_with_info_structseq(); +PyTypeObject* get__scaled_dot_product_cudnn_attention_structseq(); +PyTypeObject* 
get__scaled_dot_product_efficient_attention_structseq(); +PyTypeObject* get__scaled_dot_product_flash_attention_structseq(); +PyTypeObject* get__scaled_dot_product_flash_attention_for_cpu_structseq(); +PyTypeObject* get__unpack_dual_structseq(); +PyTypeObject* get_aminmax_structseq(); +PyTypeObject* get_aminmax_out_structseq(); +PyTypeObject* get_cummax_structseq(); +PyTypeObject* get_cummax_out_structseq(); +PyTypeObject* get_cummin_structseq(); +PyTypeObject* get_cummin_out_structseq(); +PyTypeObject* get_frexp_structseq(); +PyTypeObject* get_frexp_out_structseq(); +PyTypeObject* get_geqrf_out_structseq(); +PyTypeObject* get_geqrf_structseq(); +PyTypeObject* get_histogram_out_structseq(); +PyTypeObject* get_histogram_structseq(); +PyTypeObject* get_histogramdd_structseq(); +PyTypeObject* get_kthvalue_structseq(); +PyTypeObject* get_kthvalue_out_structseq(); +PyTypeObject* get_linalg_cholesky_ex_structseq(); +PyTypeObject* get_linalg_cholesky_ex_out_structseq(); +PyTypeObject* get_linalg_eig_structseq(); +PyTypeObject* get_linalg_eig_out_structseq(); +PyTypeObject* get_linalg_eigh_structseq(); +PyTypeObject* get_linalg_eigh_out_structseq(); +PyTypeObject* get_linalg_inv_ex_structseq(); +PyTypeObject* get_linalg_inv_ex_out_structseq(); +PyTypeObject* get_linalg_ldl_factor_structseq(); +PyTypeObject* get_linalg_ldl_factor_out_structseq(); +PyTypeObject* get_linalg_ldl_factor_ex_structseq(); +PyTypeObject* get_linalg_ldl_factor_ex_out_structseq(); +PyTypeObject* get_linalg_lstsq_structseq(); +PyTypeObject* get_linalg_lstsq_out_structseq(); +PyTypeObject* get_linalg_lu_structseq(); +PyTypeObject* get_linalg_lu_out_structseq(); +PyTypeObject* get_linalg_lu_factor_structseq(); +PyTypeObject* get_linalg_lu_factor_out_structseq(); +PyTypeObject* get_linalg_lu_factor_ex_structseq(); +PyTypeObject* get_linalg_lu_factor_ex_out_structseq(); +PyTypeObject* get_linalg_qr_structseq(); +PyTypeObject* get_linalg_qr_out_structseq(); +PyTypeObject* get_linalg_slogdet_structseq(); 
+PyTypeObject* get_linalg_slogdet_out_structseq(); +PyTypeObject* get_linalg_solve_ex_structseq(); +PyTypeObject* get_linalg_solve_ex_out_structseq(); +PyTypeObject* get_linalg_svd_structseq(); +PyTypeObject* get_linalg_svd_out_structseq(); +PyTypeObject* get_lu_unpack_structseq(); +PyTypeObject* get_lu_unpack_out_structseq(); +PyTypeObject* get_max_structseq(); +PyTypeObject* get_max_out_structseq(); +PyTypeObject* get_median_structseq(); +PyTypeObject* get_median_out_structseq(); +PyTypeObject* get_min_structseq(); +PyTypeObject* get_min_out_structseq(); +PyTypeObject* get_mode_structseq(); +PyTypeObject* get_mode_out_structseq(); +PyTypeObject* get_nanmedian_structseq(); +PyTypeObject* get_nanmedian_out_structseq(); +PyTypeObject* get_qr_out_structseq(); +PyTypeObject* get_qr_structseq(); +PyTypeObject* get_slogdet_structseq(); +PyTypeObject* get_slogdet_out_structseq(); +PyTypeObject* get_sort_out_structseq(); +PyTypeObject* get_sort_structseq(); +PyTypeObject* get_svd_out_structseq(); +PyTypeObject* get_svd_structseq(); +PyTypeObject* get_topk_out_structseq(); +PyTypeObject* get_topk_structseq(); +PyTypeObject* get_triangular_solve_out_structseq(); +PyTypeObject* get_triangular_solve_structseq(); + +} + +void initReturnTypes(PyObject* module); + +} // namespace autograd +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/variable_factories.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/variable_factories.h new file mode 100644 index 0000000000000000000000000000000000000000..7c19adf33b8c9fd62bebac761e45b4ac8788aaa9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/variable_factories.h @@ -0,0 +1,736 @@ +#pragma once + +// @generated from ../tools/autograd/templates/variable_factories.h + +#include +#include +#include +#include +#include +#include +#include + +#ifndef AT_PER_OPERATOR_HEADERS 
+#include +#else +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#endif + +#include +#include +#include + +namespace torch { + +/// NOTE: Currently `torch::tensor(...)` doesn't support mixed data types +/// (i.e. `torch::tensor({{bool, 2.0}})` doesn't work). We might be able to +/// support it in the future by iterating over all sub-lists to find +/// the largest data type that can represent all of the elements, or by using +/// variadic templates. +/// +/// NOTE: C++ `torch::tensor` with a floating-point type or an `at::ArrayRef` / `std::vector` / +/// (nested) braced-init-list of floating-point types always produces a tensor of dtype +/// `torch::get_default_dtype()`, matching Python `torch.tensor` behavior. +/// +/// NOTE: C++ `torch::tensor` with an integer type or an `at::ArrayRef` / `std::vector` / +/// (nested) braced-init-list of integer types always produces a tensor of dtype `at::kLong` +/// (aka. int64_t), matching Python `torch.tensor` behavior. 
+/// +/// NOTE: The following dtypes are not supported by `torch::tensor` currently: +/// - `unsigned int` +/// - `unsigned long int` +/// - `unsigned long long int` +/// - `long long int` +inline at::Tensor tensor(detail::TensorDataContainer tensor_data_container, const at::TensorOptions& options = {}) { + return autograd::make_variable( + // note: we remove the requires_grad setting from the TensorOptions because + // it is ignored anyways (and we actually have an assertion that it isn't set + // which would fail otherwise). We handle requires_grad explicitly here + // instead of passing it through to the kernel. + tensor_data_container.convert_to_tensor(options.requires_grad(c10::nullopt)), + options.requires_grad()); +} + +/// A generic deleter function. +using Deleter = std::function; +using at::MemoryFormat; + +/// Exposes the given `data` as a `Tensor` without taking ownership of the +/// original data. `sizes` should specify the shape of the tensor, `strides` the +/// stride in each dimension. The `deleter` function (a +/// `std::function`) will be called on the `data` when the Tensor +/// data would normally be deallocated. The `TensorOptions` specify additional +/// configuration options for the returned tensor, such as what type to +/// interpret the `data` as. +inline at::Tensor from_blob( + void* data, + at::IntArrayRef sizes, + at::IntArrayRef strides, + const Deleter& deleter, + const at::TensorOptions& options = at::TensorOptions()) { + at::Tensor tensor = ([&]() { + at::AutoDispatchBelowAutograd guard; // TODO: remove + at::tracer::impl::NoTracerDispatchMode tracer_guard; + return at::from_blob(data, sizes, strides, deleter, options.requires_grad(c10::nullopt)); + })(); + return autograd::make_variable(tensor, options.requires_grad()); +} + +/// Exposes the given `data` as a `Tensor` without taking ownership of the +/// original data. `sizes` should specify the shape of the tensor, `strides` the +/// stride in each dimension. 
The `TensorOptions` +/// specify additional configuration options for the returned tensor, such as +/// what type to interpret the `data` as. +inline at::Tensor from_blob( + void* data, + at::IntArrayRef sizes, + at::IntArrayRef strides, + const at::TensorOptions& options = at::TensorOptions()) { + at::Tensor tensor = ([&]() { + at::AutoDispatchBelowAutograd guard; // TODO: remove + at::tracer::impl::NoTracerDispatchMode tracer_guard; + return at::from_blob(data, sizes, strides, options.requires_grad(c10::nullopt)); + })(); + return autograd::make_variable(tensor, options.requires_grad()); +} + +/// Exposes the given `data` as a `Tensor` without taking ownership of the +/// original data. `sizes` should specify the shape of the tensor. The `deleter` +/// (a `std::function`) function will be called on the `data` when +/// the Tensor data would normally be deallocated. The `TensorOptions` specify +/// additional configuration options for the returned tensor, such as what type +/// to interpret the `data` as. +inline at::Tensor from_blob( + void* data, + at::IntArrayRef sizes, + const Deleter& deleter, + const at::TensorOptions& options = at::TensorOptions()) { + at::Tensor tensor = ([&]() { + at::AutoDispatchBelowAutograd guard; // TODO: remove + at::tracer::impl::NoTracerDispatchMode tracer_guard; + return at::from_blob(data, sizes, deleter, options.requires_grad(c10::nullopt)); + })(); + return autograd::make_variable(tensor, options.requires_grad()); +} + +/// Exposes the given `data` as a `Tensor` without taking ownership of the +/// original data. `sizes` should specify the shape of the tensor. The +/// `TensorOptions` specify additional configuration options for the returned +/// tensor, such as what type to interpret the `data` as. 
+inline at::Tensor from_blob( + void* data, + at::IntArrayRef sizes, + const at::TensorOptions& options = at::TensorOptions()) { + at::Tensor tensor = ([&]() { + at::AutoDispatchBelowAutograd guard; // TODO: remove + at::tracer::impl::NoTracerDispatchMode tracer_guard; + return at::from_blob(data, sizes, options.requires_grad(c10::nullopt)); + })(); + return autograd::make_variable(tensor, options.requires_grad()); +} + +inline at::Tensor _make_dep_token(at::TensorOptions options = {}, c10::optional memory_format = c10::nullopt) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::_make_dep_token(at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor _cudnn_init_dropout_state(double dropout, bool train, int64_t dropout_seed, at::TensorOptions options) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::_cudnn_init_dropout_state(dropout, train, dropout_seed, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor arange(const at::Scalar & end, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::arange(end, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor arange(const at::Scalar & start, const at::Scalar & end, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::arange(start, end, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor arange(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::arange(start, end, step, 
at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor bartlett_window(int64_t window_length, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::bartlett_window(window_length, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor bartlett_window(int64_t window_length, bool periodic, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::bartlett_window(window_length, periodic, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor blackman_window(int64_t window_length, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::blackman_window(window_length, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor blackman_window(int64_t window_length, bool periodic, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::blackman_window(window_length, periodic, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor empty(at::IntArrayRef size, c10::optional names, at::TensorOptions options = {}, c10::optional memory_format = c10::nullopt) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::empty(size, names, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor empty(at::IntArrayRef size, at::TensorOptions options = {}, c10::optional memory_format = c10::nullopt) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::empty(size, 
at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor empty_symint(c10::SymIntArrayRef size, at::TensorOptions options = {}, c10::optional memory_format = c10::nullopt) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::empty_symint(size, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor empty_permuted(at::IntArrayRef size, at::IntArrayRef physical_layout, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::empty_permuted(size, physical_layout, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor empty_permuted_symint(c10::SymIntArrayRef size, at::IntArrayRef physical_layout, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::empty_permuted_symint(size, physical_layout, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor _empty_affine_quantized(at::IntArrayRef size, at::TensorOptions options = {}, double scale = 1, int64_t zero_point = 0, c10::optional memory_format = MemoryFormat::Contiguous) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::_empty_affine_quantized(size, at::TensorOptions(options).requires_grad(c10::nullopt), scale, zero_point, memory_format), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor _empty_affine_quantized_symint(c10::SymIntArrayRef size, at::TensorOptions options = {}, double scale = 1, int64_t zero_point = 0, c10::optional memory_format = MemoryFormat::Contiguous) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::_empty_affine_quantized_symint(size, 
at::TensorOptions(options).requires_grad(c10::nullopt), scale, zero_point, memory_format), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor _empty_per_channel_affine_quantized(at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::TensorOptions options = {}, c10::optional memory_format = MemoryFormat::Contiguous) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::_empty_per_channel_affine_quantized(size, scales, zero_points, axis, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor _empty_per_channel_affine_quantized_symint(c10::SymIntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::TensorOptions options = {}, c10::optional memory_format = MemoryFormat::Contiguous) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::_empty_per_channel_affine_quantized_symint(size, scales, zero_points, axis, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor empty_quantized(at::IntArrayRef size, const at::Tensor & qtensor, at::TensorOptions options = {}, c10::optional memory_format = c10::nullopt) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::empty_quantized(size, qtensor, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor empty_like(const at::Tensor & self, at::TensorOptions options = {}, c10::optional memory_format = c10::nullopt) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::empty_like(self, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor empty_strided(at::IntArrayRef size, at::IntArrayRef 
stride, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::empty_strided(size, stride, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor empty_strided_symint(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::empty_strided_symint(size, stride, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor eye(int64_t n, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::eye(n, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor eye_symint(c10::SymInt n, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::eye_symint(n, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor eye(int64_t n, int64_t m, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::eye(n, m, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor eye_symint(c10::SymInt n, c10::SymInt m, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::eye_symint(n, m, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, c10::optional names, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::full(size, fill_value, names, 
at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::full(size, fill_value, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor full_symint(c10::SymIntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::full_symint(size, fill_value, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor full_like(const at::Tensor & self, const at::Scalar & fill_value, at::TensorOptions options = {}, c10::optional memory_format = c10::nullopt) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::full_like(self, fill_value, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor from_file(c10::string_view filename, c10::optional shared = c10::nullopt, c10::optional size = 0, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::from_file(filename, shared, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor hann_window(int64_t window_length, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::hann_window(window_length, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor hann_window(int64_t window_length, bool periodic, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return 
autograd::make_variable(at::hann_window(window_length, periodic, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor hamming_window(int64_t window_length, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::hamming_window(window_length, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor hamming_window(int64_t window_length, bool periodic, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::hamming_window(window_length, periodic, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor hamming_window(int64_t window_length, bool periodic, double alpha, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::hamming_window(window_length, periodic, alpha, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor hamming_window(int64_t window_length, bool periodic, double alpha, double beta, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::hamming_window(window_length, periodic, alpha, beta, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor kaiser_window(int64_t window_length, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::kaiser_window(window_length, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor kaiser_window(int64_t window_length, bool periodic, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + 
return autograd::make_variable(at::kaiser_window(window_length, periodic, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor kaiser_window(int64_t window_length, bool periodic, double beta, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::kaiser_window(window_length, periodic, beta, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor linspace(const at::Scalar & start, const at::Scalar & end, int64_t steps, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::linspace(start, end, steps, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor linspace(const at::Tensor & start, const at::Tensor & end, int64_t steps, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::linspace(start, end, steps, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor linspace(const at::Tensor & start, const at::Scalar & end, int64_t steps, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::linspace(start, end, steps, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor linspace(const at::Scalar & start, const at::Tensor & end, int64_t steps, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::linspace(start, end, steps, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor logspace(const at::Scalar & start, const at::Scalar & end, int64_t steps, double 
base = 10.0, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::logspace(start, end, steps, base, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor logspace(const at::Tensor & start, const at::Tensor & end, int64_t steps, double base = 10.0, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::logspace(start, end, steps, base, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor logspace(const at::Tensor & start, const at::Scalar & end, int64_t steps, double base = 10.0, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::logspace(start, end, steps, base, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor logspace(const at::Scalar & start, const at::Tensor & end, int64_t steps, double base = 10.0, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::logspace(start, end, steps, base, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor ones(at::IntArrayRef size, c10::optional names, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::ones(size, names, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor ones(at::IntArrayRef size, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::ones(size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor 
ones_symint(c10::SymIntArrayRef size, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::ones_symint(size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor ones_like(const at::Tensor & self, at::TensorOptions options = {}, c10::optional memory_format = c10::nullopt) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::ones_like(self, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor scalar_tensor(const at::Scalar & s, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::scalar_tensor(s, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor rand(at::IntArrayRef size, c10::optional names, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::rand(size, names, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor rand_symint(c10::SymIntArrayRef size, c10::optional names, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::rand_symint(size, names, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor rand(at::IntArrayRef size, c10::optional generator, c10::optional names, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::rand(size, generator, names, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor rand_symint(c10::SymIntArrayRef size, c10::optional generator, c10::optional names, 
at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::rand_symint(size, generator, names, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor rand(at::IntArrayRef size, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::rand(size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor rand_symint(c10::SymIntArrayRef size, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::rand_symint(size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor rand(at::IntArrayRef size, c10::optional generator, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::rand(size, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor rand_symint(c10::SymIntArrayRef size, c10::optional generator, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::rand_symint(size, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor rand_like(const at::Tensor & self, at::TensorOptions options = {}, c10::optional memory_format = c10::nullopt) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::rand_like(self, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor randint(int64_t high, at::IntArrayRef size, at::TensorOptions options = at::kLong) { + at::AutoDispatchBelowADInplaceOrView guard; + return 
autograd::make_variable(at::randint(high, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor randint_symint(c10::SymInt high, c10::SymIntArrayRef size, at::TensorOptions options = at::kLong) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::randint_symint(high, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor randint(int64_t high, at::IntArrayRef size, c10::optional generator, at::TensorOptions options = at::kLong) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::randint(high, size, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor randint_symint(c10::SymInt high, c10::SymIntArrayRef size, c10::optional generator, at::TensorOptions options = at::kLong) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::randint_symint(high, size, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor randint(int64_t low, int64_t high, at::IntArrayRef size, at::TensorOptions options = at::kLong) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::randint(low, high, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor randint_symint(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, at::TensorOptions options = at::kLong) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::randint_symint(low, high, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor randint(int64_t low, int64_t high, at::IntArrayRef size, c10::optional generator, 
at::TensorOptions options = at::kLong) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::randint(low, high, size, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor randint_symint(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, c10::optional generator, at::TensorOptions options = at::kLong) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::randint_symint(low, high, size, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor randint_like(const at::Tensor & self, int64_t high, at::TensorOptions options = {}, c10::optional memory_format = c10::nullopt) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::randint_like(self, high, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor randint_like_symint(const at::Tensor & self, c10::SymInt high, at::TensorOptions options = {}, c10::optional memory_format = c10::nullopt) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::randint_like_symint(self, high, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor randint_like(const at::Tensor & self, int64_t low, int64_t high, at::TensorOptions options = {}, c10::optional memory_format = c10::nullopt) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::randint_like(self, low, high, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor randint_like_symint(const at::Tensor & self, c10::SymInt low, c10::SymInt high, at::TensorOptions options = {}, c10::optional memory_format = c10::nullopt) { + 
at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::randint_like_symint(self, low, high, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor randn(at::IntArrayRef size, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::randn(size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor randn_symint(c10::SymIntArrayRef size, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::randn_symint(size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor randn(at::IntArrayRef size, c10::optional generator, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::randn(size, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor randn_symint(c10::SymIntArrayRef size, c10::optional generator, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::randn_symint(size, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor randn(at::IntArrayRef size, c10::optional names, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::randn(size, names, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor randn_symint(c10::SymIntArrayRef size, c10::optional names, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::randn_symint(size, names, 
at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor randn(at::IntArrayRef size, c10::optional generator, c10::optional names, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::randn(size, generator, names, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor randn_symint(c10::SymIntArrayRef size, c10::optional generator, c10::optional names, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::randn_symint(size, generator, names, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor randn_like(const at::Tensor & self, at::TensorOptions options = {}, c10::optional memory_format = c10::nullopt) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::randn_like(self, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor randperm(int64_t n, at::TensorOptions options = at::kLong) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::randperm(n, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor randperm_symint(c10::SymInt n, at::TensorOptions options = at::kLong) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::randperm_symint(n, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor randperm(int64_t n, c10::optional generator, at::TensorOptions options = at::kLong) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::randperm(n, generator, 
at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor randperm_symint(c10::SymInt n, c10::optional generator, at::TensorOptions options = at::kLong) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::randperm_symint(n, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor range(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step = 1, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::range(start, end, step, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor range(const at::Scalar & start, const at::Scalar & end, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::range(start, end, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor zeros(at::IntArrayRef size, c10::optional names, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::zeros(size, names, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor _efficientzerotensor(at::IntArrayRef size, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::_efficientzerotensor(size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor _efficientzerotensor_symint(c10::SymIntArrayRef size, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::_efficientzerotensor_symint(size, at::TensorOptions(options).requires_grad(c10::nullopt)), 
/*requires_grad=*/options.requires_grad()); +} +inline at::Tensor zeros(at::IntArrayRef size, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::zeros(size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor zeros_symint(c10::SymIntArrayRef size, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::zeros_symint(size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor zeros_like(const at::Tensor & self, at::TensorOptions options = {}, c10::optional memory_format = c10::nullopt) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::zeros_like(self, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor sparse_compressed_tensor(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::sparse_compressed_tensor(compressed_indices, plain_indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor sparse_compressed_tensor_symint(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::SymIntArrayRef size, at::TensorOptions options) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::sparse_compressed_tensor_symint(compressed_indices, plain_indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor sparse_csr_tensor(const at::Tensor & crow_indices, const at::Tensor & 
col_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::sparse_csr_tensor(crow_indices, col_indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor sparse_csc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::sparse_csc_tensor(ccol_indices, row_indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor sparse_bsr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::sparse_bsr_tensor(crow_indices, col_indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor sparse_bsc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::sparse_bsc_tensor(ccol_indices, row_indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor sparse_compressed_tensor(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::TensorOptions options) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::sparse_compressed_tensor(compressed_indices, plain_indices, values, at::TensorOptions(options).requires_grad(c10::nullopt)), 
/*requires_grad=*/options.requires_grad()); +} +inline at::Tensor sparse_csr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::TensorOptions options) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::sparse_csr_tensor(crow_indices, col_indices, values, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor sparse_csc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::TensorOptions options) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::sparse_csc_tensor(ccol_indices, row_indices, values, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor sparse_bsr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::TensorOptions options) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::sparse_bsr_tensor(crow_indices, col_indices, values, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor sparse_bsc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::TensorOptions options) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::sparse_bsc_tensor(ccol_indices, row_indices, values, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor _sparse_compressed_tensor_unsafe(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::_sparse_compressed_tensor_unsafe(compressed_indices, plain_indices, 
values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor _sparse_compressed_tensor_unsafe_symint(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::SymIntArrayRef size, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::_sparse_compressed_tensor_unsafe_symint(compressed_indices, plain_indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor _sparse_csr_tensor_unsafe(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::_sparse_csr_tensor_unsafe(crow_indices, col_indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor _sparse_csc_tensor_unsafe(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::_sparse_csc_tensor_unsafe(ccol_indices, row_indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor _sparse_bsr_tensor_unsafe(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::_sparse_bsr_tensor_unsafe(crow_indices, col_indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor 
_sparse_bsc_tensor_unsafe(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::_sparse_bsc_tensor_unsafe(ccol_indices, row_indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor sparse_coo_tensor(at::IntArrayRef size, at::TensorOptions options) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::sparse_coo_tensor(size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor sparse_coo_tensor(const at::Tensor & indices, const at::Tensor & values, at::TensorOptions options = {}, c10::optional is_coalesced = c10::nullopt) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::sparse_coo_tensor(indices, values, at::TensorOptions(options).requires_grad(c10::nullopt), is_coalesced), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor sparse_coo_tensor(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options = {}, c10::optional is_coalesced = c10::nullopt) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::sparse_coo_tensor(indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt), is_coalesced), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor _sparse_coo_tensor_unsafe(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options = {}, c10::optional is_coalesced = c10::nullopt) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::_sparse_coo_tensor_unsafe(indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt), is_coalesced), 
/*requires_grad=*/options.requires_grad()); +} +inline at::Tensor _sparse_coo_tensor_unsafe_symint(const at::Tensor & indices, const at::Tensor & values, c10::SymIntArrayRef size, at::TensorOptions options = {}, c10::optional is_coalesced = c10::nullopt) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::_sparse_coo_tensor_unsafe_symint(indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt), is_coalesced), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor _sparse_coo_tensor_with_dims(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, at::TensorOptions options) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::_sparse_coo_tensor_with_dims(sparse_dim, dense_dim, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor _sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values, at::TensorOptions options, c10::optional is_coalesced = c10::nullopt) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::_sparse_coo_tensor_with_dims_and_tensors(sparse_dim, dense_dim, size, indices, values, at::TensorOptions(options).requires_grad(c10::nullopt), is_coalesced), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor _sparse_coo_tensor_with_dims_and_tensors_symint(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, at::TensorOptions options, c10::optional is_coalesced = c10::nullopt) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::_sparse_coo_tensor_with_dims_and_tensors_symint(sparse_dim, dense_dim, size, indices, values, at::TensorOptions(options).requires_grad(c10::nullopt), is_coalesced), /*requires_grad=*/options.requires_grad()); +} +inline 
at::Tensor _to_copy(const at::Tensor & self, at::TensorOptions options = {}, bool non_blocking = false, c10::optional memory_format = c10::nullopt) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::_to_copy(self, at::TensorOptions(options).requires_grad(c10::nullopt), non_blocking, memory_format), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor tril_indices(int64_t row, int64_t col, int64_t offset = 0, at::TensorOptions options = at::kLong) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::tril_indices(row, col, offset, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor triu_indices(int64_t row, int64_t col, int64_t offset = 0, at::TensorOptions options = at::kLong) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::triu_indices(row, col, offset, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor normal(double mean, double std, at::IntArrayRef size, c10::optional generator = c10::nullopt, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::normal(mean, std, size, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor normal_symint(double mean, double std, c10::SymIntArrayRef size, c10::optional generator = c10::nullopt, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::normal_symint(mean, std, size, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor fft_fftfreq(int64_t n, double d = 1.0, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return 
autograd::make_variable(at::fft_fftfreq(n, d, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} +inline at::Tensor fft_rfftfreq(int64_t n, double d = 1.0, at::TensorOptions options = {}) { + at::AutoDispatchBelowADInplaceOrView guard; + return autograd::make_variable(at::fft_rfftfreq(n, d, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad()); +} + +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/graph_task.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/graph_task.h new file mode 100644 index 0000000000000000000000000000000000000000..03a9647cad833479e4fd2ac75987aca21f1e0633 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/graph_task.h @@ -0,0 +1,226 @@ +#pragma once +#include +#include +#include +#include +#include +#include + +namespace torch::autograd { + +using edge_list = std::vector; +struct ReadyQueue; + +static constexpr int NO_DEVICE = -2; +static constexpr int CPU_DEVICE = -1; + +// GraphTask holds metadata needed for a single execution of backward() +struct GraphTask : std::enable_shared_from_this { + std::atomic outstanding_tasks_{0}; + // Indicates if an error occurred while executing any task. When this is + // true, it signals all threads to stop executing. + std::atomic_bool has_error_{false}; + std::atomic_bool future_completed_{false}; + // It is safe to read keep_graph_ without synchronization + bool keep_graph_; + + // To protect reads/writes to not_ready_, dependencies_, captured_vars_, + // has_error_, future_result_, cpu_ready_queue_, and leaf_streams. 
+ std::mutex mutex_; + std::unordered_map not_ready_; + std::unordered_map dependencies_; + + // Records the nodes that are in the graph + std::unordered_set nodes_in_graph_; + c10::SmallVector graph_roots_; + // Note [Exec info] + // Exec info is created for each GraphTask, which allows filtering paths on + // the graph that are not needed. It has a bit complicated semantics. If it's + // empty, it means the task is run in a "default" mode, which means that all + // next_edges we encounter should get executed. If it's not empty, only + // functions that have an entry and this entry has needed == True should be + // executed. exec_info is only empty when the graph is executed via + // .backward() and the inputs parameter is not passed. Otherwise, when + // executed through .grad(), or when inputs arg is specified for .backward(), + // exec_info will be non-empty. + // + struct ExecInfo { + struct Capture { + Capture(const Capture&) = delete; + Capture(Capture&&) = default; + + Capture(int input_idx, int output_idx) + : input_idx_(input_idx), output_idx_(output_idx) {} + int input_idx_; // within Node inputs + int output_idx_; // within the output vector of a GraphTask + + // This hook will be executed after a grad is captured. The captured + // grad will be replaced by the return value of the hook. + struct GradCaptureHook { + virtual ~GradCaptureHook() = default; + virtual at::Tensor operator()(const at::Tensor& grad) = 0; + }; + // NOTE [Deprecated capture hooks] + // + // The current status of capture hooks is that we continue to support + // the single usage of it by distributed in the dist_engine. If anyone + // else needs to use it for other purposes, they should file an issue. + // + // Capture hooks were originally created because there did not exist + // any way to register pre/post hooks to grad_fn in a way such that it + // would still be executed even if that is the grad_fn of a Tensor + // passed as input= of .grad. 
As far as I know, only dist_engine uses + // this hook. + // + // However, there are other alternatives today like tensor hooks that can + // replace the usage that originally motivated its creation. Also, + // Captures hooks are an outlier in terms of the types of hook that + // autograd offers in how it is registered and behaves, e.g. it is a hook + // registered not to the graph, but to a particular graph_task! This makes + // it a burden to maintain. + // + // It would be very nice to clean up/do a migration from pre/post + // hooks used in distributed to use tensor hooks, but for now we just + // mark this method as deprecated to prevent additional usage. + // + // If you still think you really need to capture hooks, please file an + // issue (and tag autograd). + const std::vector>& + DO_NOT_USE_DEPRECATED_get_capture_hooks() const { + return hooks_; + } + // See NOTE [deprecated capture hooks] + void DO_NOT_USE_DEPRECATED_register_capture_hook( + std::unique_ptr hook) { + hooks_.push_back(std::move(hook)); + } + + private: + // The hooks will be called one by one in the order as they were added. + // The input grad of a hook will be the output of its preceding hook. The + // first hook will take the captured grad as the input. The output of the + // last hook will replace the captured grad. + std::vector> hooks_; + }; + + bool should_execute() const { + return needed_ || captures_; + } + + bool needed_ = false; + std::unique_ptr> captures_; + }; + // exec_info_ is safe to read without synchronization + std::unordered_map exec_info_; + // Captures variables are grads captured that we return to the user. After + // execution of the GraphTask is completed, the captured_vars_ are moved + // out of the GraphTask and are no longer valid. + std::vector captured_vars_; + + // Note: this field is not ready to be used until the proper + // `thread_locals_.set_grad_mode()` call in the constructor. 
+ at::ThreadLocalState thread_locals_ = at::ThreadLocalState(); + + std::unordered_set leaf_streams; + + // Per-device current streams of the execute() that called this GraphTask. + // These will be synced with leaf_streams in exec_post_processing. + std::vector> caller_current_streams_; + + // Collects caller_current_streams_ for the accelerator device. + void stash_current_streams(); + + void init_to_execute( + Node& graph_root, + const edge_list& outputs, + bool accumulate_grad, + uint64_t min_topo_nr); + + // The value of worker_device in the thread that created this task. + // See Note [Reentrant backwards] + // Safe to read owner_ and reentrant_depth_ without synchronization + int owner_; + // The number of parent graph tasks for this graph task + // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members) + const int reentrant_depth_; + + bool can_checkpoint() const { + return exec_info_.empty(); + } + + // check if the GraphTask is completed or not + bool completed(); + // mark the graph task as completed and trigger post processing + void mark_as_completed_and_run_post_processing(); + + // Set an appropriate exception on this graph_task which was encountered while + // running the provided function. + void set_exception(std::exception_ptr eptr, const std::shared_ptr& fn); + + // Set an appropriate exception on this graph_task which was encountered while + // running the provided function. But doesn't signal completion on + // 'future_result_' right away. The user needs to explicitly mark + // 'future_result_' completed with an appropriate exception. + void set_exception_without_signal(const std::shared_ptr& fn); + + // Whether or not to stop execution for this GraphTask when an error is + // encountered. When set to true, this would cause Engine::execute() to throw + // an exception as soon as the autograd engine receives an exception. 
+ bool exit_on_error_; + + // CPU threads are dedicated to processing CPU work for the backward they + // invoked. So any given graph task maintains its own cpu_ready_queue_ where + // you should send work for it to be done. We memoize the cpu_ready_queue_ per + // GraphTask so that we know which ready queue we should push to if we are on + // device thread (i.e. GPU) and but next NodeTask should be run on CPU. + std::shared_ptr cpu_ready_queue_; + + // Future representing the completion of the graph task. Notified when all + // tasks are done. + c10::intrusive_ptr future_result_; + + // Final callbacks installed during execution of this GraphTask + std::vector> final_callbacks_; + // To protect reads and writes to final_callbacks_. Intentionally no reusing + // mutex_ as the two are protecting different data structures. + std::mutex final_callbacks_lock_; + + utils::DelayWarningHandler warning_handler_; + + uint64_t id_; + + GraphTask( + bool keep_graph, + bool grad_mode, + int reentrant_depth, + std::shared_ptr cpu_ready_queue, + c10::SmallVector graph_roots, + bool exit_on_error = false); + + private: + // run GraphTask post processing + void exec_post_processing(); +}; + +// The guard that sets and restores current_graph_task. 
+class GraphTaskGuard { + public: + explicit GraphTaskGuard(std::shared_ptr graph_task); + ~GraphTaskGuard(); + + void restore_current_graph_task(); + + private: + std::shared_ptr last_graph_task_; +}; + +TORCH_API const std::unordered_map* +get_current_graph_task_exec_info(); +TORCH_API const std::unordered_set* +get_current_graph_task_nodes_in_graph(); +TORCH_API bool get_current_graph_task_keep_graph(); +TORCH_API std::vector get_current_graph_task_execution_order(); +TORCH_API int get_current_graph_task_id(); +void add_node_to_current_graph_task_exec_info(Node* fn); + +} // namespace torch::autograd diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/input_buffer.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/input_buffer.h new file mode 100644 index 0000000000000000000000000000000000000000..d8ef3396cb6d860520a9b950f8a9bbb7cc63da55 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/input_buffer.h @@ -0,0 +1,45 @@ +#pragma once + +// The InputBuffer class accumulates a list of Variables for use by a +// function. It implements logic to avoid modifying the passed +// values in-place (adding an input twice will accumulate the result). +// This behaviour is needed and used only in backward graphs. + +#include +#include + +#include +#include +#include + +namespace torch::autograd { + +struct InputBuffer { + explicit InputBuffer(size_t size) : buffer(size) {} + InputBuffer(const InputBuffer& other) = delete; + InputBuffer(InputBuffer&& other) = default; + explicit InputBuffer(variable_list&& inputs) : buffer(std::move(inputs)){}; + InputBuffer& operator=(InputBuffer&& other) = default; + + // Accumulates the variable at a specified index. + // The optional CUDA streams determine which stream the accumulation + // is run on and how the addition is synchronized. 
+ TORCH_API void add( + size_t pos, + Variable&& var, + const c10::optional& opt_producer_stream, + const c10::optional& opt_consumer_stream); + + at::Device device() const; + + Variable operator[](size_t pos) { + return buffer[pos]; + } + + // Returns the inputs as a list of variables. Destroys given InputBuffer. + static std::vector variables(InputBuffer&& g); + + std::vector buffer; +}; + +} // namespace torch::autograd diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/jit_decomp_interface.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/jit_decomp_interface.h new file mode 100644 index 0000000000000000000000000000000000000000..d20c425d28bb1a54057953424b20df03a16d58d5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/jit_decomp_interface.h @@ -0,0 +1,50 @@ +#pragma once + +#include +#include +#include + +// NOTE: [Jit Decomposition Interface] +// +// For some context of why we need this at all, see NOTE: [forward-mode AD +// decompositions mechanism] +// +// Introducing that mechanism from the NOTE is problematic because: +// - it relies on TorchScript, so now VariableTypeX.cpp depends on TorchScript. +// - there exist internal builds like lite_trainer, which depend on VariableType +// but do not depend on TorchScript. 
+// +// For internal builds like lite_trainer builds to pass, and for OSS builds that +// do depend on TorchScript to still support the forward AD decomp mechanism, we +// implement a PImpl pattern to avoid a static dependency in favor of a dynamic +// one +// - during static initialization time, if the library is built with TorchScript +// setJitDecompImpl is called in decomposition_registry.cpp setting a global +// ptr to the impl +// - when the program is run,if getJitDecompImpl returns a non null ptr, we can +// carry on normally, otherwise we gracefully error out +// +// For extra context, see VariableHooksInterface.h, where a similar technique +// is used + +namespace torch::autograd::impl { + +struct TORCH_API JitDecompInterface { + virtual ~JitDecompInterface() = default; + virtual bool has_jit_decomposition( + const c10::FunctionSchema& schema) const = 0; + virtual void run_jit_decomposition( + const c10::OperatorHandle& op, + jit::Stack* stack) const = 0; +}; + +TORCH_API void setJitDecompImpl(JitDecompInterface* impl); +TORCH_API JitDecompInterface* getJitDecompImpl(); + +struct TORCH_API JitDecompRegisterer { + explicit JitDecompRegisterer(JitDecompInterface* impl) { + setJitDecompImpl(impl); + } +}; + +} // namespace torch::autograd::impl diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/profiler.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/profiler.h new file mode 100644 index 0000000000000000000000000000000000000000..519f49005f776412a448bd773547575b1685214c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/profiler.h @@ -0,0 +1,4 @@ +#pragma once + +#include +#include diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/profiler_kineto.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/profiler_kineto.h new file mode 100644 index 
0000000000000000000000000000000000000000..6ea7cf63d6a0a04b8dc0d1ec5d725e2289744991 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/profiler_kineto.h @@ -0,0 +1,188 @@ +#pragma once + +#include +#include + +#include +#include +#include +#include + +namespace torch { + +namespace profiler::impl { +struct Result; +namespace kineto { +struct ActivityTraceWrapper; +} // namespace kineto +} // namespace profiler::impl + +namespace autograd::profiler { +using experimental_event_t = std::shared_ptr; +using extra_meta_t = std::unordered_map; + +struct TORCH_API KinetoEvent { + KinetoEvent( + const std::shared_ptr&, + const bool verbose); + + uint64_t startThreadId() const; + uint64_t endThreadId() const; + uint8_t activityType() const; + uint64_t fwdThreadId() const; + bool hasShapes() const; + const c10::ArrayRef> shapes() const; + bool hasTypes() const; + const c10::ArrayRef dtypes() const; + bool hasConcreteInputs() const; + const c10::ArrayRef concreteInputs() const; + uint64_t flops() const; + int64_t sequenceNr() const; + bool hasStack() const; + const c10::ArrayRef stack() const; + uint8_t scope() const; + bool hasModuleHierarchy() const; + const c10::ArrayRef moduleHierarchy() const; + int64_t debugHandle() const; + std::string name() const; + c10::DeviceType deviceType() const; + int deviceIndex() const; + int64_t nBytes() const; + uint64_t startUs() const; + uint64_t durationUs() const; + bool isAsync() const; + uint64_t correlationId() const; + uint64_t linkedCorrelationId() const; + int64_t deviceResourceId() const; + std::string backend() const; + bool isPythonFunction() const; + int64_t cudaElapsedUs() const; + int64_t privateuse1ElapsedUs() const; + void getPerfEventCounters(torch::profiler::perf_counters_t&) const; + extra_meta_t extraMeta() const; + + private: + torch::profiler::impl::ProfilerVoidEventStub fallbackStart() const; + torch::profiler::impl::ProfilerVoidEventStub fallbackEnd() const; + + 
std::shared_ptr result_; + std::vector python_stack_; + + // Copy fields from result so we can return ArrayRefs. + std::vector> shapes_; + std::vector dtypes_; + std::vector concrete_inputs_; +}; + +// Consolidating events returned directly from Kineto +// with events manually created by us (e.g. start/stop marks, +// memory allocation events) +struct TORCH_API ProfilerResult { + ProfilerResult(); + ProfilerResult( + uint64_t start_time, + std::vector events, + std::unique_ptr&& + trace, + std::vector&& event_tree); + ~ProfilerResult(); + + uint64_t trace_start_us() const { + return trace_start_us_; + } + + const std::vector& events() const { + return events_; + } + + const std::vector& event_tree() const { + return event_tree_; + } + + void save(const std::string& path); + + private: + uint64_t trace_start_us_ = 0; + std::vector events_; + std::unique_ptr trace_; + std::vector event_tree_; +}; + +/* + * This API is used by backends to record latency of events that + * happened in the backend but were not visible to pytorch runtime. + * For example, if part of the model is lowered to a dsp backend, then + * the execution of that part of the model is delegated to the backend. + * When backend finishes execution it has an option to provide profiling + * information (latency only at the moment) corresponding to different operators + * that were executed in the backend. + * When such events are recorded by backend using this API, the event + * records will be collected by active kineto profiler. If no kineto profiler + * is active then the event is ignored. + * This provides us with a way to generate all the profiling information + * for a model regardless of where model (or part of it) executed. + * @param start_time_us: start time in us of the event + * @param end_time_us: end time in us of the event + * @param debug_handle: debug handle to correlate this event/op with + * model level module/source information + * @param scope: scope of the event, e.g. 
LITE_INTERPRETER, RECORD_FN etc. + * @param event_name: name of the event, e.g. op name + * @param backend_name: name of the backend where the event took place. + */ +TORCH_API void reportBackendEventToActiveKinetoProfiler( + const int64_t start_time_us, + const int64_t end_time_us, + const int64_t debug_handle, + const at::RecordScope scope, + const std::string& event_name, + const std::string& backend_name); + +TORCH_API void enableProfiler( + const torch::profiler::impl::ProfilerConfig& config, + const std::set& activities, + const std::unordered_set& scopes = {}); + +/* + * Same as enableProfiler but with callback to do post-processing of + * KinetoEvents. + * enableProfilerWithEventPostProcess enables profiler to capture + * specified activities, with specified RecordFunction scope, if any. + * Additionally, it takes a functor that does in-place post processing of + * events, e.g. populate stack trace or module hierarchy information lazily + * using debug_handle. + * Example usage is with lite interpreter that has recording scope of + * LITE_INTERPRETER. In this case lite interpreter runtime, records debug + * handles in RecordFunction, along with other information. Debug handles are + * eventually passed down to KinetoEvent and recorded as part of the event. + * KinetoEdgeCPUProfiler, in torch/csrc/jit/mobile/profiler_edge.cpp, enables + * profiler using post-processing callback, via + * enableProfilerWithEventPostProcess, that takes these debug handles and + * generates stack trace and module hierarchy information, once profiling is + * done. 
+ */ +using post_process_t = std::function&, + /*jit_modules */ std::vector&)>; +TORCH_API void enableProfilerWithEventPostProcess( + const torch::profiler::impl::ProfilerConfig& config, + const std::set& activities, + post_process_t&& cb, + const std::unordered_set& scopes = {}); + +TORCH_API std::unique_ptr disableProfiler(); + +TORCH_API void prepareProfiler( + const torch::profiler::impl::ProfilerConfig& config, + const std::set& activities); + +} // namespace autograd::profiler + +namespace profiler::impl { + +// Experimental. +TORCH_API void _reportVulkanEventToProfiler(vulkan_id_t id); + +} // namespace profiler::impl + +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/profiler_legacy.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/profiler_legacy.h new file mode 100644 index 0000000000000000000000000000000000000000..e74ddd8a2296edbc75958063baf0d25e6467203f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/profiler_legacy.h @@ -0,0 +1,406 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +namespace torch::autograd { + +struct Node; + +namespace profiler { + +enum class C10_API_ENUM EventKind : uint16_t { + Mark, + PushRange, + PopRange, + MemoryAlloc, +}; + +// To be deprecated, once we switch to Kineto profiling +struct TORCH_API LegacyEvent { + LegacyEvent( + EventKind kind, + at::StringView name, + uint16_t thread_id, + bool record_cuda, + at::RecordFunctionHandle handle = 0, + std::vector>&& shapes = {}, + int64_t node_id = -1, + bool is_async = false) + : name_(std::move(name)), + kind_(kind), + thread_id_(thread_id), + handle_(handle), + shapes_(std::move(shapes)), + node_id_(node_id), + is_async_(is_async) { + record(record_cuda); + } + + // Constructor to be used in conjunction with LegacyEvent::fromIValue. 
+ LegacyEvent( + EventKind kind, + at::StringView name, + uint16_t thread_id, + at::RecordFunctionHandle handle, + std::vector>&& shapes, + int64_t node_id, + bool is_remote, + int64_t cpu_memory_usage, + int64_t cpu_ns, + bool cuda_recorded, + int64_t cuda_memory_usage = 0, + c10::DeviceIndex device = -1, + double cuda_us = -1) + : cpu_ns_(cpu_ns), + name_(std::move(name)), + kind_(kind), + thread_id_(thread_id), + handle_(handle), + shapes_(std::move(shapes)), + cpu_memory_usage_(cpu_memory_usage), + cuda_memory_usage_(cuda_memory_usage), + device_(device), + node_id_(node_id), + is_remote_(is_remote), + cuda_us_(static_cast(cuda_us)) { + // Sanity check values that were deserialized + TORCH_INTERNAL_ASSERT(cpu_ns_ > 0); + if (cuda_recorded) { + TORCH_INTERNAL_ASSERT(device_ >= 0); + TORCH_INTERNAL_ASSERT(cuda_us_ >= 0); + } + } + + // Returns IValues corresponding to event structure, to be used for + // serialization. + at::IValue toIValue() const; + + // Reconstructs an event from IValues given by toIValue. 
+ static LegacyEvent fromIValue(const at::IValue& eventIValue); + + void record(bool record_cuda); + + std::string kindStr() const { + switch (kind_) { + case EventKind::Mark: + return "mark"; + case EventKind::PushRange: + return "push"; + case EventKind::PopRange: + return "pop"; + case EventKind::MemoryAlloc: + return "memory_alloc"; + } + throw std::runtime_error("unknown event kind"); + } + + EventKind kind() const { + return kind_; + } + + const char* name() const { + return name_.str(); + } + + uint64_t threadId() const { + return thread_id_; + } + + std::vector> shapes() const { + return shapes_; + } + + double cpuElapsedUs(const LegacyEvent& e) const { + return static_cast(e.cpu_ns_ - cpu_ns_) / (1000.0); + } + + void setCpuUs(int64_t cpu_us) { + cpu_ns_ = cpu_us * 1000; + } + + double cpuUs() const { + return static_cast(cpu_ns_) / (1000.0); + } + + double cudaElapsedUs(const LegacyEvent& e) const; + + bool hasCuda() const { + return cuda_event != nullptr || (isRemote() && device_ != -1); + } + + c10::DeviceIndex device() const { + return device_; + } + + void updateMemoryStats(int64_t alloc_size, c10::Device device) { + if (device.is_cuda() || device.type() == c10::DeviceType::HIP) { + cuda_memory_usage_ = alloc_size; + } else if ( + device.is_cpu() || device.type() == c10::DeviceType::MKLDNN || + device.type() == c10::DeviceType::IDEEP) { + cpu_memory_usage_ = alloc_size; + } else { + LOG(WARNING) << "Unsupported memory profiling device: " << device; + } + } + + int64_t cpuMemoryUsage() const { + return cpu_memory_usage_; + } + + int64_t cudaMemoryUsage() const { + return cuda_memory_usage_; + } + + at::RecordFunctionHandle handle() const { + return handle_; + } + + // Node ID corresponding to this event. + int64_t nodeId() const { + return node_id_; + } + + // Set Node ID on this event. 
+ void setNodeId(int64_t node_id) { + node_id_ = node_id; + } + + void setName(at::StringView newName_) { + name_ = std::move(newName_); + } + + bool isRemote() const { + return is_remote_; + } + + void setCudaUs(int64_t cuda_us) { + cuda_us_ = cuda_us; + } + + void setSequenceNr(int64_t sequence_nr) { + sequence_nr_ = sequence_nr; + } + + int64_t sequenceNr() const { + return sequence_nr_; + } + + void setCorrelationId(uint64_t correlation_id) { + correlation_id_ = correlation_id; + } + + uint64_t correlationId() const { + return correlation_id_; + } + + const std::vector& stack() const { + return stack_; + } + + void setStack(const std::vector& stack) { + stack_ = stack; + } + + uint64_t fwdThreadId() const { + return fwd_thread_id_; + } + + void setFwdThreadId(uint64_t fwd_thread_id) { + fwd_thread_id_ = fwd_thread_id; + } + + uint8_t scope() const { + return scope_; + } + + void setScope(uint8_t scope) { + scope_ = scope; + } + + const std::unordered_map& extraArgs() const { + return extra_args_; + } + + void setExtraArgs(std::unordered_map&& save_args) { + extra_args_ = std::move(save_args); + } + + uint64_t flops() { + return flops_; + } + + bool isAsync() { + return is_async_; + } + + void setFlops(uint64_t flops) { + flops_ = flops; + } + + private: + // signed to allow for negative intervals, initialized for safety. 
+ int64_t cpu_ns_ = 0; + at::StringView name_; + EventKind kind_; + uint64_t thread_id_; + uint64_t fwd_thread_id_{0}; + at::RecordFunctionHandle handle_{0}; + std::vector> shapes_; + int64_t cpu_memory_usage_ = 0; + int64_t cuda_memory_usage_ = 0; + c10::DeviceIndex device_ = -1; + torch::profiler::impl::ProfilerVoidEventStub cuda_event = nullptr; + int64_t node_id_ = 0; + bool is_remote_ = false; + int64_t cuda_us_ = -1; + int64_t sequence_nr_ = -1; + bool is_async_ = false; + + std::vector stack_; + uint8_t scope_{0}; + uint64_t correlation_id_{0}; + // Extra arguments for computing op flops + std::unordered_map extra_args_; + uint64_t flops_ = 0; +}; + +// a linked-list of fixed sized vectors, to avoid +// a std::vector resize from taking a large amount of time inside +// a profiling event +struct RangeEventList { + RangeEventList() { + events_.reserve(kReservedCapacity); + } + + template + void record(Args&&... args) { + std::lock_guard guard(mutex_); + events_.emplace_back(std::forward(args)...); + } + + std::vector consolidate() { + std::lock_guard lock(mutex_); + std::vector result; + result.insert( + result.begin(), + std::make_move_iterator(events_.begin()), + std::make_move_iterator(events_.end())); + events_.erase(events_.begin(), events_.end()); + return result; + } + + size_t size() { + std::lock_guard lock(mutex_); + return events_.size(); + } + + private: + // This mutex is used to serialize access when different threads are writing + // to the same instance of RangeEventList. + std::mutex mutex_; + std::vector events_; + + static const size_t kReservedCapacity = 1024; +}; + +// A struct to control settings of disableProfiler options. 
+struct TORCH_API ProfilerDisableOptions { + ProfilerDisableOptions() = default; + ProfilerDisableOptions(bool shouldCleanupTLSState, bool shouldConsolidate) + : cleanupTLSState(shouldCleanupTLSState), + consolidate(shouldConsolidate) {} + // Whether we should clean up profiler states that are thread local, such as + // ThreadLocalDebugInfo and thread local RecordFunction callbacks. + bool cleanupTLSState = true; + // Whether we should consolidate all currently recorded profiled events. If + // false, will not consolidate and other threads can continue to write to the + // event lists. + bool consolidate = true; +}; + +// NOTE: profiler mode is thread local, with automatic propagation +// across thread boundary (e.g. at::launch tasks) +TORCH_API void enableProfilerLegacy( + const torch::profiler::impl::ProfilerConfig&); +using thread_event_lists = std::vector>; +TORCH_API thread_event_lists disableProfilerLegacy( + c10::optional profilerDisableOptions = + c10::nullopt); + +// adds profiledEvents to the current thread local recorded events. Each event +// will be marked with node ID given by fromNodeId. +TORCH_API void addEventList(std::vector&& profiledEvents); +// Writes profiled events to a stream. 
+TORCH_API void writeProfilerEventsToStream( + std::ostream& out, + const std::vector& events); + +// Usage: +// { +// RecordProfile guard("filename.trace"); +// // code you want to profile +// } +// Then open filename.trace in chrome://tracing +struct TORCH_API RecordProfile { + RecordProfile(std::ostream& out); + RecordProfile(const std::string& filename); + + ~RecordProfile(); + + private: + void init(); + std::unique_ptr file_; + std::ostream& out_; + void processEvents(const std::vector& events); +}; + +// A guard that enables the legacy profiler, taking in an optional callback to +// process the results Usage: +// { +// TLSLegacyProfilerGuard g([](thread_event_lists profilerResults) { +// // process profilerResults +// }); +// Code to profile +// } +struct TORCH_API TLSLegacyProfilerGuard { + explicit TLSLegacyProfilerGuard( + const torch::profiler::impl::ProfilerConfig& cfg, + c10::optional> + resultCallback = c10::nullopt, + c10::optional profilerDisableOptions = + c10::nullopt) + : cb_(std::move(resultCallback)), + profilerDisableOptions_(profilerDisableOptions) { + enableProfilerLegacy(cfg); + } + ~TLSLegacyProfilerGuard() { + thread_event_lists event_lists = + disableProfilerLegacy(profilerDisableOptions_); + if (cb_) { + try { + (*cb_)(event_lists); + } catch (const std::exception& e) { + LOG(ERROR) << "Got error processing profiler events: " << e.what(); + } + } + } + + private: + c10::optional> cb_; + // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members) + const c10::optional profilerDisableOptions_; +}; + +} // namespace profiler +} // namespace torch::autograd diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_anomaly_mode.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_anomaly_mode.h new file mode 100644 index 0000000000000000000000000000000000000000..df4b12df863b1a2569e679a87282900474dc8678 --- /dev/null +++ 
b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_anomaly_mode.h @@ -0,0 +1,44 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch { +namespace autograd { + +struct PyAnomalyMetadata : public AnomalyMetadata { + static constexpr const char* ANOMALY_TRACE_KEY = "traceback_"; + static constexpr const char* ANOMALY_PARENT_KEY = "parent_"; + + PyAnomalyMetadata() { + pybind11::gil_scoped_acquire gil; + // NOLINTNEXTLINE(cppcoreguidelines-prefer-member-initializer) + dict_ = PyDict_New(); + } + ~PyAnomalyMetadata() override { + // If python is already dead, leak the wrapped python objects + if (Py_IsInitialized()) { + pybind11::gil_scoped_acquire gil; + Py_DECREF(dict_); + } + } + void store_stack() override; + void print_stack(const std::string& current_node_name) override; + void assign_parent(const std::shared_ptr& parent_node) override; + + PyObject* dict() { + return dict_; + } + + private: + PyObject* dict_{nullptr}; +}; +void _print_stack( + PyObject* trace_stack, + const std::string& current_node_name, + bool is_parent); + +} // namespace autograd +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_autograd.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_autograd.h new file mode 100644 index 0000000000000000000000000000000000000000..a854d30c895ce7f537727adec3caf3b617d03af0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_autograd.h @@ -0,0 +1,17 @@ +#ifndef THP_AUTOGRAD_H +#define THP_AUTOGRAD_H + +PyObject* THPAutograd_initExtension(PyObject* _unused, PyObject* unused); +void THPAutograd_initFunctions(); + +namespace torch::autograd { + +PyMethodDef* python_functions(); + +} + +#include +#include +#include + +#endif diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_enum_tag.h 
b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_enum_tag.h new file mode 100644 index 0000000000000000000000000000000000000000..060a8ca21697ed7b8d49f8e733e3c958d19d0475 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_enum_tag.h @@ -0,0 +1,7 @@ +#pragma once + +#include + +namespace torch::autograd { +void initEnumTag(PyObject* module); +} diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_legacy_variable.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_legacy_variable.h new file mode 100644 index 0000000000000000000000000000000000000000..f4beb768f426e4fe1f6345cc0d8c75b4ca19b941 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_legacy_variable.h @@ -0,0 +1,12 @@ +#pragma once + +// Instantiates torch._C._LegacyVariableBase, which defines the Python +// constructor (__new__) for torch.autograd.Variable. 
+ +#include + +namespace torch::autograd { + +void init_legacy_variable(PyObject* module); + +} diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_nested_functions.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_nested_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..73197504c4e7b814cc1fa8bd9880a322b8a8bdb2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_nested_functions.h @@ -0,0 +1,9 @@ +#pragma once + +namespace torch::autograd { + +PyMethodDef* get_nested_functions_manual(); + +void initNestedFunctions(PyObject* module); + +} // namespace torch::autograd diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_nn_functions.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_nn_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..54dc6e1b293b1f48fc21af870bf9708ae903264e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_nn_functions.h @@ -0,0 +1,7 @@ +#pragma once + +namespace torch::autograd { + +void initNNFunctions(PyObject* module); + +} diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_sparse_functions.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_sparse_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..d97018c51981c79a0ebc4011ff170bd240759a07 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_sparse_functions.h @@ -0,0 +1,7 @@ +#pragma once + +namespace torch::autograd { + +void initSparseFunctions(PyObject* module); + +} diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_variable.h 
b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_variable.h new file mode 100644 index 0000000000000000000000000000000000000000..e3f0fc8a05efb25d0a1c5baa7009f874b326a827 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_variable.h @@ -0,0 +1,114 @@ +#pragma once + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +namespace py = pybind11; + +// Python object that backs torch.autograd.Variable +struct THPVariable { + PyObject_HEAD; + // Payload + c10::MaybeOwned cdata; + // Hooks to be run on backwards pass (corresponds to Python attr + // '_backwards_hooks', set by 'register_hook') + PyObject* backward_hooks = nullptr; + // Hooks to be run in the backwards pass after accumulate grad, + // i.e., after the .grad has been set (corresponds to Python attr + // '_post_accumulate_grad_hooks', set by 'register_post_accumulate_grad_hook') + PyObject* post_accumulate_grad_hooks = nullptr; +}; + +TORCH_PYTHON_API void registerPythonTensorClass( + const std::string& device, + PyObject* python_tensor_class); + +TORCH_PYTHON_API void activateCUDATrace(); + +TORCH_PYTHON_API extern PyObject* THPVariableClass; +TORCH_PYTHON_API extern PyObject* ParameterClass; + +bool THPVariable_initModule(PyObject* module); +TORCH_PYTHON_API PyObject* THPVariable_Wrap(at::TensorBase var); + +static inline bool THPVariable_CheckTypeExact(PyTypeObject* tp) { + // Check that a python object is a `Tensor`, but not a `Tensor` subclass. + // (A subclass could have different semantics.) The one exception is + // Parameter, which is used for Python bookkeeping but is equivalent to + // Tensor as far as C++ is concerned. 
+ return ( + tp == (PyTypeObject*)THPVariableClass || + tp == (PyTypeObject*)ParameterClass); +} + +static inline bool THPVariable_CheckExact(PyObject* obj) { + return THPVariable_CheckTypeExact(Py_TYPE(obj)); +} + +inline bool THPVariable_Check(PyObject* obj) { + if (!THPVariableClass) + return false; + + // Fast path + if (THPVariable_CheckExact(obj)) { + return true; + } + + const auto result = PyObject_IsInstance(obj, THPVariableClass); + if (result == -1) + throw python_error(); + return result; +} + +inline const at::Tensor& THPVariable_Unpack(THPVariable* var) { + return *var->cdata; +} + +inline const at::Tensor& THPVariable_Unpack(PyObject* obj) { + return THPVariable_Unpack(reinterpret_cast(obj)); +} + +std::pair parseIValuesToPyArgsKwargs( + const c10::OperatorHandle& op, + const std::vector& arguments); + +void pushPyOutToStack( + const c10::OperatorHandle& op, + torch::jit::Stack* stack, + py::object out, + const char* msg); + +inline PyObject* THPVariable_WrapList( + const torch::autograd::variable_list& inputs) { + PyObject* pyinput = PyList_New(static_cast(inputs.size())); + for (const auto i : c10::irange(inputs.size())) { + PyList_SET_ITEM(pyinput, i, THPVariable_Wrap(inputs[i])); + } + return pyinput; +} + +inline torch::autograd::variable_list THPVariable_UnpackList( + PyObject* pyresult) { + TORCH_CHECK(PyList_CheckExact(pyresult)); + auto result_len = PyList_GET_SIZE(pyresult); + torch::autograd::variable_list result; + result.reserve(result_len); + for (const auto i : c10::irange(result_len)) { + PyObject* item = PyList_GET_ITEM(pyresult, i); + if (!Py_IsNone(item)) { + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(THPVariable_Check(item)); + result.emplace_back(THPVariable_Unpack(item)); + } else { + result.emplace_back(); + } + } + return result; +} diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_variable_indexing.h 
b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_variable_indexing.h new file mode 100644 index 0000000000000000000000000000000000000000..a0e35a6e9eff905e0211ce150937a3366d303a65 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_variable_indexing.h @@ -0,0 +1,102 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace torch::autograd { + +struct UnpackedSlice { + c10::SymInt start; + c10::SymInt stop; + c10::SymInt step; +}; + +// This mirrors Cpython's PySlice_Unpack method +static inline UnpackedSlice __PySlice_Unpack(PyObject* _r) { + PySliceObject* r = (PySliceObject*)_r; + /* this is harder to get right than you might think */ + + c10::SymInt start_sym, stop_sym, step_sym; + + auto clip_val = [](Py_ssize_t val) { + if (val < c10::SymInt::min_representable_int()) { + auto r = PyErr_WarnEx( + PyExc_UserWarning, + "Truncating the start/stop/step " + "of slice. This is likely because of " + "saved old models when the start/stop/step were larger.", + 1); + if (r != 0) { + throw python_error(); + } + return (Py_ssize_t)(c10::SymInt::min_representable_int()); + } + return val; + }; + + if (r->step == Py_None) { + step_sym = c10::SymInt(1); + } else { + if (torch::is_symint(r->step)) { + auto step_sym = py::handle(r->step).cast(); + } else { + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + Py_ssize_t step; + if (!_PyEval_SliceIndex(r->step, &step)) { + throw python_error(); + } + if (step == 0) { + PyErr_SetString(PyExc_ValueError, "slice step cannot be zero"); + } + + step = clip_val(step); + step_sym = c10::SymInt(step); + } + } + + if (torch::is_symint(r->start)) { + start_sym = py::handle(r->start).cast(); + } else if (r->start == Py_None) { + start_sym = c10::SymInt(step_sym < 0 ? 
PY_SSIZE_T_MAX : 0); + } else { + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + Py_ssize_t start; + if (!_PyEval_SliceIndex(r->start, &start)) { + throw python_error(); + } + start = clip_val(start); + start_sym = c10::SymInt(start); + } + + if (torch::is_symint(r->stop)) { + stop_sym = py::handle(r->stop).cast(); + } else if (r->stop == Py_None) { + stop_sym = c10::SymInt( + step_sym < 0 ? c10::SymInt::min_representable_int() : PY_SSIZE_T_MAX); + } else { + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + Py_ssize_t stop; + if (!_PyEval_SliceIndex(r->stop, &stop)) { + throw python_error(); + } + stop = clip_val(stop); + stop_sym = c10::SymInt(stop); + } + + return UnpackedSlice{ + std::move(start_sym), std::move(stop_sym), std::move(step_sym)}; +} + +Py_ssize_t THPVariable_length(PyObject* self); +PyObject* THPVariable_getitem(PyObject* self, PyObject* index); +int THPVariable_setitem(PyObject* self, PyObject* index, PyObject* value); + +Variable valueToTensor( + c10::TensorOptions options, + PyObject* value, + const at::Device& device); + +} // namespace torch::autograd diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/saved_variable_hooks.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/saved_variable_hooks.h new file mode 100644 index 0000000000000000000000000000000000000000..2bbc8f92d4266025df959183e0da273a866ab703 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/saved_variable_hooks.h @@ -0,0 +1,13 @@ +#pragma once + +#include + +namespace torch::autograd { + +struct TORCH_API SavedVariableHooks { + virtual void call_pack_hook(const at::Tensor& tensor) = 0; + virtual at::Tensor call_unpack_hook() = 0; + virtual ~SavedVariableHooks() = default; +}; + +} // namespace torch::autograd diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/error_messages.h 
b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/error_messages.h new file mode 100644 index 0000000000000000000000000000000000000000..da655883f3f69db316a23362de485ba412d212b5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/error_messages.h @@ -0,0 +1,22 @@ +#pragma once + +#include + +namespace torch { +namespace autograd { +namespace utils { + +inline std::string requires_grad_leaf_error(bool requires_grad) { + std::ostringstream oss; + oss << "you can only change requires_grad flags of leaf variables."; + if (requires_grad == false) { + oss << " If you want to use a computed variable in a subgraph " + "that doesn't require differentiation use " + "var_no_grad = var.detach()."; + } + return oss.str(); +} + +} // namespace utils +} // namespace autograd +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/grad_layout_contract.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/grad_layout_contract.h new file mode 100644 index 0000000000000000000000000000000000000000..1dad10663dd70bff2a71631d337e83d71c4614e7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/grad_layout_contract.h @@ -0,0 +1,80 @@ +#pragma once + +#include + +namespace torch { +namespace autograd { +namespace utils { + +// Helper functions to enforce the "Gradient Layout Contract" described in +// torch/csrc/autograd/functions/accumulate_grad.h. + +// Checks if grad obeys the contract with variable. 
+inline bool obeys_layout_contract( + const at::Tensor& grad, + const at::Tensor& variable) { + TORCH_INTERNAL_ASSERT(!grad.is_sparse()); + TORCH_INTERNAL_ASSERT(!grad.is_sparse_csr()); + TORCH_INTERNAL_ASSERT(!variable.is_sparse_csr()); + + // NOLINTNEXTLINE(bugprone-branch-clone) + if (variable.is_nested()) { + // TODO: Nested Tensor does not have an implementation of detach. The + // current implementation of nested tensor likely does obey the gradient + // contract and should return true, but this would likely change in the + // future + return false; + } else if (variable.is_sparse()) { + // Gradient Layout Contract is not applicable for sparse layouts + return false; + } else if (variable.is_non_overlapping_and_dense()) { + // Only look at stride for dimensions that are not of size 1. + const auto& grad_sizes = grad.sym_sizes(); + const auto& grad_strides = grad.sym_strides(); + const auto& variable_strides = variable.sym_strides(); + for (const auto idx : c10::irange(grad_sizes.size())) { + if (grad_sizes[idx] != 1) { + if (grad_strides[idx] != variable_strides[idx]) { + return false; + } + } else { + // This should not be needed but we don't check if a Tensor has views + // before stashing it. And 0-strided Tensors of size 1 are actually + // views for ops like cat. + // TODO: Actually detect views in the accumulateGrad function so that + // this Tensor is not considered at all. + if (grad_strides[idx] == 0) { + return false; + } + } + } + return true; + } else { + return grad.is_contiguous(at::MemoryFormat::Contiguous); + } +} + +// Creates a clone of new_grad that obeys the contract with variable. +// The clone should attach to new_grad's history if GradMode::is_enabled(). +inline at::Tensor clone_obey_contract( + const at::Tensor& new_grad, + const at::Tensor& variable) { + if (variable.is_non_overlapping_and_dense()) { + // (1) + // Does this dicey-looking sequence attach the result to new_grad's + // history if GradMode::is_enabled()? 
Yes, and @alband says it should. + return std::move(new_grad + .new_empty_strided_symint( + variable.sym_sizes(), + variable.sym_strides(), + variable.options().memory_format(c10::nullopt)) + .copy_(new_grad)); + } else { + // (2) + return new_grad.clone(at::MemoryFormat::Contiguous); + } +} + +} // namespace utils +} // namespace autograd +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/lambda_post_hook.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/lambda_post_hook.h new file mode 100644 index 0000000000000000000000000000000000000000..ade2389363ebf0cea72b59454851d861dba0fe07 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/lambda_post_hook.h @@ -0,0 +1,40 @@ +#pragma once + +#include + +namespace torch { +namespace autograd { +namespace utils { + +// Turns lambda into a torch::autograd::FunctionPostHook. +class LambdaPostHook : public torch::autograd::FunctionPostHook { + using variable_list = std::vector; + using fn_type = + std::function; + using compiled_fn_type = std::function; + + public: + // The lambda function takes as arguments the outputs and inputs of the + // autograd function and can modify the outputs of the autograd function by + // returning a new output if needed. 
+ /* implicit */ LambdaPostHook(fn_type fn) : fn_(std::move(fn)) {} + + LambdaPostHook(fn_type fn, compiled_fn_type compiled_fn) + : fn_(std::move(fn)), compiled_fn_(std::move(compiled_fn)) {} + + variable_list operator()( + const variable_list& outputs, + const variable_list& inputs) override { + return fn_(outputs, inputs); + } + + void compiled_args(CompiledNodeArgs& args) override {} + + protected: + std::function fn_; + compiled_fn_type compiled_fn_; +}; + +} // namespace utils +} // namespace autograd +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/python_arg_parsing.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/python_arg_parsing.h new file mode 100644 index 0000000000000000000000000000000000000000..7701e97fe9189cb49a2c98cfa5cc1f5e41190941 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/python_arg_parsing.h @@ -0,0 +1,53 @@ +#pragma once + +#include +#include + +#include + +namespace torch { +namespace autograd { +namespace utils { + +// The parameter allow_copy is to accept copy for Tensor.to (and by proxy +// PackedSequences.to) but not nn.Module.to. 
+inline std::tuple< + c10::optional, + c10::optional, + bool, + bool, + c10::optional> +parse_to_conversion(PythonArgs& r, bool allow_copy) { + if (r.idx == 0) { + if (!allow_copy && !r.isNone(3)) + throw std::runtime_error(".to() does not accept copy argument"); + return std::make_tuple( + r.deviceOptional(0), + r.scalartypeOptional(1), + r.toBool(2), + r.toBool(3), + r.memoryformatOptional(4)); + } else if (r.idx == 1) { + if (!allow_copy && !r.isNone(2)) + throw std::runtime_error(".to() does not accept copy argument"); + return std::make_tuple( + c10::nullopt, + r.scalartype(0), + r.toBool(1), + r.toBool(2), + r.memoryformatOptional(3)); + } else { + auto tensor = r.tensor(0); + if (!allow_copy && !r.isNone(2)) + throw std::runtime_error(".to() does not accept copy argument"); + return std::make_tuple( + tensor.device(), + tensor.scalar_type(), + r.toBool(1), + r.toBool(2), + r.memoryformatOptional(3)); + } +} +} // namespace utils +} // namespace autograd +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/warnings.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/warnings.h new file mode 100644 index 0000000000000000000000000000000000000000..92e3c3611eadd18abc0ce10dbb481f61d83d2c80 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/warnings.h @@ -0,0 +1,28 @@ +#pragma once +#include + +#include +#include + +namespace torch { +namespace autograd { +namespace utils { + +// Warning handler for multi-threaded contexts. Gather warnings from +// all threads into a single queue, then process together at the end +// in the main thread. 
+class DelayWarningHandler : public at::WarningHandler { + public: + ~DelayWarningHandler() override = default; + void replay_warnings(); + + private: + void process(const c10::Warning& warning) override; + + std::vector warnings_; + std::mutex mutex_; +}; + +} // namespace utils +} // namespace autograd +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/wrap_outputs.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/wrap_outputs.h new file mode 100644 index 0000000000000000000000000000000000000000..ac79ac27efeb6d28be355a63c5bb97563b3cb742 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/wrap_outputs.h @@ -0,0 +1,155 @@ +#pragma once + +// Wrap tensor operation outputs as PyObject* + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace autograd { +namespace utils { + +inline PyObject* wrap(bool value) { + if (value) { + Py_RETURN_TRUE; + } else { + Py_RETURN_FALSE; + } +} + +inline PyObject* wrap(c10::DeviceIndex value) { + return THPUtils_packDeviceIndex(value); +} + +inline PyObject* wrap(int64_t value) { + return THPUtils_packInt64(value); +} + +inline PyObject* wrap(double value) { + return PyFloat_FromDouble(value); +} + +inline PyObject* wrap(c10::complex value) { + // I could probably also use FromComplex with a reinterpret cast, + // but... eh. 
+ return PyComplex_FromDoubles(value.real(), value.imag()); +} + +inline PyObject* wrap(void* value) { + return THPUtils_packInt64(reinterpret_cast(value)); +} + +inline PyObject* wrap(THPDtype* dtype) { + Py_INCREF(dtype); + return (PyObject*)dtype; +} + +inline PyObject* wrap(at::ScalarType scalarType) { + return wrap(getTHPDtype(scalarType)); +} + +inline PyObject* wrap(THPLayout* layout) { + Py_INCREF(layout); + return (PyObject*)layout; +} + +inline PyObject* wrap(at::Layout layout) { + return wrap(getTHPLayout(layout)); +} + +inline PyObject* wrap(at::Tensor tensor) { + return THPVariable_Wrap(Variable(std::move(tensor))); +} + +inline PyObject* wrap(const at::Scalar& scalar) { + return wrap(scalar_to_tensor(scalar)); +} + +inline PyObject* wrap(at::QScheme qscheme) { + auto* thp_qscheme = torch::utils::getTHPQScheme(qscheme); + Py_INCREF(thp_qscheme); + return thp_qscheme; +} + +inline PyObject* wrap(at::TensorList tl) { + auto r = THPObjectPtr{PyTuple_New(tl.size())}; + if (!r) + throw python_error(); + for (const auto i : c10::irange(tl.size())) { + PyTuple_SET_ITEM(r.get(), i, wrap(tl[i])); + } + return r.release(); +} + +inline PyObject* wrap(at::IntArrayRef list) { + auto r = THPObjectPtr{PyTuple_New(list.size())}; + if (!r) + throw python_error(); + for (const auto i : c10::irange(list.size())) { + PyTuple_SET_ITEM(r.get(), i, wrap(list[i])); + } + return r.release(); +} + +inline PyObject* wrap(at::Stream stream) { + return THPStream_Wrap(stream); +} + +namespace detail { +template +void apply_with_idx_impl( + const F& f, + Tuple& t, + std::index_sequence /*indices*/) { + (void)std::initializer_list{(f(std::get(t), Is), 0)...}; +} + +// For tuple(a, b, c), calls f(a, 0), f(b, 1), f(c, 2) +template +void apply_with_idx(const F& f, std::tuple& t) { + apply_with_idx_impl(f, t, std::index_sequence_for{}); +} +} // namespace detail + +template +PyObject* wrap(std::tuple values) { + auto r = THPObjectPtr{PyTuple_New(sizeof...(Ts))}; + if (!r) + throw 
python_error(); + detail::apply_with_idx( + [&](auto& value, size_t idx) { + PyTuple_SET_ITEM(r.get(), idx, wrap(std::move(value))); + }, + values); + return r.release(); +} + +template +PyObject* wrap(PyTypeObject* type, std::tuple values) { + auto r = THPObjectPtr{PyStructSequence_New(type)}; + if (!r) + throw python_error(); + detail::apply_with_idx( + [&](auto& value, size_t idx) { + PyStructSequence_SET_ITEM(r.get(), idx, wrap(std::move(value))); + }, + values); + return r.release(); +} + +} // namespace utils +} // namespace autograd +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/variable.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/variable.h new file mode 100644 index 0000000000000000000000000000000000000000..4fa2a1749bed4450d7e620be30b9b4e438882c92 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/variable.h @@ -0,0 +1,948 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +namespace torch::autograd { + +/// `Variable` is exactly the same as `Tensor` (i.e. we have `using Variable = +/// at::Tensor`). This means you can perform all the usual mathematical and +/// other operations you can perform on `Tensor`s also on `Variable`s. +/// +/// The only reason we are keeping the `Variable` class is backward +/// compatibility with external user's legacy C++ frontend code. Our intention +/// is to eliminate the `Variable` class in the near future. +using Variable = at::Tensor; + +} // namespace torch::autograd + +// The following are all internal APIs and should not be shown in libtorch docs. +// Therefore, we wrap the following code with `#ifndef DOXYGEN_SHOULD_SKIP_THIS +// ... 
#endif` + +#ifndef DOXYGEN_SHOULD_SKIP_THIS + +namespace torch::autograd { + +/// Check if this type is supported by the autograd engine. +/// If you change this, update the doc at the top of the +/// torch/autograd/__init__.py file and +/// "test_set_requires_grad_only_for_continuous_types" in test/test_autograd.py +static inline bool isDifferentiableType(at::ScalarType t) { + return isFloatingType(t) || isComplexType(t); +} + +struct Node; + +///~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +/// Variable +///~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +/// A `Variable` augments a `Tensor` with the ability to interact in our +/// autograd machinery. Conceptually, `Variable`s travel along `Edge`s between +/// `Node`s in the autograd graph. A `Variable` can either be a leaf, like a +/// weight in a neural network, or an interior variable, when it is the result +/// of an operation between variables. Every `Variable` also stores another +/// `Variable` called its `grad` (gradient). If the variable is a leaf, its +/// gradient will be accumulated into this variable. +/// +/// Every Tensor is a Variable, but sometimes we colloquially refer to Variables +/// that don't require gradients as Tensors (since none of the autograd +/// machinery for Variables applies). Historically, Variables and Tensors +/// were separate concepts, but now they are exactly the same (i.e. we have +/// `using Variable = at::Tensor`). +/// +/// Gradient Edges +///~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +/// Furthermore, `Variable`s have the notion of a `gradient_edge`, which is the +/// edge in the autograd graph that connects the variable to a particular input +/// of the gradient function that will be invoked with the variable during the +/// backward pass. More precisely, this gradient function can be one of two +/// things: +/// 1. 
A `grad_fn`, if the variable is in the interior of the graph. This is the +/// gradient of the function that produced the variable. +/// 2. A `grad_accumulator`, if the variable is a leaf, which accumulates a +/// scalar gradient value into its `grad` variable. +/// +/// Versioning +///~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +/// Another major feature of `Variable`s are *versions*. Versions are +/// incremented when an in-place mutation of a variable occurs. Versions are +/// useful when constructing `SavedVariable`s, which take a snapshot of a +/// `Variable` at a certain version. You can retrieve a `Variable`'s version +/// through its `current_version()` method. +/// +/// Views +///~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +/// It is possible for a `Variable` to be a *view* of another `Variable`, in +/// which case it tracks that `Variable`'s data and autograd history. Beyond +/// construction, the interface of a view is identical to that of a regular +/// `Variable`. You can determine whether `Variable` is in fact a view by +/// probing its `is_view()` method. Note that the *view* semantics are only +/// meaningful for `Variable` relations that are relevant to autograd. +/// See NOTE [ Autograd View Variables ] for more details. +///~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +struct AutogradMeta; +struct DifferentiableViewMeta; + +// Private-ish functions for manipulating variables; we don't want to put them +// on Tensor proper +namespace impl { + +// WARNING: This may return a nullptr. If you require AutogradMeta to return +// a materialized structure, use materialize_autograd_meta instead. +TORCH_API AutogradMeta* get_autograd_meta(const at::TensorBase&); + +// WARNING: This will return a nullptr if the Tensor is not a view. 
+TORCH_API DifferentiableViewMeta* get_view_autograd_meta(const at::TensorBase&); + +// Returns the current autograd meta, materializing it if it was previously +// none. This counts as a *mutating* operation, so do not call it on +// "read-only" operators; in particular, this is NOT thread safe +TORCH_API AutogradMeta* materialize_autograd_meta(const at::TensorBase&); + +/// Set the gradient accumulator of the `Variable`. This is only applicable to +/// leaf variables. Interior variables should call `set_gradient_edge()`. +TORCH_API void set_grad_accumulator( + const Variable&, + std::weak_ptr grad_accumulator); + +/// Attempts to get a pointer to the gradient accumulator of the `Variable`, +/// if it still exists. If the gradient accumulator function has been +/// destroyed, returns a `nullptr`. +TORCH_API std::shared_ptr try_get_grad_accumulator(const Variable&); + +/// Gets the gradient accumulator of the `Variable` if it has one, or else +/// create one on the fly and return it. +TORCH_API std::shared_ptr grad_accumulator(const Variable&); + +/// Returns the "canonical" gradient edge of this `Variable`, i.e. either the +/// gradient function if this is an interior `Variable`, or the gradient +/// accumulator otherwise. If the `Variable` is interior, the returned `Edge` +/// will store the input index of the `Node` to which this variable is +/// connected in its `input_nr` field. For leaves, the `input_nr` is always +/// zero. Note that `set_gradient_edge` and `gradient_edge` are not +/// symmetric. You must use `set_gradient_edge` to set the `grad_fn` and +/// `set_grad_accumulator` to set the accumulator. +TORCH_API Edge gradient_edge(const Variable&); + +/// Set the gradient edge -- i.e. `grad_fn` and `input_nr` -- of the +/// `Variable`. +/// NOTE: This will always set the `grad_fn`, even if this is a leaf variable, +/// and never the `grad_accumulator`. For the latter, use +/// `set_grad_accumulator`. 
This allows late construction of an interior +/// `Variable`. +TORCH_API void set_gradient_edge(const Variable&, Edge edge); + +// Autograd Graph Interaction +//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Update the `grad_fn` of an existing Variable. Called after in-place +/// modifications. +/// +/// For View Variables: +/// Called after in-place modifications. Modifies the grad_fn of the base +/// Variable. +TORCH_API void rebase_history(const Variable&, Edge gradient_edge); + +/// Gets the raw gradient function pointer, whatever it currently is. +TORCH_API Node* grad_fn_unsafe(const Variable&); + +/// Increments the version count of this `Variable`. +TORCH_API void bump_version(const Variable&); +TORCH_API void set_version_counter( + const Variable&, + const c10::VariableVersion& version_counter); + +/// Retrieves this `Variable`s version counter. +TORCH_API const c10::VariableVersion& version_counter(const Variable&); + +TORCH_API void set_name(const Variable&, const std::string& name); + +TORCH_API void add_hook( + const at::TensorBase&, + std::unique_ptr hook); +TORCH_API std::vector>& hooks(const Variable&); +TORCH_API void clear_hooks(const at::TensorBase&); + +TORCH_API void set_post_acc_grad_hooks( + const at::TensorBase&, + std::unique_ptr dict); +TORCH_API std::unique_ptr& post_acc_grad_hooks( + const Variable&); + +TORCH_API void create_cpp_hook( + const at::TensorBase&, + bool is_retains_grad_hooks = false); +} // namespace impl + +//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// AutogradMeta +//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Each `Variable` has one unique `AutogradMeta` struct, which stores autograd +/// metadata fields that are necessary for tracking the Variable's autograd +/// history. As an optimization, a Variable may store a nullptr, in lieu of a +/// default constructed AutogradMeta. 
+ +struct TORCH_API AutogradMeta : public c10::AutogradMetaInterface { + std::string name_; + + Variable grad_; + std::shared_ptr grad_fn_; + std::weak_ptr grad_accumulator_; + + // This field is used to store all the forward AD gradients + // associated with this AutogradMeta (and the Tensor it corresponds to) + // There is a semantic 1:1 correspondence between AutogradMeta and + // ForwardGrad but: + // - This field is lazily populated. + // - This field is a shared_ptr but it must never be + // shared by multiple Tensors. See Note [ Using ForwardGrad ] + // Any transition from not_initialized to initialized + // must be protected by mutex_ + mutable std::shared_ptr fw_grad_; + + // The hooks_ field is actually reused by both python and cpp logic + // For both cases, we have a data structure, cpp_hooks_list_ (cpp) + // or dict (python) which is the canonical copy. + // Then, for both cases, we always register a single hook to + // hooks_ which wraps all the hooks in the list/dict. + // And, again in both cases, if the grad_fn exists on that tensor + // we will additionally register a single hook to the grad_fn. + // + // Note that the cpp and python use cases aren't actually aware of + // each other, so using both is not defined behavior. + std::vector> hooks_; + std::shared_ptr cpp_hooks_list_; + + // The post_acc_grad_hooks_ field stores only Python hooks + // (PyFunctionTensorPostAccGradHooks) that are called after the + // .grad field has been accumulated into. This is less complicated + // than the hooks_ field, which encapsulates a lot more. + std::unique_ptr post_acc_grad_hooks_ = nullptr; + + // Only meaningful on leaf variables (must be false otherwise) + bool requires_grad_{false}; + + // Only meaningful on non-leaf variables (must be false otherwise) + bool retains_grad_{false}; + + bool is_view_{false}; + + // The "output number" of this variable; e.g., if this variable + // was the second output of a function, then output_nr == 1. 
+ // We use this to make sure we can setup the backwards trace + // correctly when this variable is passed to another function. + uint32_t output_nr_; + + // Mutex to ensure that concurrent read operations that modify internal + // state are still thread-safe. Used by grad_fn(), grad_accumulator(), + // fw_grad() and set_fw_grad() + // This is mutable because we need to be able to acquire this from const + // version of this class for the functions above + mutable std::mutex mutex_; + + /// Sets the `requires_grad` property of `Variable`. This should be true for + /// leaf variables that want to accumulate gradients, and false for all other + /// variables. + void set_requires_grad(bool requires_grad, at::TensorImpl* self_impl) final { + TORCH_CHECK( + !requires_grad || + isDifferentiableType(at::typeMetaToScalarType(self_impl->dtype())), + "Only Tensors of floating point and complex dtype can require gradients"); + requires_grad_ = requires_grad; + } + + bool requires_grad() const override { + return requires_grad_ || grad_fn_; + } + + /// Accesses the gradient `Variable` of this `Variable`. + Variable& mutable_grad() override { + return grad_; + } + + const Variable& grad() const override { + return grad_; + } + + const Variable& fw_grad(uint64_t level, const at::TensorBase& self) + const override; + + void set_fw_grad( + const at::TensorBase& new_grad, + const at::TensorBase& self, + uint64_t level, + bool is_inplace_op) override; + + AutogradMeta( + at::TensorImpl* self_impl = nullptr, + bool requires_grad = false, + Edge gradient_edge = Edge()) + : grad_fn_(std::move(gradient_edge.function)), + + output_nr_(gradient_edge.input_nr) { + // set_requires_grad also checks error conditions. 
+ if (requires_grad) { + TORCH_INTERNAL_ASSERT(self_impl); + set_requires_grad(requires_grad, self_impl); + } + TORCH_CHECK( + !grad_fn_ || !requires_grad_, + "requires_grad should be false if grad_fn is set"); + } + + ~AutogradMeta() override { + // If AutogradMeta is being destroyed, it means that there is no other + // reference to its corresponding Tensor. It implies that no other thread + // can be using this object and so there is no need to lock mutex_ here to + // guard the check if fw_grad_ is populated. + if (fw_grad_) { + // See note [ Using ForwardGrad ] + fw_grad_->clear(); + } + } +}; + +/// Base class for view functions, providing reapplication of a view on a new +/// base. Each view op should get a codegenerated subclass of this class +/// containing any state needed to reconstruct the view. The class also provides +/// convenience accessors for saved SymInts / tensor state. This is useful for +/// e.g. fake-ification, where we want to use symbolic values or fake tensors +/// instead. +struct TORCH_API ViewFunc { + virtual ~ViewFunc() {} + /// Returns any SymInts in the saved state. + virtual std::vector get_symints() const { + return {}; + } + /// Returns the number of SymInts in the saved state. + virtual size_t num_symints() const { + return 0; + } + /// Returns any tensors in the saved state. + virtual std::vector get_tensors() const { + return {}; + } + /// Returns the number of tensors in the saved state. + virtual size_t num_tensors() const { + return 0; + } + /// Reapplies the view on the given base using the saved state. + virtual at::Tensor operator()(const at::Tensor&) const = 0; + /// Returns a clone of this ViewFunc, optionally with the specified saved + /// state. + virtual std::unique_ptr clone_and_set( + std::optional> = c10::nullopt, + std::optional> = c10::nullopt) const = 0; + + protected: + /// Sets the values of any SymInts in the saved state. The input vector size + /// must match the number of SymInts in the saved state (i.e. 
the size of the + /// list returned by get_symints()). + virtual void set_symints(std::vector) {} + /// Sets the values of any Tensors in the saved state. The input vector size + /// must match the number of Tensors in the saved state (i.e. the size of the + /// list returned by get_tensors()). + virtual void set_tensors(std::vector) {} +}; + +/// ViewFunc that represents a chain of two ViewFuncs. +struct ChainedViewFunc : public ViewFunc { + ChainedViewFunc( + std::unique_ptr first, + std::unique_ptr second) + : first(std::move(first)), second(std::move(second)) {} + virtual ~ChainedViewFunc() override{}; + virtual std::vector get_symints() const override; + virtual size_t num_symints() const override { + return first->num_symints() + second->num_symints(); + } + virtual std::vector get_tensors() const override; + virtual size_t num_tensors() const override { + return first->num_tensors() + second->num_tensors(); + } + virtual at::Tensor operator()(const at::Tensor&) const override; + virtual std::unique_ptr clone_and_set( + std::optional> = c10::nullopt, + std::optional> = c10::nullopt) const override; + + private: + std::unique_ptr first; + std::unique_ptr second; +}; + +/// ViewFunc that errors with a specified error message when called. +struct ErroringViewFunc : public ViewFunc { + ErroringViewFunc(const std::string& error_msg) : error_msg(error_msg) {} + virtual ~ErroringViewFunc() override{}; + virtual at::Tensor operator()(const at::Tensor&) const override { + TORCH_CHECK(false, error_msg); + } + virtual std::unique_ptr clone_and_set( + std::optional> = c10::nullopt, + std::optional> = c10::nullopt) const override { + return std::make_unique(error_msg); + } + + private: + std::string error_msg; +}; + +struct TORCH_API ViewInfo { + /// The base `Variable` + /// If this ViewInfo represents a forward (respectively backward) AD gradient, + /// then this Tensor cannot be a forward (respectively backward) view. 
+ Variable base_; + + /// By default we use as_strided to recover views which is more efficient. + /// view_fn is only saved when as_strided is not supported. + /// If view_fn has value, we use it to recover views in backward. + std::unique_ptr view_fn_; + + /// Analogue of view_fn but in reverse: given a view -> produce the base by + /// applying the inverse view. + std::function rev_view_fn_; + + /// Accessors for the view function + bool has_view_fn() const { + // assume either BOTH or NEITHER of view_fn_ and rev_view_fn_ exist + return view_fn_ != nullptr; + } + + const ViewFunc& view_fn() const { + TORCH_CHECK( + has_view_fn(), "Can only access the view function if it exists."); + return *view_fn_; + } + + std::function rev_view_fn() const { + TORCH_CHECK( + has_view_fn(), + "Can only access the reverse view function if it exists."); + return rev_view_fn_; + } + + /// The chain function can be used to build a new ViewInfo for a + /// differentiable view function. It will return a new view info that + /// accurately represents how "tensor" is a view of this instance's "base_". + /// The "base" and "tensor" are respectively the input and output of the + /// differentiable view function that happened. They are required to properly + /// set the optional view_fn_ when it is not provided. The "view_func", if + /// provided, should be a function that allows to re-do the view between + /// "base" and "tensor". 
+ ViewInfo chain( + const Variable& base, + const Variable& tensor, + std::unique_ptr view_func = nullptr, + std::function rev_view_func = nullptr) const; + + ViewInfo( + Variable base, + std::unique_ptr view_fn, + std::function rev_view_fn) + : base_(std::move(base)), + view_fn_(std::move(view_fn)), + rev_view_fn_(std::move(rev_view_fn)) { + TORCH_CHECK(base_.defined(), "base is undefined"); + } +}; + +//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// DifferentiableViewMeta +//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// NOTE [ Autograd View Variables ] +/// +/// Many operations return Variable that shares storage with an input Variable. +/// The returned Variable is called a **view** Variable on the input **base** +/// Variable. +/// +/// In PyTorch, we have two types of views: differentiable views, and +/// non-differentiable views. In either type, to support proper version +/// checking, the base and view Variables must always share the same +/// version_counter. +/// +/// +/// Differentiable Views +/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +/// This class allows to track both forward and backward AD differentiable +/// views. These views can have different base as non-differentiable view for +/// forward and backward mode AD are not the same. +/// +/// Most function are either both forward and backward differentiable views (for +/// example: view, select, narrow, transpose, etc) or both not forward and not +/// backward differentiable views (for example: indices, values, eq, lt, etc). +/// But there are also functions that are forward but not backward +/// differentiable views (only detach for now) or functions that are backward +/// but not forward differentiable view (only make_dual and unpack dual for +/// now). 
+/// +/// A concrete example of two views with different bases is as follow: +/// +/// # Have: +/// # dual is a dual Tensor that is neither a forward or backward view +/// detached_dual = dual.detach() +/// view = detached_dual.view_as(dual) +/// # The forward base of view is dual +/// # The backward base of view is detached_dual +/// +/// - Backward Mode View +/// Differentiable views are the view variables where you want gradients to flow +/// back to the base variables. Out-of-place operations on views are quite +/// straightforward, but in-place ones are very tricky. Even if the base +/// variable may not require grad when we create the view, we still need to +/// track the view relation because future in-place ops may require back-proping +/// through it. For example, we need to support +/// +/// (1) in-place operation on view, e.g., +/// +/// # Have: +/// # base.requires_grad = False +/// # var.requires_grad = True +/// base[1] = var # i.e., base[1].copy_(var) +/// torch.autograd.grad(base.sum(), var) <- should return an all ones +/// tensor +/// +/// (2) in-place operation on base after view is created, e.g., +/// +/// # Have: +/// # base.requires_grad = False +/// # var.requires_grad = True +/// view = base[1] +/// base.copy_(var) +/// torch.autograd.grad(view.sum(), var) <- should return a tensor with +/// var[1] filled with all ones and +/// zeros everywhere else +/// +/// - Forward Mode View +/// Forward differentiable views follow the same semantic as backward ones but +/// show up differently as they are computed along with the forward evaluation. 
+/// The hard examples above are thus very similar +/// +/// (1) in-place operation on view, e.g., +/// +/// # Have: +/// # base is a regular Tensor +/// # var is a dual Tensor whose tangent is all ones +/// base[1] = var # i.e., base[1].copy_(var) +/// # Now, base is a dual Tensor +/// _, fw_grad = fwAD.unpack_dual(base) <- fw_grad should be a tensor with +/// fw_grad[1] filled with all ones +/// and zeros everywhere else +/// +/// (2) in-place operation on base after view is created, e.g., +/// +/// # Have: +/// # base is a regular Tensor +/// # var is a dual Tensor whose tangent is all ones +/// view = base[1] +/// base.copy_(var) +/// _, fw_grad = fwAD.unpack_dual(view) <- fw_grad should be an all ones +/// tensor +/// +/// See Note [Forward Grad View/inplace] for more details on how we handle these +/// hard cases. +/// +/// +/// DifferentiableViewMeta is created to support gradient tracking of +/// such **in-place** operations. In particular, +/// + if an in-place op is done on base, the grad_fn field of the view may +/// become stale. So accesses should always go through grad_fn(), which +/// reconstructs an updated grad_fn if the version_counter has incremented. +/// All other fields are always valid. +/// + if an in-place op is done on view, in rebase_history() of view, which is +/// called after every in-place op in VariableType.cpp, the grad_fn of base +/// is updated. +/// + if a single autograd Node returns multiple differentiable views, if any +/// output is modified by an inplace operation, the autograd engine will +/// make an equivalent graph (corresponding to the view operations) without +/// using equivalent graph, where each output is treated as if it were +/// produced by a distinct view operation. This discards the original (e.g., +/// user provided) grad_fn. 
If the provided grad_fn does more than the +/// backward of the view, then the DifferentiableViewMeta must be created +/// with creation_meta= CreationMeta::MULTI_OUTPUT_NODE to prevent the +/// engine from ignoring the provided grad_fn. +/// +/// Interaction with GradMode: +/// The particular case that we consider here is: +/// +/// # Have: +/// # base.requires_grad = True or False +/// with torch.no_grad(): +/// view = base[1] +/// base.requires_grad_() +/// view.copy_(var) +/// torch.autograd.grad(base.sum(), var) <- what should it return? +/// +/// Given that this particular code example is ambiguous and can easily be +/// replace by either moving both inside the no_grad block or both outside, we +/// explicitly forbid it. For now, it is deprecated by a warning. This is +/// achieved by setting creation_meta=CreationMeta::NO_GRAD_MODE for all +/// differentiable views created in no_grad mode. +/// +/// See Note [View + Inplace update for base tensor] +/// and Note [View + Inplace update for view tensor] for the details how +/// autograd handles inplace update with view ops. +/// +/// Non-Differentiable Views +/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +/// In certain cases, although function outputs share storage with inputs, they +/// will **never** require gradient history tracking. Instead of registering the +/// view relation via DifferentiableViewMeta in autograd, the views will be +/// using usual AutogradMeta and just share the version counters with the base +/// Variables. +/// Such views include: +/// 1. Views created from .detach() +/// 2. Views that are non-differentiable by its nature. +/// E.g., `sparse_tensor.indices()` is a integral view on a (possibly) +/// floating point tensor. +/// See top of `derivatives.yaml` on how to specify that outputs of a +/// function are non-differentiable. +/// These are called non-differentiable views as the gradients do not flow +/// through the view relation. 
+/// +/// Relevant logic for both differentiable and non-differentiable views is +/// implemented in make_variable_(non_)differentiable_view below, and +/// wrap_output of gen_variable_type.py. + +/// NOTE [ View + Inplace detection ] +/// +/// We want to detect views followed by inplace as they are often forbidden to +/// ensure correctness of the computed gradients. But since we want to only +/// notify the user when both happen, we tag the DifferentiableViewMeta when the +/// view is created via the `make_variable_*_view()` functions. This tag is then +/// checked by the `check_inplace()` function from `VariableTypeUtils.h` that +/// should be called before every inplace operation and to detect cases where +/// other views are modified and this one is rebased by side effect, we also +/// check in the `VariableHooks::grad_fn()`. + +/// Flag that gives more information about when this view was created: +/// - IN_CUSTOM_FUNCTION should be set when the view is created inside a custom +/// autograd Function is returned. +/// - NO_GRAD_MODE should be set when a view in created when GradMode is +/// disabled +/// - MULTI_OUTPUT_NODE should be set when a Node created by codegen code +/// returns +/// multiple differentiable views +/// - Inference_MODE should be set when a view of normal tensor is created in +/// InferenceMode. +/// - DEFAULT is for all other cases +enum class CreationMeta : uint8_t { + DEFAULT, + IN_CUSTOM_FUNCTION, + MULTI_OUTPUT_NODE, + NO_GRAD_MODE, + INFERENCE_MODE +}; + +/// Handles correctly propagating CreationMeta when a new view is created from a +/// previous view. In general, we don't want the new view to be _less_ +/// restrictive than the previous view (it's okay to be _more_ restrictive). A +/// CreationMeta value of DEFAULT is currently the least restrictive, as the +/// behavior for all other CreationMeta values is to error out for in-place ops. 
+/// A CreationMeta value of INFERENCE_MODE is currently the most restrictive, so +/// it takes precedence in propagation. If this changes, the logic here will +/// need to be updated to properly handle the new semantics. +inline CreationMeta propagate_creation_meta( + CreationMeta prev_view_creation_meta, + CreationMeta new_view_creation_meta) { + return (new_view_creation_meta == CreationMeta::DEFAULT) + ? prev_view_creation_meta + : (prev_view_creation_meta == CreationMeta::INFERENCE_MODE + ? prev_view_creation_meta + : new_view_creation_meta); +} + +/// Unified function to handle error checking when rebase happens +/// indirect=true means that the caller is not doing the inplace, but the +/// inplace happened somewhere else. +TORCH_API void handle_view_on_rebase( + DifferentiableViewMeta* diff_view_meta, + bool indirect = false); + +struct TORCH_API DifferentiableViewMeta : public AutogradMeta { + private: + /// Informations about the views + c10::optional backward_info_; + c10::optional forward_info_; + + // Optimization to reduce the number of ViewInfo we create. + // In the (very common) case where backward_info_ == forward_info_, we only + // populate backward_info_ (that should be used as both the forward and + // backward view information) and set shared_view_info_ = true. Invariants: + // - If shared_view_info_ is false, there is no special constraints on + // backward_info_ and forward_info_ + // - If shared_view_info_ is true, we must have: + // - backward_info_.has_value() == true + // - forward_info_.has_value() == false + bool shared_view_info_; + + /// The two following fields are extra information that we track to ensure + /// that any operation on this backward view is valid. + + /// The value of the version_counter at the time grad_fn was created. The + /// grad_fn field is stale if attr_version_ != + /// version_counter.current_version(). 
+ uint32_t attr_version_; + CreationMeta creation_meta_; + + public: + /// requires_grad is a backward AD field so we only use the view specific + /// logic for backward differentiable views + bool requires_grad() const override { + return requires_grad_ || grad_fn_ || + (has_bw_view() && get_backward_view().base_.requires_grad()); + } + + bool shared_view_info() const { + return shared_view_info_; + } + + bool has_bw_view() const { + return backward_info_.has_value(); + } + + const ViewInfo& get_backward_view() const { + TORCH_CHECK( + has_bw_view(), "backward view info can only exist for backward views."); + return backward_info_.value(); + } + + uint32_t get_attr_version() const { + TORCH_CHECK( + has_bw_view(), "attr_version can only exist for backward views."); + return attr_version_; + } + + void set_attr_version(uint32_t new_attr_version) { + TORCH_CHECK( + has_bw_view(), "attr_version can only exist for backward views."); + attr_version_ = new_attr_version; + } + + CreationMeta get_creation_meta() const { + TORCH_CHECK( + has_bw_view(), "creation_meta can only exist for backward views."); + return creation_meta_; + } + + void set_creation_meta(CreationMeta new_creation_meta) { + TORCH_CHECK( + has_bw_view(), "creation_meta can only exist for backward views."); + creation_meta_ = new_creation_meta; + } + + bool has_fw_view() const { + return shared_view_info_ || forward_info_.has_value(); + } + + const ViewInfo& get_forward_view() const { + TORCH_CHECK( + has_fw_view(), "forward view info can only exist for forward views."); + TORCH_CHECK( + !shared_view_info_ || has_bw_view(), + "forward view info can only exist for forward views."); + return shared_view_info_ ? 
backward_info_.value() : forward_info_.value(); + } + + DifferentiableViewMeta( + at::TensorImpl* self_impl, + c10::optional backward_info, + c10::optional forward_info, + bool shared_view_info, + CreationMeta creation_meta = CreationMeta::DEFAULT); +}; + +//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// Variable Implementation +//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +// Factory Functions +//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Creates a `Variable` that is a *view* of another (*base*) variable. +/// The `gradient_edge` is an optional (gradient_function, input_number) pair. +/// `is_differentiable` is a bool that specifies whether this view is +/// differentiable, i.e., whether the relation should be tracked by autograd. +/// See NOTE [ Autograd View Variables ] for details. + +/// NOTE: `allow_tensor_metadata_change` is set to true by default, because +/// there are a lot of call sites to these factory functions that need to change +/// the variable's size or storage afterwards, and they don't expect the +/// original tensor (where the variable is created from) to be updated. Setting +/// `allow_tensor_metadata_change_` to false by default would unnecessarily +/// prevent those changes from happening and is undesirable. + +// See NOTE [ Autograd View Variables ] for details. +// Differentiable view. Track history with DifferentiableViewMeta. +inline Variable make_variable_differentiable_view( + const at::Tensor& data, + c10::optional backward_info, + c10::optional forward_info, + bool shared_view_info, + CreationMeta creation_meta, + bool allow_tensor_metadata_change = true) { + if (data.defined()) { + TORCH_CHECK( + data.getIntrusivePtr()->autograd_meta() == nullptr, + "Attempted to make a tensor into a differentiable view, but the " + "tensor already had autograd metadata associated with it. 
If you are " + "using a __torch_dispatch__ mode, the most common cause for this " + "problem is that you used torch.overrides.enable_reentrant_dispatch() " + "improperly; tensors created within the extent of reentrant dispatch " + "MUST NOT be directly returned from __torch_dispatch__; instead, they " + "must be wrapped into fresh tensors that serve as the output. If you " + "are not using wrappers, you probably don't need reentrant dispatch. " + "If this doesn't seem applicable, please file a bug to PyTorch."); + at::TensorImpl* data_impl = data.unsafeGetTensorImpl(); + data_impl->set_allow_tensor_metadata_change(allow_tensor_metadata_change); + data_impl->set_autograd_meta(std::make_unique( + data_impl, + std::move(backward_info), + std::move(forward_info), + shared_view_info, + creation_meta)); + return data; + } + return Variable(); +} + +// See NOTE [ Autograd View Variables ] for details. +// Non-differentiable view. Just share version counter. +inline Variable make_variable_non_differentiable_view( + const Variable& base, + const at::Tensor& data, + bool allow_tensor_metadata_change = true) { + if (data.defined()) { + // Currently all of non-differentiable view ops(detach/_indices/_values) + // share the same TensorImpl as their base Tensor. Thus a new TensorImpl + // allocation here is required. + auto data_impl_copy = data.getIntrusivePtr()->shallow_copy_and_detach( + /*version_counter=*/impl::version_counter(base), + /*allow_tensor_metadata_change=*/allow_tensor_metadata_change); + data_impl_copy->set_autograd_meta(nullptr); + return Variable(data_impl_copy); + } + return Variable(); +} + +/// Creates a `Variable` from the given `Tensor`, copying its underlying +/// `TensorImpl`. `requires_grad` should be set only for leaves, and determines +/// whether the `Variable` will accumulate gradients. NOTE: `data` must *not* be +/// a `Variable` already. Its dynamic type *must* be `Tensor`. 
+/// +/// TODO: Eliminate this function as much as possible, as it can be expressed +/// more clearly as detach() or a no-op in most call sites (especially when +/// there is only one use of the variable). +inline Variable make_variable( + at::Tensor data, + bool requires_grad = false, + bool allow_tensor_metadata_change = true) { + if (data.defined()) { + if (data.getIntrusivePtr().use_count() == 1 && + data.getIntrusivePtr()->unique_version()) { + auto data_impl = data.unsafeReleaseIntrusivePtr(); + data_impl->set_allow_tensor_metadata_change(allow_tensor_metadata_change); + if (requires_grad) { + data_impl->set_autograd_meta( + std::make_unique(data_impl.get(), requires_grad)); + } else { + data_impl->set_autograd_meta(nullptr); + } + return Variable(std::move(data_impl)); + } else { + auto data_impl_copy = data.getIntrusivePtr()->shallow_copy_and_detach( + /*version_counter=*/0, + /*allow_tensor_metadata_change=*/allow_tensor_metadata_change); + if (requires_grad) { + data_impl_copy->set_autograd_meta(std::make_unique( + data_impl_copy.get(), requires_grad)); + } else { + data_impl_copy->set_autograd_meta(nullptr); + } + return Variable(data_impl_copy); + } + } + return Variable(); +} + +/// Creates a `Variable` from the given `Tensor`, copying its underlying +/// `TensorImpl`. `gradient_edge` should be a (function, input_nr) pair +/// specifying the function in the autograd graph, and what particular input of +/// that function, this variable is connected to. 
+inline Variable make_variable( + const at::Tensor& data, + Edge gradient_edge, + bool allow_tensor_metadata_change = true) { + if (data.defined()) { + auto data_impl_copy = data.getIntrusivePtr()->shallow_copy_and_detach( + /*version_counter=*/0, + /*allow_tensor_metadata_change=*/allow_tensor_metadata_change); + data_impl_copy->set_autograd_meta(std::make_unique( + data_impl_copy.get(), false, std::move(gradient_edge))); + return Variable(data_impl_copy); + } + return Variable(); +} + +struct VariableHooks final : at::impl::VariableHooksInterface { + at::TensorBase tensor_data(const at::TensorBase&) const override; + at::TensorBase variable_data(const at::TensorBase&) const override; + const std::shared_ptr& grad_fn( + const at::TensorBase&) const override; + unsigned _register_hook( + const at::TensorBase&, + std::function hook) const override; + void remove_hook(const at::TensorBase&, unsigned pos) const override; + bool is_view(const at::TensorBase&) const override; + const at::TensorBase& base(const at::TensorBase&) const override; + const std::string& name(const at::TensorBase&) const override; + bool is_leaf(const at::TensorBase&) const override; + int64_t output_nr(const at::TensorBase&) const override; + void set_data(const at::TensorBase& self, const at::TensorBase& new_data) + const override; + at::TensorBase data(const at::TensorBase& self) const override; + int64_t _version(const at::TensorBase& self) const override; + void retain_grad(const at::TensorBase& self) const override; + bool retains_grad(const at::TensorBase& self) const override; + void _backward( + const at::Tensor& self, + at::TensorList inputs, + const c10::optional& gradient, + c10::optional keep_graph, + bool create_graph) const override; + void requires_grad_(const at::TensorBase& self, bool _requires_grad) + const override; + void basic_autograd_not_implemented_fallback( + const c10::OperatorHandle& op, + c10::DispatchKeySet dispatch_keys, + torch::jit::Stack* stack) const override; 
+}; + +namespace utils { + +TORCH_API bool has_same_meta(const Variable& base, const Variable& other); + +} // namespace utils +} // namespace torch::autograd + +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/copy_utils.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/copy_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..4d7b689016bd9e5fc5864b6480ed6b1188ed0372 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/copy_utils.h @@ -0,0 +1,52 @@ +#pragma once + +#include +#include +#include +#include +#include + +typedef std::function THPCopyFunction; +// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) +struct THPCopyInfo { + PyTypeObject* srcType; // Python type of src tensor/storage + THPCopyFunction copy; // copy function + bool non_blocking; // true if copy implements an 'non_blocking' copy + bool broadcast; // true if the copy implements a broadcast copy +}; +typedef std::vector THPCopyList; + +inline bool tryTHPCopy( + const THPCopyList& v, + PyObject* dst, + PyObject* src, + bool non_blocking, + bool broadcast) { + for (auto& i : v) { + if (i.non_blocking == non_blocking && + PyType_IsSubtype(Py_TYPE(src), i.srcType)) { + (i.copy)(dst, src, broadcast); + return true; + } + } + return false; +} + +inline bool THPCopy( + const THPCopyList& v, + PyObject* dst, + PyObject* src, + bool non_blocking, + bool broadcast) { + // NOLINTNEXTLINE(bugprone-branch-clone) + if (tryTHPCopy(v, dst, src, non_blocking, broadcast)) { + return true; + } else if (non_blocking && tryTHPCopy(v, dst, src, false, broadcast)) { + return true; + } + THPUtils_setError( + "copy from %s to %s isn't implemented", + THPUtils_typename(src), + THPUtils_typename(dst)); + return false; +} diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/itt_wrapper.h 
b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/itt_wrapper.h new file mode 100644 index 0000000000000000000000000000000000000000..502af374ff3d34218927b0d066b692d494a397d5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/itt_wrapper.h @@ -0,0 +1,12 @@ +#ifndef PROFILER_ITT_H +#define PROFILER_ITT_H +#include + +namespace torch::profiler { +TORCH_API bool itt_is_available(); +TORCH_API void itt_range_push(const char* msg); +TORCH_API void itt_range_pop(); +TORCH_API void itt_mark(const char* msg); +} // namespace torch::profiler + +#endif // PROFILER_ITT_H diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/jit_log.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/jit_log.h new file mode 100644 index 0000000000000000000000000000000000000000..c3a0fa303c7c566d72a45306d4f063bd52accadd --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/jit_log.h @@ -0,0 +1,128 @@ +#pragma once +#include +#include +#include +#include +#include + +// `TorchScript` offers a simple logging facility that can enabled by setting an +// environment variable `PYTORCH_JIT_LOG_LEVEL`. + +// Logging is enabled on a per file basis. To enable logging in +// `dead_code_elimination.cpp`, `PYTORCH_JIT_LOG_LEVEL` should be +// set to `dead_code_elimination.cpp` or, simply, to `dead_code_elimination` +// (i.e. `PYTORCH_JIT_LOG_LEVEL=dead_code_elimination`). + +// Multiple files can be logged by separating each file name with a colon `:` as +// in the following example, +// `PYTORCH_JIT_LOG_LEVEL=dead_code_elimination:guard_elimination` + +// There are 3 logging levels available for your use ordered by the detail level +// from lowest to highest. + +// * `GRAPH_DUMP` should be used for printing entire graphs after optimization +// passes +// * `GRAPH_UPDATE` should be used for reporting graph transformations (i.e. 
+// node deletion, constant folding, etc) +// * `GRAPH_DEBUG` should be used for providing information useful for debugging +// the internals of a particular optimization pass or analysis + +// The default logging level is `GRAPH_DUMP` meaning that only `GRAPH_DUMP` +// statements will be enabled when one specifies a file(s) in +// `PYTORCH_JIT_LOG_LEVEL`. + +// `GRAPH_UPDATE` can be enabled by prefixing a file name with an `>` as in +// `>alias_analysis`. +// `GRAPH_DEBUG` can be enabled by prefixing a file name with an `>>` as in +// `>>alias_analysis`. +// `>>>` is also valid and **currently** is equivalent to `GRAPH_DEBUG` as there +// is no logging level that is higher than `GRAPH_DEBUG`. + +namespace torch { +namespace jit { + +struct Node; +struct Graph; + +enum class JitLoggingLevels { + GRAPH_DUMP = 0, + GRAPH_UPDATE, + GRAPH_DEBUG, +}; + +TORCH_API std::string get_jit_logging_levels(); + +TORCH_API void set_jit_logging_levels(std::string level); + +TORCH_API void set_jit_logging_output_stream(std::ostream& out_stream); + +TORCH_API std::ostream& get_jit_logging_output_stream(); + +TORCH_API std::string getHeader(const Node* node); + +TORCH_API std::string log_function(const std::shared_ptr& graph); + +TORCH_API ::torch::jit::JitLoggingLevels jit_log_level(); + +// Prefix every line in a multiline string \p IN_STR with \p PREFIX. +TORCH_API std::string jit_log_prefix( + const std::string& prefix, + const std::string& in_str); + +TORCH_API std::string jit_log_prefix( + ::torch::jit::JitLoggingLevels level, + const char* fn, + int l, + const std::string& in_str); + +TORCH_API bool is_enabled( + const char* cfname, + ::torch::jit::JitLoggingLevels level); + +TORCH_API std::ostream& operator<<( + std::ostream& out, + ::torch::jit::JitLoggingLevels level); + +#define JIT_LOG(level, ...) 
\ + if (is_enabled(__FILE__, level)) { \ + ::torch::jit::get_jit_logging_output_stream() \ + << ::torch::jit::jit_log_prefix( \ + level, __FILE__, __LINE__, ::c10::str(__VA_ARGS__)); \ + } + +// tries to reconstruct original python source +#define SOURCE_DUMP(MSG, G) \ + JIT_LOG( \ + ::torch::jit::JitLoggingLevels::GRAPH_DUMP, \ + MSG, \ + "\n", \ + ::torch::jit::log_function(G)); +// use GRAPH_DUMP for dumping graphs after optimization passes +#define GRAPH_DUMP(MSG, G) \ + JIT_LOG( \ + ::torch::jit::JitLoggingLevels::GRAPH_DUMP, MSG, "\n", (G)->toString()); +// use GRAPH_UPDATE for reporting graph transformations (i.e. node deletion, +// constant folding, CSE) +#define GRAPH_UPDATE(...) \ + JIT_LOG(::torch::jit::JitLoggingLevels::GRAPH_UPDATE, __VA_ARGS__); +// use GRAPH_DEBUG to provide information useful for debugging a particular opt +// pass +#define GRAPH_DEBUG(...) \ + JIT_LOG(::torch::jit::JitLoggingLevels::GRAPH_DEBUG, __VA_ARGS__); +// use GRAPH_EXPORT to export a graph so that the IR can be loaded by a script +#define GRAPH_EXPORT(MSG, G) \ + JIT_LOG( \ + ::torch::jit::JitLoggingLevels::GRAPH_DEBUG, \ + MSG, \ + "\n\n", \ + (G)->toString(), \ + ""); + +#define GRAPH_DUMP_ENABLED \ + (is_enabled(__FILE__, ::torch::jit::JitLoggingLevels::GRAPH_DUMP)) +#define GRAPH_UPDATE_ENABLED \ + (is_enabled(__FILE__, ::torch::jit::JitLoggingLevels::GRAPH_UPDATE)) +#define GRAPH_DEBUG_ENABLED \ + (is_enabled(__FILE__, ::torch::jit::JitLoggingLevels::GRAPH_DEBUG)) +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/jit_opt_limit.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/jit_opt_limit.h new file mode 100644 index 0000000000000000000000000000000000000000..a5bb535c9c6fe708bbbf51625182a725425f1dc8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/jit_opt_limit.h @@ -0,0 +1,39 @@ +#pragma once +#include +#include +#include + +// 
`TorchScript` offers a simple optimization limit checker +// that can be configured through environment variable `PYTORCH_JIT_OPT_LIMIT`. +// The purpose is to limit how many optimization you can make per pass. +// This is useful for debugging any passes. + +// Opt limit checker is enabled on a per file basis (hence per pass). For +// example, in `constant_propagation.cpp`, `PYTORCH_JIT_OPT_LIMIT` should be set +// to `constant_propagation=` or, simply, to +// `constant_propagation=` where is the number of +// optimizations you want to make for the pass. (i.e. +// `PYTORCH_JIT_OPT_LIMIT="constant_propagation="`). + +// Multiple files can be configured by separating each file name with a colon +// `:` as in the following example, +// `PYTORCH_JIT_OPT_LIMIT="constant_propagation=:dead_code_elimination="` + +// You can call opt limiter by calling JIT_OPT_ALLOWED. It will return true if +// we haven't reached the optimization limit yet. Otherwise, it will return +// false. Typical usage: + +// if (!JIT_OPT_ALLOWED) { +// GRAPH_DUMP(...); //supplied from jit_log +// return; +// } + +namespace torch { +namespace jit { + +TORCH_API bool opt_limit(const char* pass_name); + +#define JIT_OPT_ALLOWED opt_limit(__FILE__) + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/clear_profiling.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/clear_profiling.h new file mode 100644 index 0000000000000000000000000000000000000000..7dee9bdb52ad6c460366953f696480140f219fb6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/clear_profiling.h @@ -0,0 +1,19 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { + +TORCH_API void unprofileGraphInputs(const std::shared_ptr& graph); +TORCH_API void unprofileBlock(Block* start_block); +// Unprofiles all the node outputs in a block. 
+ +TORCH_API void ClearProfilingInformation(const std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/create_autodiff_subgraphs.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/create_autodiff_subgraphs.h new file mode 100644 index 0000000000000000000000000000000000000000..481b2aa352107bc74f776b7bcd3bb24251b80c0b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/create_autodiff_subgraphs.h @@ -0,0 +1,19 @@ +#pragma once + +#include +#include + +#include + +namespace torch { +namespace jit { + +// insert GraphExecutor nodes that group together +// subgraphs that are differentiable by the jit's autodiff passes +// threshold - minimum number of nodes that will appear in a block +// returns all differentiable blocks that have been found +TORCH_API std::vector CreateAutodiffSubgraphs( + const std::shared_ptr& graph, + size_t threshold = 2); +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/dead_code_elimination.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/dead_code_elimination.h new file mode 100644 index 0000000000000000000000000000000000000000..780c11f95a9bb9dcaa4fd07aec92409d0f2cd527 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/dead_code_elimination.h @@ -0,0 +1,42 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// If given a top-level graph, DCE will construct do alias analysis that allows +// for "smarter" dead code elimination (we will eliminate mutable ops if we can +// prove the mutated values are not used). Otherwise, we will not allow DCE to +// eliminate mutable ops. +// +// So, prefer to use the graph version if you can. 
+enum class DCESideEffectPolicy : uint8_t { + // default behavior: dead code elimination will check if a node has side + // effects + // and not delete it if it does. + DONT_DELETE_NODES_WITH_SIDE_EFFECTS, + // with this flag, dead code elimination will not check if a node has side + // effects and treat nodes with side effects like any other node, + // i.e. delete them if their outputs aren't used anywhere. + ALLOW_DELETING_NODES_WITH_SIDE_EFFECTS +}; + +TORCH_API void EliminateDeadCode( + const std::shared_ptr& graph, + DCESideEffectPolicy sideEffectPolicy = + DCESideEffectPolicy::DONT_DELETE_NODES_WITH_SIDE_EFFECTS); +TORCH_API void EliminateDeadCode( + Block* block, + bool recurse = true, + DCESideEffectPolicy sideEffectPolicy = + DCESideEffectPolicy::DONT_DELETE_NODES_WITH_SIDE_EFFECTS); + +// Invoke the user-provided callback on all live values before deleting anything +TORCH_API void EliminateDeadCode( + Block* block, + std::function&)> cb, + DCESideEffectPolicy sideEffectPolicy = + DCESideEffectPolicy::DONT_DELETE_NODES_WITH_SIDE_EFFECTS); +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/device_type_analysis.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/device_type_analysis.h new file mode 100644 index 0000000000000000000000000000000000000000..159592a6c6672dd2fccf0768496aea1de44f1ff9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/device_type_analysis.h @@ -0,0 +1,13 @@ +#pragma once + +#include + +namespace torch { +namespace jit { +struct Graph; + +// Propagates Device type info throughout the given graph. 
+TORCH_API bool DeviceTypePropagation(std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fold_linear_bn.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fold_linear_bn.h new file mode 100644 index 0000000000000000000000000000000000000000..704a0915116286ace337974c449e9a635fca4053 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fold_linear_bn.h @@ -0,0 +1,29 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +struct TORCH_API LinearBNParameters { + at::Tensor linear_w; + at::Tensor linear_b; + at::Tensor bn_rm; + at::Tensor bn_rv; + double bn_eps = 0.0; + at::Tensor bn_w; + at::Tensor bn_b; +}; + +/** + * Given the current weight and bias tensors of a Linear module and parameters + * of the BatchNorm module we're folding with, compute the updated values + * for the weight and bias. + * + * The function is basically copied from torch/nn/utils/fusion.py + */ +TORCH_API std::tuple computeUpdatedLinearWeightAndBias( + const LinearBNParameters& p); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/mobile_optimizer_type.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/mobile_optimizer_type.h new file mode 100644 index 0000000000000000000000000000000000000000..d11f288dca343308bf2167c89a3d6b2d0792a569 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/mobile_optimizer_type.h @@ -0,0 +1,13 @@ +#pragma once + +#include + +enum class MobileOptimizerType : int8_t { + CONV_BN_FUSION, + INSERT_FOLD_PREPACK_OPS, + REMOVE_DROPOUT, + FUSE_ADD_RELU, + HOIST_CONV_PACKED_PARAMS, + CONV_1D_TO_2D, + VULKAN_AUTOMATIC_GPU_TRANSFER, +}; diff --git 
a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_inplace_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_inplace_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..e597da64860be2ad26186e4862ef2db348cbe1ee --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_inplace_ops.h @@ -0,0 +1,14 @@ +#pragma once + +#include + +#include + +namespace torch { +namespace jit { +// see .cpp for docs +TORCH_API void RemoveInplaceOps(const std::shared_ptr& graph); + +TORCH_API void ImplicitCastForBinaryInplaceOps(Block* block); +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_redundant_profiles.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_redundant_profiles.h new file mode 100644 index 0000000000000000000000000000000000000000..b574786c0bb1cf1269816edec2ef5d13980bd5e8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_redundant_profiles.h @@ -0,0 +1,11 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +TORCH_API void RemoveRedundantProfiles(std::shared_ptr& graph); +TORCH_API void RemoveRedundantProfiles(Block* block, AliasDb& db); +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/symbolic_shape_runtime_fusion.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/symbolic_shape_runtime_fusion.h new file mode 100644 index 0000000000000000000000000000000000000000..414d699d2e4cb762d8e759081b761345f5cd55aa --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/symbolic_shape_runtime_fusion.h @@ -0,0 +1,55 @@ +#pragma once + +#include +#include +#include + +#include + +namespace torch { 
+namespace jit { + +// Takes in a TensorExprGraph of static shapes and generalizes the input shapes +// to symbolic dimensions. Dimensions of value 1 will be preserved, otherwise +// dimensions with the same value will be bucketed to the same symbolic shape. +// E.g. Tensor(5, 3), Tensor(3, 1) -> Tensor(SS(-1), SS(-2)), Tensor(SS(-2), 1) +// From there, runs symbolic shape inference on the graph, and creates a +// versioning if in the graph with prim::TensorExprDynamicGuard checking if +// the inputs at runtime match the Generalized Symbolic Shapes that are inputs +// to the TE Kernel. The computate to calculate all symbolic dimensions is +// inlined in to the if block with the TE Kernel. All Sym Dim Value* are +// appended to the end of the TE Kernel Graph/Node inputs, and the Node is +// augmented with a integer list attr `symbolic_shape_inputs` that gives the +// mapping from Value * -> Symbolic Shape int64_t value. For more lengthy IR +// examples and walkthrough look at ShapeAnalysisTest.DynamicShapesFusion in +// `test_shape_analysis` Returns True on Success, False on Failure, can fail if +// shape propagation fails to propagate # of dims or if complete shapes on +// inputs not set + +TORCH_API bool GenerateGuard( + Node* tensorexpr_graph_node, + bool add_composed_op = false); + +TORCH_API void runTensorExprDynamicGroup(const Code& code, Stack& stack); + +enum class StrideInput { + // Tensors natively store whether they are contiguous or not as a property + // this makes it faster to query `is_contiguous` or + // `is_contiguous(memory_format=channels_last)` + // than looping through the sizes/strides yourself + // For tensors with these properties, we only store one value: + TENSOR_CONT, + TENSOR_CONT_CHANNELS_LAST, + // now, we describe other cases, where there is one stride enum + // per dimension + S_ONE, // STRIDE_ONE: packed + S_CONT, // STRIDE_CONTIGUOUS: stride[i + 1] * sizes[i + 1] + S_TRAN_CONT, // STRIDE_TRANSPOSED_CONTIGUOUS: stride[i-1] * 
sizes[i-1] + S_AS_ARG, // STRIDE_AS_ARG: stride passed in as runtime value +}; + +TORCH_API std::string toString(StrideInput si); +TORCH_API StrideInput strideInputFromString(const std::string& si); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/value_refinement_utils.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/value_refinement_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..aa2ab4ea421f5cbba34d9cb973cb8ebe7bf5800d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/value_refinement_utils.h @@ -0,0 +1,81 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { + +// Refine from Value of type List -> len of list +// If a refinement mapping of List Value * -> len is present in a block +// the list is guaranteed to be that length +// TODO: vector may be faster +using ListRefinement = std::unordered_map; + +TORCH_API ListRefinement +intersectRefinements(const ListRefinement& ref1, const ListRefinement& ref2); + +TORCH_API ListRefinement +unionRefinements(const ListRefinement& ref1, const ListRefinement& ref2); + +// Represents the refinement information that can be carried on a boolean +struct BooleanRefinementMapping { + BooleanRefinementMapping( + ListRefinement true_refine, + ListRefinement false_refine) + : true_refine_(std::move(true_refine)), + false_refine_(std::move(false_refine)){}; + BooleanRefinementMapping() = default; // empty + + static BooleanRefinementMapping FalseRefinements( + ListRefinement false_refine) { + return BooleanRefinementMapping({}, std::move(false_refine)); + } + + static BooleanRefinementMapping TrueRefinements(ListRefinement true_refine) { + return BooleanRefinementMapping(std::move(true_refine), {}); + } + + BooleanRefinementMapping 
intersectBooleanRefinementMapping( + BooleanRefinementMapping& other) { + return BooleanRefinementMapping( + intersectRefinements(true_refine_, other.true_refine()), + intersectRefinements(false_refine_, other.false_refine())); + } + + ListRefinement& true_refine() { + return true_refine_; + } + + ListRefinement& false_refine() { + return false_refine_; + } + + private: + ListRefinement true_refine_; + ListRefinement false_refine_; +}; + +TORCH_API void joinIfRefinements( + Node* if_node, + std::unordered_set& throwing_blocks, + ListRefinement& curr_block_refinements, + ListRefinement& true_block_refinements, + ListRefinement& false_block_refinements, + std::unordered_map& info); + +// handles adding blocks to throwing blocks and propagating refinements via +// boolean comparisons +TORCH_API bool handleCommonRefinentOperators( + Node* n, + std::unordered_set& throwing_blocks, + std::unordered_map& info); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/vulkan_rewrite.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/vulkan_rewrite.h new file mode 100644 index 0000000000000000000000000000000000000000..395d885e8e2c3c99f5e1a6d4279c9e0e26894d07 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/vulkan_rewrite.h @@ -0,0 +1,18 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace jit { +TORCH_API void vulkanInsertPrePackedOps(std::shared_ptr& graph); +TORCH_API void vulkanInsertPrePackedOps(script::Module& module); +TORCH_API void vulkanFusePrePackedConvWithClamp(script::Module& module); +TORCH_API void vulkanFoldPrePackingOps(script::Module& module); +TORCH_API script::Module vulkanOptimizeForMobile( + const script::Module& module, + const std::set& optimization_blocklist, + const std::vector& preserved_methods); +} // namespace jit +} // namespace torch diff --git 
a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/resource_guard.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/resource_guard.h new file mode 100644 index 0000000000000000000000000000000000000000..6c2a2fa64b46a8405793b54a81082cfcb1a1f321 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/resource_guard.h @@ -0,0 +1,27 @@ +#pragma once +#include + +namespace torch { +namespace jit { + +class ResourceGuard { + std::function _destructor; + bool _released{false}; + + public: + ResourceGuard(std::function destructor) + : _destructor(std::move(destructor)) {} + + // NOLINTNEXTLINE(bugprone-exception-escape) + ~ResourceGuard() { + if (!_released) + _destructor(); + } + + void release() { + _released = true; + } +}; + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/testing/file_check.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/testing/file_check.h new file mode 100644 index 0000000000000000000000000000000000000000..6e9290f5130baffe4c1fbbadb61e80b6c88d46d4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/testing/file_check.h @@ -0,0 +1,81 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace jit { + +struct Graph; + +namespace testing { + +struct FileCheckImpl; + +struct FileCheck { + public: + TORCH_API explicit FileCheck(); + TORCH_API ~FileCheck(); + + // Run FileCheck against test string + TORCH_API void run(const std::string& test_string); + + // Run FileCheck against dump of graph IR + TORCH_API void run(const Graph& graph); + + // Parsing input checks string and run against test string / dump of graph IR + TORCH_API void run( + const std::string& input_checks_string, + const std::string& test_string); + TORCH_API void run( + const std::string& input_checks_string, + const Graph& graph); + + // Checks that 
the string occurs, starting at the end of the most recent match + TORCH_API FileCheck* check(const std::string& str); + + // Checks that the string does not occur between the previous match and next + // match. Consecutive check_nots test against the same previous match and next + // match + TORCH_API FileCheck* check_not(const std::string& str); + + // Checks that the string occurs on the same line as the previous match + TORCH_API FileCheck* check_same(const std::string& str); + + // Checks that the string occurs on the line immediately following the + // previous match + TORCH_API FileCheck* check_next(const std::string& str); + + // Checks that the string occurs count number of times, starting at the end + // of the previous match. If exactly is true, checks that there are exactly + // count many matches + TORCH_API FileCheck* check_count( + const std::string& str, + size_t count, + bool exactly = false); + + // A series of consecutive check_dags get turned into a group of checks + // which can appear in any order relative to each other. The checks begin + // at the end of the previous match, and the match for the check_dag group + // is the minimum match of all individual checks to the maximum match of all + // individual checks. + TORCH_API FileCheck* check_dag(const std::string& str); + + // Checks that source token is highlighted in str (usually an error message). 
+ TORCH_API FileCheck* check_source_highlighted(const std::string& str); + + // Checks that the regex matched string occurs, starting at the end of the + // most recent match + TORCH_API FileCheck* check_regex(const std::string& str); + + // reset checks + TORCH_API void reset(); + + private: + bool has_run = false; + std::unique_ptr fcImpl; +}; +} // namespace testing +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/testing/hooks_for_testing.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/testing/hooks_for_testing.h new file mode 100644 index 0000000000000000000000000000000000000000..108dea3f1f72d79433faf1b9ddb56f54727ac6e3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/testing/hooks_for_testing.h @@ -0,0 +1,21 @@ +#pragma once +#include +#include +#include +#include + +namespace torch { +namespace jit { +struct Module; + +using ModuleHook = std::function; +using FunctionHook = std::function; + +TORCH_API void didFinishEmitModule(Module module); +TORCH_API void didFinishEmitFunction(StrongFunctionPtr defined); +TORCH_API void setEmitHooks(ModuleHook for_module, FunctionHook for_fn); + +TORCH_API std::pair getEmitHooks(); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/python_dimname.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/python_dimname.h new file mode 100644 index 0000000000000000000000000000000000000000..01a6007e9f8e824a1dae904d63cc01ef091b03af --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/python_dimname.h @@ -0,0 +1,7 @@ +#pragma once +#include +#include + +at::Dimname THPDimname_parse(PyObject* obj); +bool THPUtils_checkDimname(PyObject* obj); +bool THPUtils_checkDimnameList(PyObject* obj); diff --git 
a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/python_headers.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/python_headers.h new file mode 100644 index 0000000000000000000000000000000000000000..0130e41ccb46edf3ab5d5a35c80607383acbddf8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/python_headers.h @@ -0,0 +1,25 @@ +#pragma once +// workaround for https://github.com/python/cpython/pull/23326 +#include +#include +// workaround for Python 2 issue: https://bugs.python.org/issue17120 +// NOTE: It looks like this affects Python 3 as well. +#pragma push_macro("_XOPEN_SOURCE") +#pragma push_macro("_POSIX_C_SOURCE") +#undef _XOPEN_SOURCE +#undef _POSIX_C_SOURCE + +#include +#include +#include + +#pragma pop_macro("_XOPEN_SOURCE") +#pragma pop_macro("_POSIX_C_SOURCE") + +#ifdef copysign +#undef copysign +#endif + +#if PY_MAJOR_VERSION < 3 +#error "Python 2 has reached end-of-life and is no longer supported by PyTorch." +#endif diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/serialization.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/serialization.h new file mode 100644 index 0000000000000000000000000000000000000000..3e10784c2459679bb1bed442782d0fa51cc88b2c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/serialization.h @@ -0,0 +1,27 @@ +#ifndef THP_SERIALIZATION_INC +#define THP_SERIALIZATION_INC + +#include +#include +template +void doRead(io fildes, void* buf, size_t nbytes); + +template +void doWrite(io fildes, void* buf, size_t nbytes); + +// Note that this takes a mutable storage because it may pass through +// to at::from_blob. 
+template +void THPStorage_writeFileRaw( + c10::StorageImpl* self, + io fd, + bool save_size, + uint64_t element_size); + +template +c10::intrusive_ptr THPStorage_readFileRaw( + io fd, + c10::intrusive_ptr storage, + uint64_t element_size); + +#endif diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/utils.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/utils.h new file mode 100644 index 0000000000000000000000000000000000000000..5a610c28d2b1e77f236591f39adc43057dc0c18b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/utils.h @@ -0,0 +1,217 @@ +#ifndef THP_UTILS_H +#define THP_UTILS_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef USE_CUDA +#include +#endif + +#define THPUtils_(NAME) TH_CONCAT_4(THP, Real, Utils_, NAME) + +#define THPUtils_typename(obj) (Py_TYPE(obj)->tp_name) + +#if defined(__GNUC__) || defined(__ICL) || defined(__clang__) +#define THP_EXPECT(x, y) (__builtin_expect((x), (y))) +#else +#define THP_EXPECT(x, y) (x) +#endif + +#define THPUtils_checkReal_FLOAT(object) \ + (PyFloat_Check(object) || PyLong_Check(object)) + +#define THPUtils_unpackReal_FLOAT(object) \ + (PyFloat_Check(object) ? PyFloat_AsDouble(object) \ + : PyLong_Check(object) \ + ? PyLong_AsLongLong(object) \ + : (throw std::runtime_error("Could not parse real"), 0)) + +#define THPUtils_checkReal_INT(object) PyLong_Check(object) + +#define THPUtils_unpackReal_INT(object) \ + (PyLong_Check(object) \ + ? PyLong_AsLongLong(object) \ + : (throw std::runtime_error("Could not parse real"), 0)) + +#define THPUtils_unpackReal_BOOL(object) \ + (PyBool_Check(object) \ + ? object \ + : (throw std::runtime_error("Could not parse real"), Py_False)) + +#define THPUtils_unpackReal_COMPLEX(object) \ + (PyComplex_Check(object) \ + ? (c10::complex( \ + PyComplex_RealAsDouble(object), PyComplex_ImagAsDouble(object))) \ + : PyFloat_Check(object) \ + ? 
(c10::complex(PyFloat_AsDouble(object), 0)) \ + : PyLong_Check(object) \ + ? (c10::complex(PyLong_AsLongLong(object), 0)) \ + : (throw std::runtime_error("Could not parse real"), \ + c10::complex(0, 0))) + +#define THPUtils_checkReal_BOOL(object) PyBool_Check(object) + +#define THPUtils_checkReal_COMPLEX(object) \ + PyComplex_Check(object) || PyFloat_Check(object) || PyLong_Check(object) || \ + PyInt_Check(object) + +#define THPUtils_newReal_FLOAT(value) PyFloat_FromDouble(value) +#define THPUtils_newReal_INT(value) PyInt_FromLong(value) + +#define THPUtils_newReal_BOOL(value) PyBool_FromLong(value) + +#define THPUtils_newReal_COMPLEX(value) \ + PyComplex_FromDoubles(value.real(), value.imag()) + +#define THPDoubleUtils_checkReal(object) THPUtils_checkReal_FLOAT(object) +#define THPDoubleUtils_unpackReal(object) \ + (double)THPUtils_unpackReal_FLOAT(object) +#define THPDoubleUtils_newReal(value) THPUtils_newReal_FLOAT(value) +#define THPFloatUtils_checkReal(object) THPUtils_checkReal_FLOAT(object) +#define THPFloatUtils_unpackReal(object) \ + (float)THPUtils_unpackReal_FLOAT(object) +#define THPFloatUtils_newReal(value) THPUtils_newReal_FLOAT(value) +#define THPHalfUtils_checkReal(object) THPUtils_checkReal_FLOAT(object) +#define THPHalfUtils_unpackReal(object) \ + (at::Half) THPUtils_unpackReal_FLOAT(object) +#define THPHalfUtils_newReal(value) PyFloat_FromDouble(value) +#define THPHalfUtils_newAccreal(value) THPUtils_newReal_FLOAT(value) +#define THPComplexDoubleUtils_checkReal(object) \ + THPUtils_checkReal_COMPLEX(object) +#define THPComplexDoubleUtils_unpackReal(object) \ + THPUtils_unpackReal_COMPLEX(object) +#define THPComplexDoubleUtils_newReal(value) THPUtils_newReal_COMPLEX(value) +#define THPComplexFloatUtils_checkReal(object) \ + THPUtils_checkReal_COMPLEX(object) +#define THPComplexFloatUtils_unpackReal(object) \ + (c10::complex)THPUtils_unpackReal_COMPLEX(object) +#define THPComplexFloatUtils_newReal(value) THPUtils_newReal_COMPLEX(value) +#define 
THPBFloat16Utils_checkReal(object) THPUtils_checkReal_FLOAT(object) +#define THPBFloat16Utils_unpackReal(object) \ + (at::BFloat16) THPUtils_unpackReal_FLOAT(object) +#define THPBFloat16Utils_newReal(value) PyFloat_FromDouble(value) +#define THPBFloat16Utils_newAccreal(value) THPUtils_newReal_FLOAT(value) + +#define THPBoolUtils_checkReal(object) THPUtils_checkReal_BOOL(object) +#define THPBoolUtils_unpackReal(object) THPUtils_unpackReal_BOOL(object) +#define THPBoolUtils_newReal(value) THPUtils_newReal_BOOL(value) +#define THPBoolUtils_checkAccreal(object) THPUtils_checkReal_BOOL(object) +#define THPBoolUtils_unpackAccreal(object) \ + (int64_t) THPUtils_unpackReal_BOOL(object) +#define THPBoolUtils_newAccreal(value) THPUtils_newReal_BOOL(value) +#define THPLongUtils_checkReal(object) THPUtils_checkReal_INT(object) +#define THPLongUtils_unpackReal(object) \ + (int64_t) THPUtils_unpackReal_INT(object) +#define THPLongUtils_newReal(value) THPUtils_newReal_INT(value) +#define THPIntUtils_checkReal(object) THPUtils_checkReal_INT(object) +#define THPIntUtils_unpackReal(object) (int)THPUtils_unpackReal_INT(object) +#define THPIntUtils_newReal(value) THPUtils_newReal_INT(value) +#define THPShortUtils_checkReal(object) THPUtils_checkReal_INT(object) +#define THPShortUtils_unpackReal(object) (short)THPUtils_unpackReal_INT(object) +#define THPShortUtils_newReal(value) THPUtils_newReal_INT(value) +#define THPCharUtils_checkReal(object) THPUtils_checkReal_INT(object) +#define THPCharUtils_unpackReal(object) (char)THPUtils_unpackReal_INT(object) +#define THPCharUtils_newReal(value) THPUtils_newReal_INT(value) +#define THPByteUtils_checkReal(object) THPUtils_checkReal_INT(object) +#define THPByteUtils_unpackReal(object) \ + (unsigned char)THPUtils_unpackReal_INT(object) +#define THPByteUtils_newReal(value) THPUtils_newReal_INT(value) +// quantized types +#define THPQUInt8Utils_checkReal(object) THPUtils_checkReal_INT(object) +#define THPQUInt8Utils_unpackReal(object) 
(int)THPUtils_unpackReal_INT(object) +#define THPQUInt8Utils_newReal(value) THPUtils_newReal_INT(value) +#define THPQInt8Utils_checkReal(object) THPUtils_checkReal_INT(object) +#define THPQInt8Utils_unpackReal(object) (int)THPUtils_unpackReal_INT(object) +#define THPQInt8Utils_newReal(value) THPUtils_newReal_INT(value) +#define THPQInt32Utils_checkReal(object) THPUtils_checkReal_INT(object) +#define THPQInt32Utils_unpackReal(object) (int)THPUtils_unpackReal_INT(object) +#define THPQInt32Utils_newReal(value) THPUtils_newReal_INT(value) +#define THPQUInt4x2Utils_checkReal(object) THPUtils_checkReal_INT(object) +#define THPQUInt4x2Utils_unpackReal(object) (int)THPUtils_unpackReal_INT(object) +#define THPQUInt4x2Utils_newReal(value) THPUtils_newReal_INT(value) +#define THPQUInt2x4Utils_checkReal(object) THPUtils_checkReal_INT(object) +#define THPQUInt2x4Utils_unpackReal(object) (int)THPUtils_unpackReal_INT(object) +#define THPQUInt2x4Utils_newReal(value) THPUtils_newReal_INT(value) + +/* + From https://github.com/python/cpython/blob/v3.7.0/Modules/xxsubtype.c + If compiled as a shared library, some compilers don't allow addresses of + Python objects defined in other libraries to be used in static PyTypeObject + initializers. The DEFERRED_ADDRESS macro is used to tag the slots where such + addresses appear; the module init function that adds the PyTypeObject to the + module must fill in the tagged slots at runtime. The argument is for + documentation -- the macro ignores it. 
+*/ +#define DEFERRED_ADDRESS(ADDR) nullptr + +TORCH_PYTHON_API void THPUtils_setError(const char* format, ...); +TORCH_PYTHON_API void THPUtils_invalidArguments( + PyObject* given_args, + PyObject* given_kwargs, + const char* function_name, + size_t num_options, + ...); + +bool THPUtils_checkIntTuple(PyObject* arg); +std::vector THPUtils_unpackIntTuple(PyObject* arg); + +TORCH_PYTHON_API void THPUtils_addPyMethodDefs( + std::vector& vector, + PyMethodDef* methods); + +int THPUtils_getCallable(PyObject* arg, PyObject** result); + +typedef THPPointer THPGeneratorPtr; +typedef class THPPointer THPStoragePtr; + +TORCH_PYTHON_API std::vector THPUtils_unpackLongs(PyObject* arg); +PyObject* THPUtils_dispatchStateless( + PyObject* tensor, + const char* name, + PyObject* args, + PyObject* kwargs); + +template +struct mod_traits {}; + +template +struct mod_traits<_real, std::enable_if_t>> { + static _real mod(_real a, _real b) { + return fmod(a, b); + } +}; + +template +struct mod_traits<_real, std::enable_if_t>> { + static _real mod(_real a, _real b) { + return a % b; + } +}; + +void setBackCompatBroadcastWarn(bool warn); +bool getBackCompatBroadcastWarn(); + +void setBackCompatKeepdimWarn(bool warn); +bool getBackCompatKeepdimWarn(); +bool maybeThrowBackCompatKeepdimWarn(char* func); + +// NB: This is in torch/csrc/cuda/utils.cpp, for whatever reason +#ifdef USE_CUDA +std::vector> +THPUtils_PySequence_to_CUDAStreamList(PyObject* obj); +#endif + +void storage_fill(const at::Storage& self, uint8_t value); +void storage_set(const at::Storage& self, ptrdiff_t idx, uint8_t value); +uint8_t storage_get(const at::Storage& self, ptrdiff_t idx); + +#endif