==================================================================================================== SOURCE CODE FILE: framelocals_mapping.h LINES: 1 SIZE: 2.81 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\dynamo\framelocals_mapping.h ENCODING: utf-8
```h
#pragma once

#include <torch/csrc/utils/python_compat.h>

#ifdef __cplusplus

#include <string>
#include <unordered_map>

#include <torch/csrc/dynamo/utils.h>
#include <torch/csrc/utils/pybind.h>

extern "C" {

#if IS_PYTHON_3_11_PLUS
using FrameLocalsFrameType = _PyInterpreterFrame;
#else
using FrameLocalsFrameType = PyFrameObject;
#endif // IS_PYTHON_3_11_PLUS

/**
 * Utility to view a frame's localsplus (locals + cells + freevars)
 * in C/C++ and Python, without changing the state of the frame.
 *
 * Notes on usage:
 * - C/C++ can directly read the frame's localsplus using an index.
 * - Cell/free variables are unboxed.
 * - Can be converted into a dict for use in Python.
 *   The dict is constructed once per FrameLocalsMapping, lazily.
 * - Lifetime should not exceed the lifetime of the frame.
 *
 * How do guards use FrameLocalsMapping?
 * - When a guard accesses a frame's localsplus, we find the index of the
 *   variable name in the frame's code object and create a
 *   FrameLocalsGuardAccessor.
 * - We create a FrameLocalsMapping for the frame that we pass on to guard eval.
 * - LeafGuards/GuardManagers/GuardAccessors now need to define how they
 *   handle FrameLocalsMapping. By default, the FrameLocalsMapping is converted
 *   to a Python dict and the guard check is performed on the resulting dict.
 * - Some guard checks don't actually depend on the input arguments, e.g. they
 *   only check global state. In this case, no dict conversion of
 *   FrameLocalsMapping is done.
 * - FrameLocalsGuardAccessor is like DictGetItemGuardAccessor, except it knows
 *   how to handle FrameLocalsMapping - by using the framelocals variable name
 *   index that it was given when it was built.
 */
typedef struct VISIBILITY_HIDDEN FrameLocalsMapping {
 private:
  py::object _code_obj;
  // Can't use localsplus directly due to closure variables:
  // - in 3.11+, the closure vars are in the frame's closure object and
  //   the corresponding localsplus entry is nullptr
  // - regardless of Python version, we need to unbox the cell variable
  std::vector<py::handle> _framelocals;
  py::object _dict{py::none()};

  void _realize_dict();

 public:
  explicit FrameLocalsMapping(FrameLocalsFrameType* frame);

  PyObject* get(int idx);

  bool dict_realized() const {
    return _dict.is_none();
  }

  // Borrowed reference
  PyDictObject* to_dict() {
    if (this->dict_realized()) {
      _realize_dict();
    }
    return (PyDictObject*)_dict.ptr();
  }
} FrameLocalsMapping;

#else

// opaque type for C
typedef struct FrameLocalsMapping FrameLocalsMapping;

#endif

// Borrowed reference
PyDictObject* framelocals_mapping_to_dict(FrameLocalsMapping* map);

#ifdef __cplusplus
} // extern "C"

py::tuple code_framelocals_names(py::handle code);
#endif
```
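A minimal usage sketch (not part of the header above): a hypothetical caller on the guard-evaluation path reads one local by index and only materializes the lazy dict when a Python-level check needs it. `inspect_frame_locals` is illustrative and assumes it is compiled inside the PyTorch extension, where these headers and CPython internals are available.

```cpp
#include <torch/csrc/dynamo/framelocals_mapping.h>

// Hypothetical helper, for illustration only.
PyObject* inspect_frame_locals(FrameLocalsFrameType* frame) {
  // View the frame's localsplus without mutating the frame.
  FrameLocalsMapping mapping(frame);

  // Fast path: index-based access; no dict has been materialized yet.
  PyObject* first_local = mapping.get(0); // borrowed, lifetime tied to the frame
  (void)first_local;

  // Slow path: build the dict once (lazily) for Python-level guard checks.
  PyDictObject* as_dict = mapping.to_dict(); // borrowed reference
  return (PyObject*)as_dict;
}
```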
==================================================================================================== SOURCE CODE FILE: guards.h LINES: 1 SIZE: 2.99 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\dynamo\guards.h ENCODING: utf-8
```h
#pragma once

#include <c10/core/GradMode.h>
#include <torch/csrc/dynamo/framelocals_mapping.h>
#include <torch/csrc/python_headers.h>
#include <torch/csrc/utils/pybind.h>

namespace torch::dynamo {

PyObject* torch_c_dynamo_guards_init();

// interfaces for extra_state and eval_frame.c because RootGuardManager class
// is not visible there.
void* convert_to_root_guard_manager(py::object root);
bool run_root_guard_manager(void* root, FrameLocalsMapping* f_locals);

struct LocalState {
  // TLS state that changes operators
  c10::impl::LocalDispatchKeySet dispatch_modifier;
  c10::DispatchKeySet override_dispatch_key_set;
  bool grad_mode_enabled;

  at::DispatchKeySet apply(at::DispatchKeySet ks) const {
    if (override_dispatch_key_set.empty()) {
      return (ks | dispatch_modifier.included_) - dispatch_modifier.excluded_;
    } else {
      return override_dispatch_key_set;
    }
  }

  LocalState()
      : dispatch_modifier(c10::impl::tls_local_dispatch_key_set()),
        override_dispatch_key_set(c10::BackendComponent::InvalidBit),
        grad_mode_enabled(at::GradMode::is_enabled()) {}

  void overrideDispatchKeySet(c10::DispatchKeySet ks) {
    override_dispatch_key_set = ks;
  }
};

class TensorCheck {
 public:
  TensorCheck(
      const LocalState& state,
      PyTypeObject* pt,
      const at::Tensor& v,
      std::vector<std::optional<c10::SymInt>> dynamic_dims_sizes,
      std::vector<std::optional<c10::SymInt>> dynamic_dims_strides);

  TensorCheck(
      const LocalState& state,
      PyTypeObject* pt,
      c10::DispatchKeySet dispatch_key_set,
      at::ScalarType dtype,
      at::DeviceIndex device_index,
      bool requires_grad,
      std::vector<std::optional<c10::SymInt>> dynamic_dims_sizes,
      std::vector<std::optional<c10::SymInt>> dynamic_dims_strides);

  bool check(const LocalState& state, const at::Tensor& v);
  bool check(
      const LocalState& state,
      const c10::DispatchKeySet& dispatch_key_set,
      const at::ScalarType& dtype,
      const c10::Device& device,
      const c10::SymIntArrayRef& dynamic_dims_sizes,
      const c10::SymIntArrayRef& dynamic_dims_strides,
      const bool& requires_grad);
  std::string check_verbose(
      const LocalState& state,
      const at::Tensor& v,
      const std::string& tensor_name);

  PyTypeObject* pytype;

 private:
  uint64_t dispatch_key_; // DispatchKeySet includes device/layout
  at::ScalarType dtype_;
  // Note(voz): While dispatch_key_ is sufficiently representative of a device
  // In that keys are more granular AND device specific - they do not
  // necessarily capture device indices correctly.
  at::DeviceIndex device_index_;
  bool requires_grad_;
  // NB: These are unset if dynamic shapes is enabled.
  std::vector<std::optional<c10::SymInt>> sizes_;
  std::vector<std::optional<c10::SymInt>> strides_;
  // Not strictly required for dense tensors, but nested tensors need it.
  int64_t dim_;
};

} // namespace torch::dynamo
```
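An illustrative sketch (not from the header): build a `TensorCheck` against an example tensor with fully static shapes, then re-check a second tensor. `example_tensor_guard` and the `nullptr` pytype are assumptions for the sketch; in Dynamo the guarded Python type object is passed there, and linking requires libtorch_python where the constructor is defined.

```cpp
#include <torch/csrc/dynamo/guards.h>
#include <ATen/ATen.h>
#include <optional>
#include <vector>

bool example_tensor_guard() {
  at::Tensor guarded = at::ones({2, 3});

  // Snapshots TLS dispatch keys and grad mode at guard-build time.
  torch::dynamo::LocalState state;

  // Static shapes: record the concrete sizes/strides of the guarded tensor.
  std::vector<std::optional<c10::SymInt>> sizes, strides;
  for (auto s : guarded.sizes()) sizes.emplace_back(c10::SymInt(s));
  for (auto s : guarded.strides()) strides.emplace_back(c10::SymInt(s));

  torch::dynamo::TensorCheck check(
      state, /*pt=*/nullptr, guarded, std::move(sizes), std::move(strides));

  // Same dtype/shape/strides/device, so the check is expected to pass;
  // a {3, 2} tensor would fail the size comparison.
  at::Tensor candidate = at::zeros({2, 3});
  return check.check(state, candidate);
}
```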
==================================================================================================== SOURCE CODE FILE: init.h LINES: 1 SIZE: 0.19 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\dynamo\init.h ENCODING: utf-8
```h
#pragma once

// C2039 MSVC
#include <pybind11/complex.h>
#include <torch/csrc/utils/pybind.h>

#include <Python.h>

namespace torch::dynamo {
void initDynamoBindings(PyObject* torch);
}
```
==================================================================================================== SOURCE CODE FILE: python_compiled_autograd.h LINES: 1 SIZE: 0.22 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\dynamo\python_compiled_autograd.h ENCODING: utf-8
```h
#pragma once

#include <torch/csrc/utils/python_stub.h>

// see [Note: Compiled Autograd]
namespace torch::dynamo::autograd {
PyObject* torch_c_dynamo_compiled_autograd_init();
} // namespace torch::dynamo::autograd
```
==================================================================================================== SOURCE CODE FILE: utils.h LINES: 1 SIZE: 0.52 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\dynamo\utils.h ENCODING: utf-8
```h
#pragma once

#include <torch/csrc/python_headers.h>

// C2039 MSVC
#include <pybind11/complex.h>
#include <torch/csrc/utils/pybind.h>

#include <Python.h>

// The visibility attribute is to avoid a warning about storing a field in the
// struct that has a different visibility (from pybind) than the struct.
#ifdef _WIN32
#define VISIBILITY_HIDDEN
#else
#define VISIBILITY_HIDDEN __attribute__((visibility("hidden")))
#endif

namespace torch::dynamo {
PyObject* torch_c_dynamo_utils_init();
} // namespace torch::dynamo
```
==================================================================================================== SOURCE CODE FILE: array_ref.h LINES: 1 SIZE: 0.30 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\inductor\aoti_include\array_ref.h ENCODING: utf-8
```h
#pragma once

#include <torch/csrc/inductor/aoti_include/common.h>
#include <torch/csrc/inductor/aoti_runtime/arrayref_tensor.h>
#include <torch/csrc/inductor/aoti_runtime/thread_local.h>
#include <torch/csrc/inductor/array_ref_impl.h>
#include <torch/csrc/inductor/cpp_wrapper/device_internal/cpu.h>
```
==================================================================================================== SOURCE CODE FILE: common.h LINES: 1 SIZE: 0.48 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\inductor\aoti_include\common.h ENCODING: utf-8
```h
#pragma once

#include <array>
#include <filesystem>
#include <optional>

#include <torch/csrc/inductor/aoti_runtime/interface.h>
#include <torch/csrc/inductor/aoti_runtime/model.h>

#include <c10/util/generic_math.h>
#include <torch/csrc/inductor/aoti_runtime/scalar_to_tensor.h>

using half = at::Half;
using bfloat16 = at::BFloat16;

// Round up to the nearest multiple of 64
[[maybe_unused]] inline int64_t align(int64_t nbytes) {
  return (nbytes + 64 - 1) & -64;
}
```
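A small, standalone demonstration (not part of the header) of the rounding done by `align()`: `(nbytes + 63) & -64` rounds any byte count up to the next multiple of 64, which is the allocation granularity used for the constant blob. `align_demo` is a local copy of the same expression so the snippet compiles without the torch includes.

```cpp
#include <cassert>
#include <cstdint>

// Same expression as align() in common.h above, copied for a self-contained demo.
inline int64_t align_demo(int64_t nbytes) {
  return (nbytes + 64 - 1) & -64;
}

int main() {
  assert(align_demo(0) == 0);    // already aligned
  assert(align_demo(1) == 64);   // rounds up to the next 64-byte boundary
  assert(align_demo(64) == 64);  // exact multiples are unchanged
  assert(align_demo(65) == 128);
  return 0;
}
```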
==================================================================================================== SOURCE CODE FILE: cpu.h LINES: 1 SIZE: 0.13 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\inductor\aoti_include\cpu.h ENCODING: utf-8
```h
#pragma once

#include <torch/csrc/inductor/aoti_include/common.h>
#include <torch/csrc/inductor/cpp_wrapper/device_internal/cpu.h>
```
==================================================================================================== SOURCE CODE FILE: cuda.h LINES: 1 SIZE: 0.13 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\inductor\aoti_include\cuda.h ENCODING: utf-8
```h
#pragma once

#include <torch/csrc/inductor/aoti_include/common.h>
#include <torch/csrc/inductor/cpp_wrapper/device_internal/cuda.h>
```
==================================================================================================== SOURCE CODE FILE: xpu.h LINES: 1 SIZE: 0.13 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\inductor\aoti_include\xpu.h ENCODING: utf-8
```h
#pragma once

#include <torch/csrc/inductor/aoti_include/common.h>
#include <torch/csrc/inductor/cpp_wrapper/device_internal/xpu.h>
```
==================================================================================================== SOURCE CODE FILE: model_package_loader.h LINES: 1 SIZE: 1.32 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\inductor\aoti_package\model_package_loader.h ENCODING: utf-8
```h
#if !defined(C10_MOBILE) && !defined(ANDROID)
#pragma once

#include <ATen/Tensor.h>
#include <torch/csrc/inductor/aoti_runner/model_container_runner.h>

namespace torch::inductor {
class TORCH_API AOTIModelPackageLoader {
 public:
  AOTIModelPackageLoader(
      const std::string& model_package_path,
      const std::string& model_name = "model",
      const bool run_single_threaded = false);
  ~AOTIModelPackageLoader();

  AOTIModelContainerRunner* get_runner();
  std::unordered_map<std::string, std::string> get_metadata();

  std::vector<at::Tensor> run(
      const std::vector<at::Tensor>& inputs,
      void* stream_handle = nullptr);

  // boxed_run will steal the ownership of the input tensors
  std::vector<at::Tensor> boxed_run(
      std::vector<at::Tensor>&& inputs,
      void* stream_handle = nullptr);

  std::vector<std::string> get_call_spec();
  void load_constants(
      std::unordered_map<std::string, at::Tensor>& constants_map,
      bool use_inactive,
      bool check_full_update);
  std::vector<std::string> get_constant_fqns();

 private:
  std::string temp_dir_;
  std::unique_ptr<AOTIModelContainerRunner> runner_;
  std::unordered_map<std::string, std::string> metadata_;

  void load_metadata(const std::string& cpp_filename);
};
} // namespace torch::inductor
#endif
```
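A usage sketch (not from the source): load an AOTInductor package and run it with eager tensors. The path `"model.pt2"` and the input shape are assumptions; such a package would typically be produced ahead of time on the Python side (e.g. with `torch._inductor.aoti_compile_and_package`).

```cpp
#include <torch/csrc/inductor/aoti_package/model_package_loader.h>
#include <ATen/ATen.h>
#include <iostream>
#include <vector>

int main() {
  // Hypothetical package path; defaults select model "model", multi-threaded run.
  torch::inductor::AOTIModelPackageLoader loader("model.pt2");

  // Metadata recorded at compile time (key/value strings).
  for (const auto& [key, value] : loader.get_metadata()) {
    std::cout << key << " = " << value << '\n';
  }

  std::vector<at::Tensor> inputs = {at::randn({8, 16})};
  std::vector<at::Tensor> outputs = loader.run(inputs);
  std::cout << "num outputs: " << outputs.size() << '\n';
  return 0;
}
```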
==================================================================================================== SOURCE CODE FILE: pybind.h LINES: 1 SIZE: 0.15 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\inductor\aoti_package\pybind.h ENCODING: utf-8
```h
#include <torch/csrc/python_headers.h>

namespace torch::inductor {
void initAOTIPackageBindings(PyObject* module);
} // namespace torch::inductor
```
============================================================================================================================================================== SOURCE CODE FILE: model_container_runner.h LINES: 1 SIZE: 4.60 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\inductor\aoti_runner\model_container_runner.h ENCODING: utf-8 ```h #if !defined(C10_MOBILE) && !defined(ANDROID) #pragma once #include <ATen/Tensor.h> #include <torch/csrc/inductor/aoti_runtime/interface.h> #include <torch/csrc/inductor/aoti_torch/proxy_executor.h> // Forward declare DynamicLibrary namespace at { struct DynamicLibrary; } namespace torch::inductor { using TensorConstantMap = std::unordered_map<std::string, at::Tensor*>; class TORCH_API AOTIModelContainerRunner { public: AOTIModelContainerRunner() = delete; AOTIModelContainerRunner(const AOTIModelContainerRunner& other) = delete; AOTIModelContainerRunner(AOTIModelContainerRunner&& other) = delete; AOTIModelContainerRunner& operator=(const AOTIModelContainerRunner& other) = delete; AOTIModelContainerRunner& operator=(AOTIModelContainerRunner&& other) = delete; virtual ~AOTIModelContainerRunner(); std::vector<at::Tensor> run( const std::vector<at::Tensor>& inputs, void* stream_handle = nullptr); // boxed_run will steal the ownership of the input tensors std::vector<at::Tensor> boxed_run( std::vector<at::Tensor>&& inputs, void* stream_handle = nullptr); std::unordered_map<std::string, std::string> getConstantNamesToOriginalFQNs() const; std::unordered_map<std::string, int32_t> getConstantNamesToDtypes() const; void update_inactive_constant_buffer(const TensorConstantMap& const_map); void update_constant_buffer( std::unordered_map<std::string, at::Tensor>& tensor_map, bool use_inactive, bool validate_full_updates); void update_constant_buffer( const TensorConstantMap& const_map, bool use_inactive, bool validate_full_updates); void run_const_fold( bool use_inactive, AOTInductorStreamHandle cuda_stream_handle = nullptr); void swap_constant_buffer(); std::vector<std::string> get_call_spec(); protected: AOTIModelContainerRunner( const std::string& model_so_path, size_t num_models, const std::string& device_str, const std::string& cubin_dir, const bool run_single_threaded); virtual std::vector<at::Tensor> run_impl( std::vector<AtenTensorHandle>& input_handles, void* stream_handle); std::unique_ptr<at::DynamicLibrary> model_so_; decltype(&AOTInductorModelContainerCreateWithDevice) create_func_{nullptr}; decltype(&AOTInductorModelContainerDelete) delete_func_{nullptr}; decltype(&AOTInductorModelContainerGetNumOutputs) get_num_outputs_func_{ nullptr}; decltype(&AOTInductorModelContainerRun) run_func_{nullptr}; decltype(&AOTInductorModelContainerGetNumConstants) get_num_constants_func_{ nullptr}; decltype(&AOTInductorModelContainerGetConstantName) get_constant_name_func_{ nullptr}; decltype(&AOTInductorModelContainerGetConstantOriginalFQN) get_constant_original_fqn_func_{nullptr}; decltype(&AOTInductorModelContainerGetConstantDtype) get_constant_dtype_func_{ nullptr}; decltype(&AOTInductorModelContainerUpdateConstantBuffer) update_constant_buffer_func_{nullptr}; decltype(&AOTInductorModelContainerUpdateInactiveConstantBuffer) update_inactive_constant_buffer_func_{nullptr}; decltype(&AOTInductorModelContainerRunConstantFolding) run_const_fold_func_{ nullptr}; decltype(&AOTInductorModelContainerSwapConstantBuffer) swap_constant_buffer_func_{nullptr}; decltype(&AOTInductorModelContainerGetCallSpec) get_call_spec_func_{nullptr}; 
AOTInductorModelContainerHandle container_handle_ = nullptr; AOTIProxyExecutorHandle proxy_executor_handle_; private: std::unique_ptr<torch::aot_inductor::ProxyExecutor> proxy_executor_; }; using CreateAOTIModelRunnerFunc = std::unique_ptr<AOTIModelContainerRunner> (*)( const std::string& model_so_path, size_t num_models, const std::string& device_str, const std::string& bin_dir, const bool run_single_threaded); // Return a global map "device name" -> "aoti model runner create function" for // all registered in AOTI external backends TORCH_API std::unordered_map<std::string, CreateAOTIModelRunnerFunc>& getAOTIModelRunnerRegistry(); // To register a new external backend in AOTI one needs to create an instance of // this struct. It is not thread-safe. Becase it is expected to be called during // the initialization of the program. struct TORCH_API RegisterAOTIModelRunner { RegisterAOTIModelRunner( const std::string& name, CreateAOTIModelRunnerFunc create_aoti_model_runner_fn) { getAOTIModelRunnerRegistry()[name] = create_aoti_model_runner_fn; } }; } // namespace torch::inductor #endif ```
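A hedged sketch of how an out-of-tree backend could hook into the registry declared above. `MyBackendRunner`, `create_runner`, and the `"mydevice"` key are hypothetical names; the factory signature mirrors `CreateAOTIModelRunnerFunc`, and registration happens via a static `RegisterAOTIModelRunner` instance at program initialization.

```cpp
#include <torch/csrc/inductor/aoti_runner/model_container_runner.h>
#include <memory>
#include <string>

namespace my_backend {

// Minimal runner for a hypothetical device; the base class handles dlopen,
// constant management, and the run() entry points.
class MyBackendRunner : public torch::inductor::AOTIModelContainerRunner {
 public:
  MyBackendRunner(
      const std::string& model_so_path,
      size_t num_models,
      const std::string& device_str,
      const std::string& bin_dir,
      const bool run_single_threaded)
      : AOTIModelContainerRunner(
            model_so_path, num_models, device_str, bin_dir, run_single_threaded) {}
};

std::unique_ptr<torch::inductor::AOTIModelContainerRunner> create_runner(
    const std::string& model_so_path,
    size_t num_models,
    const std::string& device_str,
    const std::string& bin_dir,
    const bool run_single_threaded) {
  return std::make_unique<MyBackendRunner>(
      model_so_path, num_models, device_str, bin_dir, run_single_threaded);
}

// Static registration: fills the "device name" -> factory map before any
// model is loaded. Not thread-safe, as noted in the header.
static torch::inductor::RegisterAOTIModelRunner register_my_backend(
    "mydevice", &create_runner);

} // namespace my_backend
```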
==================================================================================================== SOURCE CODE FILE: model_container_runner_cpu.h LINES: 1 SIZE: 0.49 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\inductor\aoti_runner\model_container_runner_cpu.h ENCODING: utf-8
```h
#if !defined(C10_MOBILE) && !defined(ANDROID)
#pragma once

#include <torch/csrc/inductor/aoti_runner/model_container_runner.h>

namespace torch::inductor {
class TORCH_API AOTIModelContainerRunnerCpu : public AOTIModelContainerRunner {
 public:
  AOTIModelContainerRunnerCpu(
      const std::string& model_so_path,
      size_t num_models = 1,
      const bool run_single_threaded = false);

  ~AOTIModelContainerRunnerCpu() override;
};
} // namespace torch::inductor
#endif
```
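A minimal CPU usage sketch (not from the source), assuming an AOTInductor-compiled shared library at the hypothetical path `"model.so"`: construct the runner and call the inherited `run()` with eager tensors.

```cpp
#include <torch/csrc/inductor/aoti_runner/model_container_runner_cpu.h>
#include <ATen/ATen.h>
#include <vector>

int main() {
  // One model instance; pass a larger num_models to allow concurrent runs.
  torch::inductor::AOTIModelContainerRunnerCpu runner("model.so");

  std::vector<at::Tensor> inputs = {at::randn({4, 4})};
  std::vector<at::Tensor> outputs = runner.run(inputs);
  return outputs.empty() ? 1 : 0;
}
```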
==================================================================================================== SOURCE CODE FILE: model_container_runner_cuda.h LINES: 1 SIZE: 1.14 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\inductor\aoti_runner\model_container_runner_cuda.h ENCODING: utf-8
```h
#if !defined(C10_MOBILE) && !defined(ANDROID)
#pragma once

#include <c10/cuda/CUDAStream.h>
#include <torch/csrc/inductor/aoti_runner/model_container_runner.h>

namespace torch::inductor {

// NOTICE: Following APIs are subject to change due to active development
// We provide NO BC guarantee for these APIs
// NOLINTNEXTLINE(cppcoreguidelines-special-member-functions)
class TORCH_CUDA_CPP_API AOTIModelContainerRunnerCuda
    : public AOTIModelContainerRunner {
 public:
  // @param device_str: cuda device string, e.g. "cuda", "cuda:0"
  AOTIModelContainerRunnerCuda(
      const std::string& model_so_path,
      size_t num_models = 1,
      const std::string& device_str = "cuda",
      const std::string& cubin_dir = "",
      const bool run_single_threaded = false);

  ~AOTIModelContainerRunnerCuda() override;

  std::vector<at::Tensor> run_impl(
      std::vector<AtenTensorHandle>& input_handles,
      void* stream_handle) override;

  std::vector<at::Tensor> run_with_cuda_stream(
      const std::vector<at::Tensor>& inputs,
      const at::cuda::CUDAStream& cuda_stream);
};

} // namespace torch::inductor
#endif
```
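A usage sketch for the CUDA runner (not from the source), assuming a CUDA build and a compiled `"model.so"` (hypothetical path). Running on an explicit stream via `run_with_cuda_stream` lets the caller keep AOTI inference on the same stream as other queued work.

```cpp
#include <torch/csrc/inductor/aoti_runner/model_container_runner_cuda.h>
#include <c10/cuda/CUDAStream.h>
#include <ATen/ATen.h>
#include <vector>

int main() {
  torch::inductor::AOTIModelContainerRunnerCuda runner(
      "model.so", /*num_models=*/1, /*device_str=*/"cuda:0");

  // Use the current stream on device 0 (any c10 CUDA stream works here).
  auto stream = c10::cuda::getCurrentCUDAStream(0);

  std::vector<at::Tensor> inputs = {
      at::randn({4, 4}, at::TensorOptions().device(at::kCUDA, 0))};

  std::vector<at::Tensor> outputs = runner.run_with_cuda_stream(inputs, stream);
  return outputs.empty() ? 1 : 0;
}
```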
==================================================================================================== SOURCE CODE FILE: model_container_runner_xpu.h LINES: 1 SIZE: 1.22 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\inductor\aoti_runner\model_container_runner_xpu.h ENCODING: utf-8
```h
#if !defined(C10_MOBILE) && !defined(ANDROID)
#pragma once

#include <c10/xpu/XPUStream.h>
#include <torch/csrc/inductor/aoti_runner/model_container_runner.h>

namespace torch::inductor {

// NOTICE: Following APIs are subject to change due to active development
// We provide NO BC guarantee for these APIs
// Here we use C10_EXPORT because libtorch_python needs this symbol to be
// exported; `TORCH_API` and `TORCH_XPU_API` do not export the symbol in a
// Windows build.
class C10_EXPORT AOTIModelContainerRunnerXpu : public AOTIModelContainerRunner {
 public:
  // @param device_str: xpu device string, e.g. "xpu", "xpu:0"
  AOTIModelContainerRunnerXpu(
      const std::string& model_so_path,
      size_t num_models = 1,
      const std::string& device_str = "xpu",
      const std::string& kernel_bin_dir = "",
      const bool run_single_threaded = false);

  ~AOTIModelContainerRunnerXpu() override;

  std::vector<at::Tensor> run_impl(
      std::vector<AtenTensorHandle>& input_handles,
      void* stream_handle) override;

  std::vector<at::Tensor> run_with_xpu_stream(
      const std::vector<at::Tensor>& inputs,
      const at::xpu::XPUStream& xpu_stream);
};

} // namespace torch::inductor
#endif
```
==================================================================================================== SOURCE CODE FILE: pybind.h LINES: 1 SIZE: 0.15 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\inductor\aoti_runner\pybind.h ENCODING: utf-8
```h
#include <torch/csrc/python_headers.h>

namespace torch::inductor {
void initAOTIRunnerBindings(PyObject* module);
} // namespace torch::inductor
```
======================================================================================================================================================== SOURCE CODE FILE: arrayref_tensor.h LINES: 1 SIZE: 10.88 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\inductor\aoti_runtime\arrayref_tensor.h ENCODING: utf-8 ```h #pragma once #include <torch/csrc/inductor/aoti_runtime/utils.h> #include <torch/csrc/inductor/aoti_torch/c/shim.h> #include <cassert> #include <cstdint> #include <cstring> namespace torch::aot_inductor { // Can't use c10::ArrayRef because it's not truly header-only and // pulls in other c10 headers. This is (sadly) copy-pasted and // adapted. template <typename T> class MiniArrayRef final { public: using iterator = T*; using const_iterator = const T*; using size_type = size_t; using value_type = T; using reverse_iterator = std::reverse_iterator<iterator>; private: /// The start of the array, in an external buffer. T* Data; /// The number of elements. size_type Length; public: /// @name Constructors /// @{ /// Construct an empty MiniArrayRef. /* implicit */ constexpr MiniArrayRef() : Data(nullptr), Length(0) {} /// Construct an MiniArrayRef from a single element. // TODO Make this explicit constexpr MiniArrayRef(const T& OneElt) : Data(&OneElt), Length(1) {} /// Construct an MiniArrayRef from a pointer and length. constexpr MiniArrayRef(T* data, size_t length) : Data(data), Length(length) {} /// Construct an MiniArrayRef from a range. constexpr MiniArrayRef(T* begin, T* end) : Data(begin), Length(end - begin) {} template < typename Container, typename = std::enable_if_t<std::is_same_v< std::remove_const_t<decltype(std::declval<Container>().data())>, T*>>> /* implicit */ MiniArrayRef(Container& container) : Data(container.data()), Length(container.size()) {} /// Construct an MiniArrayRef from a std::vector. // The enable_if stuff here makes sure that this isn't used for // std::vector<bool>, because MiniArrayRef can't work on a std::vector<bool> // bitfield. template <typename A> /* implicit */ MiniArrayRef(const std::vector<T, A>& Vec) : Data(Vec.data()), Length(Vec.size()) { static_assert( !std::is_same_v<T, bool>, "MiniArrayRef<bool> cannot be constructed from a std::vector<bool> bitfield."); } /// Construct an MiniArrayRef from a std::array template <size_t N> /* implicit */ constexpr MiniArrayRef(std::array<T, N>& Arr) : Data(Arr.data()), Length(N) {} /// Construct an MiniArrayRef from a C array. template <size_t N> // NOLINTNEXTLINE(*c-array*) /* implicit */ constexpr MiniArrayRef(T (&Arr)[N]) : Data(Arr), Length(N) {} // /// Construct an MiniArrayRef from an empty C array. /* implicit */ constexpr MiniArrayRef(const volatile void* Arr) : Data(nullptr), Length(0) {} /// Construct an MiniArrayRef from a std::initializer_list. /* implicit */ constexpr MiniArrayRef(const std::initializer_list<T>& Vec) : Data( std::begin(Vec) == std::end(Vec) ? static_cast<T*>(nullptr) : std::begin(Vec)), Length(Vec.size()) {} /// @} /// @name Simple Operations /// @{ constexpr iterator begin() const { return Data; } constexpr iterator end() const { return Data + Length; } // These are actually the same as iterator, since MiniArrayRef only // gives you const iterators. 
constexpr const_iterator cbegin() const { return Data; } constexpr const_iterator cend() const { return Data + Length; } constexpr reverse_iterator rbegin() const { return reverse_iterator(end()); } constexpr reverse_iterator rend() const { return reverse_iterator(begin()); } /// empty - Check if the array is empty. constexpr bool empty() const { return Length == 0; } constexpr T* data() const { return Data; } /// size - Get the array size. constexpr size_t size() const { return Length; } /// equals - Check for element-wise equality. constexpr bool equals(MiniArrayRef RHS) const { return Length == RHS.Length && std::equal(begin(), end(), RHS.begin()); } /// @} /// @name Operator Overloads /// @{ constexpr const T& operator[](size_t Index) const { return Data[Index]; } /// Disallow accidental assignment from a temporary. /// /// The declaration here is extra complicated so that "arrayRef = {}" /// continues to select the move assignment operator. template <typename U> std::enable_if_t<std::is_same_v<U, T>, MiniArrayRef<T>>& operator=( // NOLINTNEXTLINE(cppcoreguidelines-missing-std-forward) U&& Temporary) = delete; /// Disallow accidental assignment from a temporary. /// /// The declaration here is extra complicated so that "arrayRef = {}" /// continues to select the move assignment operator. template <typename U> std::enable_if_t<std::is_same_v<U, T>, MiniArrayRef<T>>& operator=( std::initializer_list<U>) = delete; }; using MiniIntArrayRef = MiniArrayRef<int64_t>; static_assert( sizeof(MiniIntArrayRef) == sizeof(void*) + sizeof(size_t), "changing the size of MiniArrayRef breaks ABI compatibility!"); inline bool is_contiguous_strides_for_shape( int64_t ndim, const int64_t* strides_ptr, const int64_t* sizes_ptr) { int64_t z = 1; for (int64_t d = ndim - 1; d >= 0; d--) { const auto& size_d = sizes_ptr[d]; if (size_d != 1) { if (strides_ptr[d] == z) { z *= size_d; } else { return false; } } } return true; } // Shim for AOTI generated code to pretend a raw array works like an // AtenTensorHandle. template <typename T> class ArrayRefTensor { public: ArrayRefTensor() = default; explicit ArrayRefTensor( MiniArrayRef<T> arr, MiniArrayRef<const int64_t> sizes, MiniArrayRef<const int64_t> strides, int32_t device_type, int32_t device_idx) : arrayRef_(arr), sizes_(sizes), strides_(strides), device_type_(device_type), device_idx_(device_idx) { assert(sizes.size() == strides.size()); assert(is_contiguous_strides_for_shape( sizes.size(), strides.data(), sizes.data())); } AtenTensorHandle expensiveCopyToTensor() const { AtenTensorHandle result = nullptr; AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_empty_strided( sizes_.size(), sizes_.data(), strides_.data(), aoti_torch_dtype<std::remove_const_t<T>>(), device_type_, device_idx_, &result)); void* dataPtr = nullptr; AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_get_data_ptr(result, &dataPtr)); std::memcpy(dataPtr, data(), numel() * sizeof(T)); return result; } // We need to look the same as RAIIAtenTensorHandle, which returns // an owning AtenTensorHandle from release(). So, we allocate one! AtenTensorHandle release() { return expensiveCopyToTensor(); } AtenTensorHandle borrowAsTensor() const { AtenTensorHandle result = nullptr; AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_create_tensor_from_blob_v2( data(), sizes_.size(), sizes_.data(), strides_.data(), 0, aoti_torch_dtype<std::remove_const_t<T>>(), device_type_, device_idx_, &result, aoti_torch_layout_strided(), nullptr, 0)); return result; } // We don't need to free any memory. 
void reset() {} auto sizes() const { return sizes_; } auto strides() const { return strides_; } auto device_type() const { return device_type_; } auto device_idx() const { return device_idx_; } T* data() const { return arrayRef_.data(); } auto numel() const { return arrayRef_.size(); } void set_arrayref(MiniArrayRef<T> new_arrayref) { arrayRef_ = new_arrayref; } private: MiniArrayRef<T> arrayRef_; // We expect generated code to have statically available sizes & // strides for us. MiniArrayRef<const int64_t> sizes_; MiniArrayRef<const int64_t> strides_; int32_t device_type_ = 0; int32_t device_idx_ = 0; // We continue to zero-initialize this field in case we repurpose // the space later; having predictable contents can only help. int32_t unusedDoNotRemoveForABICompatibility_ = 0; }; static_assert( sizeof(ArrayRefTensor<int>) == 3 * sizeof(MiniIntArrayRef) + 3 * sizeof(int32_t) + (alignof(ArrayRefTensor<int>) > 4 ? sizeof(int32_t) : 0), "changing the size of ArrayRefTensor breaks ABI compatibility!"); template <typename T> inline ArrayRefTensor<T> reinterpret_tensor_wrapper( const ArrayRefTensor<T>& self, int64_t ndim, const int64_t* sizes_ptr, const int64_t* strides_ptr, int64_t storage_offset) { // REVIEW: we should add a way to build the DSO in debug mode during // tests so we can have checks like this! assert(is_contiguous_strides_for_shape(ndim, strides_ptr, sizes_ptr)); return ArrayRefTensor<T>( MiniArrayRef<T>( self.data() + storage_offset, self.numel() - storage_offset), MiniArrayRef<const int64_t>(sizes_ptr, ndim), MiniArrayRef<const int64_t>(strides_ptr, ndim), self.device_type(), self.device_idx()); } template <typename T> inline T* get_data_ptr_wrapper(ArrayRefTensor<T>& tensor) { return tensor.data(); } template <typename T> inline T* get_data_ptr_wrapper(const MiniArrayRef<T>& arr) { return arr.data(); } template <typename T> inline const ArrayRefTensor<T>& unwrap_raii_handle_if_needed( const ArrayRefTensor<T>& tensor) { return tensor; } template <typename T> inline ArrayRefTensor<T>& unwrap_raii_handle_if_needed( ArrayRefTensor<T>& tensor) { return tensor; } template <typename T> inline const ArrayRefTensor<T>& wrap_with_raii_handle_if_needed( const ArrayRefTensor<T>& tensor) { return tensor; } template <typename T> inline ArrayRefTensor<T>& wrap_with_raii_handle_if_needed( ArrayRefTensor<T>& tensor) { return tensor; } template <typename T> inline ArrayRefTensor<T> wrap_with_raii_handle_if_needed( ArrayRefTensor<T>&& tensor) { return std::move(tensor); } template <typename T> inline RAIIAtenTensorHandle expensive_copy_to_tensor_if_needed( const ArrayRefTensor<T>& tensor) { return tensor.expensiveCopyToTensor(); } inline AtenTensorHandle expensive_copy_to_tensor_if_needed( AtenTensorHandle handle) { return handle; } template <typename T> const T& copy_arrayref_tensor_to_tensor(const T& t) { return t; } template <typename T> RAIIAtenTensorHandle copy_arrayref_tensor_to_tensor( const ArrayRefTensor<T>& art) { return art.expensiveCopyToTensor(); } template <typename T> const T& borrow_arrayref_tensor_as_tensor(const T& t) { return t; } template <typename T> RAIIAtenTensorHandle borrow_arrayref_tensor_as_tensor( const ArrayRefTensor<T>& art) { return art.borrowAsTensor(); } } // namespace torch::aot_inductor ```
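A small illustration (not from the source) of two helpers defined in arrayref_tensor.h above: `MiniArrayRef` as a non-owning view over an existing buffer, and the row-major contiguity check that `ArrayRefTensor`'s constructor asserts. The snippet only assumes the header is available on the include path.

```cpp
#include <torch/csrc/inductor/aoti_runtime/arrayref_tensor.h>
#include <cassert>
#include <cstdint>
#include <vector>

int main() {
  using torch::aot_inductor::MiniArrayRef;
  using torch::aot_inductor::is_contiguous_strides_for_shape;

  std::vector<float> storage(6, 1.0f);
  MiniArrayRef<float> view(storage); // non-owning: just pointer + length
  assert(view.size() == 6 && view.data() == storage.data());

  // A 2x3 row-major tensor has strides {3, 1}; {1, 2} is a transposed view.
  const int64_t sizes[] = {2, 3};
  const int64_t contiguous[] = {3, 1};
  const int64_t transposed[] = {1, 2};
  assert(is_contiguous_strides_for_shape(2, contiguous, sizes));
  assert(!is_contiguous_strides_for_shape(2, transposed, sizes));
  return 0;
}
```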
==================================================================================================== SOURCE CODE FILE: device_utils.h LINES: 1 SIZE: 2.33 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\inductor\aoti_runtime\device_utils.h ENCODING: utf-8
```h
#pragma once

// WARNING: Be careful when adding new includes here. This header will be used
// in model.so, and should not refer to any aten/c10 headers except the stable
// C ABI defined in torch/csrc/inductor/aoti_torch/c/shim.h. The same rule
// applies to other files under torch/csrc/inductor/aoti_runtime/.

#ifdef USE_CUDA

// FIXME: Currently, CPU and CUDA backend are mutually exclusive.
// This is a temporary workaround. We need a better way to support
// multi devices.

#include <cuda.h>
#include <cuda_runtime_api.h>

#define AOTI_RUNTIME_DEVICE_CHECK(EXPR)                      \
  do {                                                       \
    const cudaError_t code = EXPR;                           \
    const char* msg = cudaGetErrorString(code);              \
    if (code != cudaSuccess) {                               \
      throw std::runtime_error(                              \
          std::string("CUDA error: ") + std::string(msg));   \
    }                                                        \
  } while (0)

namespace torch::aot_inductor {

using DeviceStreamType = cudaStream_t;

} // namespace torch::aot_inductor

#elif defined(USE_XPU)

#include <level_zero/ze_api.h>
#include <sycl/sycl.hpp>
#include <sstream>

#define AOTI_RUNTIME_DEVICE_CHECK(EXPR)                                     \
  do {                                                                      \
    const ze_result_t status = EXPR;                                        \
    if (status != ZE_RESULT_SUCCESS) {                                      \
      std::stringstream ss;                                                 \
      ss << "L0 runtime error: " << std::hex << std::uppercase << status;   \
      throw std::runtime_error(ss.str());                                   \
    }                                                                       \
  } while (0)

namespace torch::aot_inductor {

using DeviceStreamType = sycl::queue*;

} // namespace torch::aot_inductor

#else

#define AOTI_RUNTIME_DEVICE_CHECK(EXPR)              \
  bool ok = EXPR;                                    \
  if (!ok) {                                         \
    throw std::runtime_error("CPU runtime error");   \
  }

namespace torch::aot_inductor {

using DeviceStreamType = void*;

} // namespace torch::aot_inductor

#endif // USE_CUDA
```
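An illustrative sketch (not from the source) of how AOTI runtime code wraps raw device calls with the macro above. It assumes a `USE_CUDA` build, where the macro turns a non-success `cudaError_t` into a `std::runtime_error` carrying `cudaGetErrorString()`; `checked_device_alloc`/`checked_device_free` are hypothetical helpers.

```cpp
#include <torch/csrc/inductor/aoti_runtime/device_utils.h>
#include <cstddef>
#include <stdexcept>

// Hypothetical helpers around the raw CUDA runtime API.
void* checked_device_alloc(std::size_t nbytes) {
  void* ptr = nullptr;
  // Throws on any cudaError_t other than cudaSuccess.
  AOTI_RUNTIME_DEVICE_CHECK(cudaMalloc(&ptr, nbytes));
  return ptr;
}

void checked_device_free(void* ptr) {
  AOTI_RUNTIME_DEVICE_CHECK(cudaFree(ptr));
}
```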
================================================================================================================================================== SOURCE CODE FILE: interface.h LINES: 1 SIZE: 8.54 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\inductor\aoti_runtime\interface.h ENCODING: utf-8 ```h #pragma once // WARNING: Be careful when adding new includes here. This header will be used // in model.so, and should not refer to any aten/c10 headers except the stable // C ABI defined in torch/csrc/inductor/aoti_torch/c/shim.h. The same rule // applies to other files under torch/csrc/inductor/aoti_runtime/. #include <torch/csrc/inductor/aoti_runtime/utils.h> extern "C" { struct AOTInductorModelOpaque; using AOTInductorModelHandle = AOTInductorModelOpaque*; struct AOTInductorModelContainerOpaque; using AOTInductorModelContainerHandle = AOTInductorModelContainerOpaque*; struct AOTInductorStreamOpaque; using AOTInductorStreamHandle = AOTInductorStreamOpaque*; struct AOTInductorConstantMap; using AOTInductorConstantMapHandle = AOTInductorConstantMap*; // TODO: Deprecate this API. This was kept for BC compatibility. // Please use AOTInductorModelContainerCreateWithDevice instead. AOTIRuntimeError AOTInductorModelContainerCreate( AOTInductorModelContainerHandle* container_handle, size_t num_models, bool is_cpu, const char* cubin_dir); // Creates an AOTInductor model container. The parameter num_models // specifies the number of model instances that may be run concurrently for // the same input model. // `device_str` MUST NOT be nullptr. It must be a valid device string, e.g. // "cpu", "cuda", "cuda:0", etc. If the device index is not specified for CUDA // device, runtime will use the device index returned by // "cudaGetDevice(&device_idx)" AOTIRuntimeError AOTInductorModelContainerCreateWithDevice( AOTInductorModelContainerHandle* container_handle, size_t num_models, const char* device_str, const char* cubin_dir); // Deletes the AOTInductor model container. AOTIRuntimeError AOTInductorModelContainerDelete( AOTInductorModelContainerHandle container_handle); // Runs the inference. AOTIRuntimeError AOTInductorModelContainerRun( AOTInductorModelContainerHandle container_handle, AtenTensorHandle* input_handles, // array of input AtenTensorHandle; handles // are stolen; the array itself is borrowed size_t num_inputs, AtenTensorHandle* output_handles, // array for writing output AtenTensorHandle; handles // will be stolen by the caller; the array itself is // borrowed size_t num_outputs, AOTInductorStreamHandle stream_handle, AOTIProxyExecutorHandle proxy_executor_handle); // Single-threaded variant of previous. AOTIRuntimeError AOTInductorModelContainerRunSingleThreaded( AOTInductorModelContainerHandle container_handle, AtenTensorHandle* input_handles, // array of input AtenTensorHandle; handles // are stolen; the array itself is borrowed size_t num_inputs, AtenTensorHandle* output_handles, // array for writing output AtenTensorHandle; handles // will be stolen by the caller; the array itself is // borrowed size_t num_outputs, AOTInductorStreamHandle stream_handle, AOTIProxyExecutorHandle proxy_executor_handle); // Retrieves the number of constants for the model. AOTIRuntimeError AOTInductorModelContainerGetNumConstants( AOTInductorModelContainerHandle container_handle, size_t* num_constants); // Retrieves a constant's name. // idx is the index of the internal's constants. 
// Need idx < num_constants from AOTInductorModelContainerGetNumConstants AOTIRuntimeError AOTInductorModelContainerGetConstantName( AOTInductorModelContainerHandle container_handle, size_t idx, const char** name); // Retrieves a constant's original FQN. // idx is the index of the internal's constants. // Need idx < num_constants from AOTInductorModelContainerGetNumConstants AOTIRuntimeError AOTInductorModelContainerGetConstantOriginalFQN( AOTInductorModelContainerHandle container_handle, size_t idx, const char** original_fqn); // Retrieves whether a constant is from folded. // idx is the index of the internal's constants. // Need idx < num_constants from AOTInductorModelContainerGetNumConstants AOTIRuntimeError AOTInductorModelContainerGetConstantFromFolded( AOTInductorModelContainerHandle container_handle, size_t idx, bool* from_folded); // Retrieves the inductor constant type. // idx is the index of the internal's constants. // Need idx < num_constants from AOTInductorModelContainerGetNumConstants AOTIRuntimeError AOTInductorModelContainerGetConstantType( AOTInductorModelContainerHandle container_handle, size_t idx, int32_t* type); // Retrieves a constant's dtype. // idx is the index of the internal's constants. // Need idx < num_constants from AOTInductorModelContainerGetNumConstants AOTIRuntimeError AOTInductorModelContainerGetConstantDtype( AOTInductorModelContainerHandle container_handle, size_t idx, int32_t* dtype); // Setup the constant buffer in model container with provided ConstantMap // use_inactive should be set as true if the inactive buffer is to be updated. // validate_full_update checks if all constants are included in the ConstantMap AOTIRuntimeError AOTInductorModelContainerUpdateConstantBuffer( AOTInductorModelContainerHandle container_handle, AOTInductorConstantMapHandle constant_map_handle, bool use_inactive, bool validate_full_update); // Setup the inactive constant buffer in model container with provided // ConstantMap AOTIRuntimeError AOTInductorModelContainerUpdateInactiveConstantBuffer( AOTInductorModelContainerHandle container_handle, AOTInductorConstantMapHandle constant_map_handle); // Run constant folding on constant buffer. AOTIRuntimeError AOTInductorModelContainerRunConstantFolding( AOTInductorModelContainerHandle container_handle, bool use_inactive, AOTInductorStreamHandle stream_handle, AOTIProxyExecutorHandle proxy_executor_handle); // Swap the constant buffer being used to the inactive one. AOTIRuntimeError AOTInductorModelContainerSwapConstantBuffer( AOTInductorModelContainerHandle container_handle); // Retrieves the number of inputs for the model. AOTIRuntimeError AOTInductorModelContainerGetNumInputs( AOTInductorModelContainerHandle container_handle, size_t* ret_num_inputs); // Retrieves the input name at the given index. AOTIRuntimeError AOTInductorModelContainerGetInputName( AOTInductorModelContainerHandle container_handle, size_t input_idx, const char** ret_input_names); // Retrieves the number of outputs for the model. AOTIRuntimeError AOTInductorModelContainerGetNumOutputs( AOTInductorModelContainerHandle container_handle, size_t* ret_num_outputs); // Retrieves the output name at the given index. AOTIRuntimeError AOTInductorModelContainerGetOutputName( AOTInductorModelContainerHandle container_handle, size_t output_idx, const char** ret_output_names); // Creates an AOTInductorModel instance. This is a thin and light wrapper // around the compiled model; it doesn't handle concurrency, queueing, device // management, etc. 
Use this if bare-metal performance is needed and you are // willing to handle other "management" aspects yourself. // // constant_map_handle is an opaque type to satisfy the C ABI. It should be a // std::unordered_map<std::string, at::Tensor*>*. AOTIRuntimeError AOTInductorModelCreate( AOTInductorModelHandle* model_handle, AOTInductorConstantMapHandle constant_map_handle); // Run an AOTInductorModel (see AOTInductorModelCreate for when one should use // this function versus AOTInductorModelContainerRun). AOTIRuntimeError AOTInductorModelRun( AOTInductorModelHandle model_handle, AtenTensorHandle* input_handles, AtenTensorHandle* output_handles); // Replace AOTInductorModel's constant map. Note it doesn't handle concurrency // so be sure to handle ordering if AOTInductorModelRun is ran concurrently. AOTIRuntimeError AOTInductorModelUpdateConstantsMap( AOTInductorModelHandle model_handle, AOTInductorConstantMapHandle constant_map_handle); // Delete an AOTInductorModel created by AOTInductorModelCreate. AOTIRuntimeError AOTInductorModelDelete(AOTInductorModelHandle model_handle); AOTIRuntimeError AOTInductorModelGetNumOutputs( AOTInductorModelHandle model_handle, size_t* ret_num_outputs); AOTIRuntimeError AOTInductorModelContainerGetCallSpec( AOTInductorModelContainerHandle container_handle, const char** in_spec, const char** out_spec); } // extern "C" ```
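A hedged sketch of the minimal container lifecycle exposed by the C interface above: create with a device string, run once, delete. Preparing the input/output handle arrays (via the aoti_torch shim) is elided; `run_container_once` is a hypothetical wrapper, and the success check assumes the `AOTI_RUNTIME_SUCCESS` convention from aoti_runtime/utils.h, which interface.h includes.

```cpp
#include <torch/csrc/inductor/aoti_runtime/interface.h>
#include <cstddef>

void run_container_once(
    AtenTensorHandle* inputs,
    size_t num_inputs,
    AtenTensorHandle* outputs,
    size_t num_outputs,
    AOTInductorStreamHandle stream,
    AOTIProxyExecutorHandle proxy_executor) {
  AOTInductorModelContainerHandle container = nullptr;

  // One model instance on CUDA device 0; no precompiled-kernel directory.
  AOTIRuntimeError err = AOTInductorModelContainerCreateWithDevice(
      &container, /*num_models=*/1, "cuda:0", /*cubin_dir=*/nullptr);
  if (err != AOTI_RUNTIME_SUCCESS) {
    return;
  }

  // Input handles are stolen by the call; output handles are written into the
  // caller-provided array, and ownership of those passes to the caller.
  AOTInductorModelContainerRun(
      container, inputs, num_inputs, outputs, num_outputs, stream,
      proxy_executor);

  AOTInductorModelContainerDelete(container);
}
```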
============================================================================================================================================== SOURCE CODE FILE: model.h LINES: 1 SIZE: 22.77 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\inductor\aoti_runtime\model.h ENCODING: utf-8 ```h #pragma once #include <dlfcn.h> #include <fcntl.h> #include <sys/mman.h> #include <unistd.h> #include <optional> #include <regex> #include <stdexcept> #include <unordered_map> #include <utility> // WARNING: Be careful when adding new includes here. This header will be used // in model.so, and should not refer to any aten/c10 headers except the stable // C ABI defined in torch/csrc/inductor/aoti_torch/c/shim.h. The same rule // applies to other files under torch/csrc/inductor/aoti_runtime/. #include <torch/csrc/inductor/aoti_runtime/device_utils.h> #ifdef USE_XPU #include <torch/csrc/inductor/aoti_runtime/utils_xpu.h> #else #include <torch/csrc/inductor/aoti_runtime/utils.h> #endif #define AOTI_RUNTIME_CHECK(EXPR, MSG) \ do { \ bool ok = EXPR; \ if (!ok) { \ throw std::runtime_error(MSG); \ } \ } while (0) // At codegen time, we write out a binary file called constants.bin. // We then turn the raw binary to an object file that exposes this // symbol and link it into the final .so. // For information on the binary format, see `man objcopy`, under // the "binary-architecture" flag: // https://man7.org/linux/man-pages/man1/objcopy.1.html // todo: use #embed in C++ 23 once available // The constants are NOT readonly because they may be mutated. // NOLINTNEXTLINE(*array*) extern uint8_t _binary_constants_bin_start[]; // NOLINTNEXTLINE(*array*) extern uint8_t _binary_constants_bin_end[]; #if defined(USE_CUDA) || defined(USE_XPU) // Compute required blob size with 64-alignment if on GPU. #define AOTI_CONST_ALIGNMENT 64 #else // Use 64-alignment (use something >=64)for better performance on CPU. #define AOTI_CONST_ALIGNMENT 64 #endif namespace { using RAIIDataPtr = std::unique_ptr<void, std::function<void(void*)>>; #ifdef USE_CUDA RAIIDataPtr RAII_gpuMalloc(size_t num_bytes) { void* data_ptr; AOTI_RUNTIME_DEVICE_CHECK(cudaMalloc((void**)&data_ptr, num_bytes)); auto deleter = [](void* ptr) { AOTI_RUNTIME_DEVICE_CHECK(cudaFree(ptr)); }; return RAIIDataPtr(data_ptr, deleter); } #elif defined(USE_XPU) RAIIDataPtr RAII_gpuMalloc(size_t num_bytes) { sycl::queue* queue_ptr = nullptr; aoti_torch_get_current_sycl_queue((void**)&queue_ptr); void* data_ptr = sycl::malloc_device(num_bytes, *queue_ptr); auto deleter = [queue_ptr](void* ptr) { sycl::free(ptr, *queue_ptr); }; return RAIIDataPtr(data_ptr, deleter); } #else RAIIDataPtr RAII_cpuMalloc(size_t num_bytes) { void* data_ptr = std::malloc(num_bytes); if (!data_ptr) { throw std::bad_alloc(); } auto deleter = [](void* ptr) { std::free(ptr); }; return RAIIDataPtr(data_ptr, deleter); } #endif // USE_CUDA } // anonymous namespace namespace torch::aot_inductor { enum ConstantType : uint8_t { Unknown = 0, Parameter = 1, Buffer = 2, TensorConstant = 3, FoldedConstant = 4, }; using ConstantMap = std::unordered_map<std::string, RAIIAtenTensorHandle>; // valid device strs are: cpu, cuda, cuda:0, cuda:1, ... 
// Update the list here if more devices are supported in the future inline void parse_device_str( const std::string& device_str, int32_t& device_type, int32_t& device_idx) { std::regex re("(cpu|cuda|xpu)(:([0-9]+))?"); std::smatch sm; bool matched = std::regex_match(device_str, sm, re); AOTI_RUNTIME_CHECK(matched, "Invalid device: " + device_str); if (sm[1].str() == "cpu") { device_type = aoti_torch_device_type_cpu(); } else if (sm[1].str() == "cuda") { device_type = aoti_torch_device_type_cuda(); #ifdef USE_XPU } else if (sm[1].str() == "xpu") { device_type = aoti_torch_device_type_xpu(); #endif } else { AOTI_RUNTIME_CHECK(false, "Invalid device: " + device_str); } if (sm[3].matched) { device_idx = stoi(sm[3].str()); } else { device_idx = -1; } } // Defines the base class for AOTInductorModel, which is generated by the // AOTInductor cpp codegen. Since we do not need dynamic dispatch, we rely // on curiously recurring template pattern (CRTP) to save some runtime // v-table overhead. The generated AOTInductorModel is specialized with // methods such as run_impl. template <typename Model> class AOTInductorModelBase { public: AOTInductorModelBase( size_t num_inputs, size_t num_outputs, size_t num_constants, const std::string& device_str, std::optional<std::string> cubin_dir, bool include_weights = true) : inputs_info_(num_inputs), outputs_info_(num_outputs), constants_info_(num_constants), cubin_dir_(std::move(cubin_dir)), include_weights(include_weights) { parse_device_str(device_str, device_type_, device_idx_); #ifdef USE_CUDA if (device_idx_ == -1) { AOTI_RUNTIME_DEVICE_CHECK(cudaGetDevice(&device_idx_)); } else { // If device_idx_ is passed in, we need to set the current device to it AOTI_RUNTIME_DEVICE_CHECK(cudaSetDevice(device_idx_)); } #endif // USE_CUDA #ifdef USE_XPU if (device_idx_ == -1) { aoti_torch_get_current_xpu_device(&device_idx_); } else { aoti_torch_set_current_xpu_device(device_idx_); } #endif // USE_XPU } // NOLINTNEXTLINE(modernize-use-equals-default) ~AOTInductorModelBase() { #ifdef USE_CUDA if (run_finished_) { auto code = cudaEventDestroy(*run_finished_); if (code != cudaSuccess) { std::cerr << "Failed to destroy CUDA event in AOTInductor model: " << cudaGetErrorString(code) << std::endl; } } #endif // USE_CUDA #ifdef USE_XPU if (run_finished_) { (*run_finished_)->wait_and_throw(); delete *run_finished_; } #endif // USE_XPU } AOTInductorModelBase(AOTInductorModelBase&&) = delete; AOTInductorModelBase& operator=(AOTInductorModelBase&&) = delete; AOTInductorModelBase(const AOTInductorModelBase&) = delete; AOTInductorModelBase& operator=(const AOTInductorModelBase&) = delete; void run( AtenTensorHandle* input_handles, // array of input AtenTensorHandle; handles // are stolen; the array itself is borrowed AtenTensorHandle* output_handles, // array for writing output AtenTensorHandle; handles // will be stolen by the caller; the array itself is // borrowed DeviceStreamType stream, AOTIProxyExecutorHandle proxy_executor) { #ifdef USE_CUDA if (!run_finished_) { cudaEvent_t run_finished; AOTI_RUNTIME_DEVICE_CHECK(cudaEventCreate(&run_finished)); run_finished_.emplace(run_finished); } #elif defined(USE_XPU) if (run_finished_) { (*run_finished_)->wait_and_throw(); delete *run_finished_; run_finished_.reset(); } #else // !USE_CUDA && !USE_XPU run_finished_ = false; #endif auto* model = static_cast<Model*>(this); model->run_impl(input_handles, output_handles, stream, proxy_executor); #ifdef USE_CUDA AOTI_RUNTIME_DEVICE_CHECK(cudaEventRecord(*run_finished_, stream)); #elif 
defined(USE_XPU) run_finished_ = std::make_optional<sycl::event*>(new sycl::event( static_cast<sycl::queue*>(stream)->ext_oneapi_submit_barrier())); #else // !USE_CUDA && !USE_XPU run_finished_ = true; #endif // USE_CUDA } // Non-thread-aware variant of run(). Obviously unsafe to use in a threaded // environment :) void run_single_threaded( AtenTensorHandle* input_handles, // array of input AtenTensorHandle; handles // are stolen; the array itself is borrowed AtenTensorHandle* output_handles, // array for writing output AtenTensorHandle; handles // will be stolen by the caller; the array itself is // borrowed DeviceStreamType stream, AOTIProxyExecutorHandle proxy_executor) { // don't bother with any of the run_finished stuff; this is unsafe to call // in a threaded context auto* model = static_cast<Model*>(this); model->run_impl(input_handles, output_handles, stream, proxy_executor); } std::unordered_map<std::string, AtenTensorHandle> run_const_fold( DeviceStreamType stream, AOTIProxyExecutorHandle proxy_executor, bool initialization = false) { #ifdef USE_CUDA if (!run_finished_) { cudaEvent_t run_finished; AOTI_RUNTIME_DEVICE_CHECK(cudaEventCreate(&run_finished)); run_finished_.emplace(run_finished); } #elif defined(USE_XPU) if (run_finished_) { (*run_finished_)->wait_and_throw(); delete *run_finished_; run_finished_.reset(); } #else // !USE_CUDA && !USE_XPU run_finished_ = false; #endif auto* model = static_cast<Model*>(this); auto folded_constants = model->const_run_impl(stream, proxy_executor, initialization); #ifdef USE_CUDA AOTI_RUNTIME_DEVICE_CHECK(cudaEventRecord(*run_finished_, stream)); #elif defined(USE_XPU) // sycl::queue* queue_ptr = nullptr; // aoti_torch_get_current_sycl_queue((void**)&queue_ptr); run_finished_ = std::make_optional<sycl::event*>(new sycl::event( static_cast<sycl::queue*>(stream)->ext_oneapi_submit_barrier())); #else // !USE_CUDA && !USE_XPU run_finished_ = true; #endif // USE_CUDA return folded_constants; } void load_constants() { size_t num_constants = this->num_constants(); constants_map_->reserve(num_constants); std::vector<size_t> constants_internal_offset(num_constants); size_t blob_size = 0; compute_constant_blob(blob_size, constants_internal_offset); #if defined(USE_CUDA) || defined(USE_XPU) constant_blob_ = RAII_gpuMalloc(blob_size); #else constant_blob_ = RAII_cpuMalloc(blob_size); #endif if (!include_weights) { return; } size_t bytes_read = 0; for (size_t i = 0; i < num_constants; i++) { bool from_folded = this->constant_from_folded(i); if (from_folded) { continue; } std::string name = this->constant_name(i); size_t data_size = this->constant_data_size(i); uint8_t* internal_ptr = (data_size != 0) ? constant_ptr( constants_internal_offset[i], bytes_read, data_size, from_folded) : nullptr; bytes_read += data_size; // Create at::Tensor from copied memory. 
auto dtype = this->constant_dtype(i); auto ndim = this->constant_ndim(i); auto size = this->constant_shape(i); auto stride = this->constant_stride(i); auto offset = this->constant_offset(i); auto layout = this->constant_layout(i); auto opaque_metadata_ptr = this->opaque_metadata(i); auto opaque_metadata_size = this->opaque_metadata_size(i); AtenTensorHandle tensor_handle = nullptr; AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_create_tensor_from_blob_v2( internal_ptr, ndim, size, stride, offset, dtype, device_type_, device_idx_, &tensor_handle, layout, opaque_metadata_ptr, opaque_metadata_size)); constants_map_->emplace(std::move(name), tensor_handle); } if (constants_map_) { this->update_constants_array_from_map(); } } RAIIDataPtr&& release_constant_blob() { return std::move(constant_blob_); } std::shared_ptr<std::vector<ConstantHandle>> get_constants_array() { return constants_; } int32_t get_device_type() const { return device_type_; } int32_t get_device_idx() const { return device_idx_; } uint8_t* constant_ptr( size_t constant_offset, size_t bytes_read, size_t data_size, bool skip_copy) { auto* constants_ptr = static_cast<uint8_t*>(constant_blob_.get()); uint8_t* internal_ptr = constants_ptr + constant_offset; // TODO: Handle shared storage case. if (!skip_copy) { #ifdef USE_XPU sycl::queue* queue_ptr = nullptr; aoti_torch_get_current_sycl_queue((void**)&queue_ptr); queue_ptr ->memcpy(internal_ptr, _get_constants_start() + bytes_read, data_size) .wait(); #elif USE_CUDA AOTI_RUNTIME_DEVICE_CHECK(cudaMemcpy( internal_ptr, _get_constants_start() + bytes_read, data_size, cudaMemcpyHostToDevice)); #else memcpy(internal_ptr, _get_constants_start() + bytes_read, data_size); #endif } return internal_ptr; } void compute_constant_blob( size_t& blob_size, std::vector<size_t>& constants_internal_offset) { size_t num_constants = this->num_constants(); blob_size = 0; for (size_t i = 0; i < num_constants; i++) { size_t data_size = this->constant_data_size(i); if (data_size % AOTI_CONST_ALIGNMENT) { data_size = AOTI_CONST_ALIGNMENT + (data_size / AOTI_CONST_ALIGNMENT) * AOTI_CONST_ALIGNMENT; } constants_internal_offset[i] = blob_size; blob_size += data_size; } } size_t num_inputs() const { return inputs_info_.size(); } size_t num_outputs() const { return outputs_info_.size(); } size_t num_constants() const { return constants_info_.size(); } const char* input_name(int64_t idx) const { return inputs_info_.at(idx).name; } const char* output_name(int64_t idx) const { return outputs_info_.at(idx).name; } const char* constant_name(int64_t idx) const { return constants_info_.at(idx).name; } size_t constant_ndim(int64_t idx) { return constants_info_.at(idx).shape.size(); } const int64_t* constant_shape(int64_t idx) const { return constants_info_.at(idx).shape.data(); } const int64_t* constant_stride(int64_t idx) const { return constants_info_.at(idx).stride.data(); } int32_t constant_dtype(int64_t idx) const { return constants_info_.at(idx).dtype; } int32_t constant_layout(int64_t idx) const { return constants_info_.at(idx).layout; } size_t constant_offset(int64_t idx) const { return constants_info_.at(idx).offset; } size_t constant_data_size(int64_t idx) const { return constants_info_.at(idx).data_size; } const char* constant_original_fqn(int64_t idx) const { return constants_info_.at(idx).original_fqn; } const uint8_t* opaque_metadata(int64_t idx) const { return constants_info_.at(idx).opaque_metadata.data(); } size_t opaque_metadata_size(int64_t idx) { return constants_info_.at(idx).opaque_metadata.size(); } bool 
constant_from_folded(int64_t idx) const { return constants_info_.at(idx).from_folded; } int32_t constant_type(int64_t idx) const { return constants_info_.at(idx).type; } const char* get_in_spec() const { return in_spec_.c_str(); } const char* get_out_spec() const { return out_spec_.c_str(); } void update_constants_array_from_map() { if (!constants_map_) { throw std::runtime_error{ "constants_map_ was not ready when constants_ is trying to be constructed from it!"}; } if (!constants_) { constants_ = std::make_shared<std::vector<ConstantHandle>>(constants_info_.size()); } else { constants_->resize(constants_info_.size()); } int idx = 0; for (const auto& info : constants_info_) { const auto it = constants_map_->find(info.name); if (it != constants_map_->end()) { constants_->at(idx) = ConstantHandle(it->second); } idx++; } } void update_constants_map( std::shared_ptr<ConstantMap> constants_map, bool remap_constants_array = true) { constants_map_ = std::move(constants_map); if (remap_constants_array) { update_constants_array_from_map(); } } // This function allows us to update the constants_ that is used to look up // the corresponding constant tensor during runtime. void update_constants_array( std::shared_ptr<std::vector<ConstantHandle>> constants_array) { constants_ = std::move(constants_array); } /// Returns true if the model is complete. bool is_finished() { #ifdef USE_CUDA if (!run_finished_) { throw std::runtime_error{"Model CUDA event was not initialized"}; } auto event_status = cudaEventQuery(*run_finished_); if (event_status == cudaSuccess) { return true; } else if (event_status == cudaErrorNotReady) { return false; } throw std::runtime_error( std::string("The model did not finish successfully. Error: ") + cudaGetErrorString(cudaGetLastError())); #elif defined(USE_XPU) if (!run_finished_) { throw std::runtime_error{"Model XPU event was not initialized"}; } using namespace sycl::info; return (*run_finished_)->get_info<event::command_execution_status>() == event_command_status::complete; #else // !USE_CUDA && !USE_XPU return run_finished_; #endif // USE_CUDA } /// Synchronizes completion event. 
void wait_for_completion() { #ifdef USE_CUDA if (!run_finished_) { throw std::runtime_error{"Model event was not initialized"}; } AOTI_RUNTIME_DEVICE_CHECK(cudaEventSynchronize(*run_finished_)); #endif // USE_CUDA #ifdef USE_XPU if (!run_finished_) { throw std::runtime_error{"Model event was not initialized"}; } (*run_finished_)->wait_and_throw(); #endif } protected: uint8_t* _get_constants_start() { #ifndef USE_MMAP_SELF // NOLINTNEXTLINE(*const-cast*) return const_cast<uint8_t*>(_binary_constants_bin_start); #else if (self_mmap) { return self_mmap; } Dl_info dl_info; // get pointer to constant which are appended to the binary AOTI_RUNTIME_CHECK( dladdr(__func__, &dl_info), "Can't find shared library name"); int fd = open(dl_info.dli_fname, O_RDONLY); AOTI_RUNTIME_CHECK(fd >= 0, "Shared library file cannot be opened"); auto fsize = lseek(fd, 0, SEEK_END); auto weights_size = reinterpret_cast<const uint64_t*>(_binary_constants_bin_start)[0]; auto magic_number = reinterpret_cast<const uint64_t*>(_binary_constants_bin_start)[1]; auto weights_offset = fsize - weights_size; AOTI_RUNTIME_CHECK( (weights_offset & 0x3fff) == 0, "weights_offset must be aligned to 16K boundary"); auto ptr = mmap( NULL, weights_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, weights_offset); close(fd); AOTI_RUNTIME_CHECK(ptr != MAP_FAILED, "mmap() failed"); self_mmap = static_cast<uint8_t*>(ptr); AOTI_RUNTIME_CHECK( reinterpret_cast<uint64_t*>( self_mmap + weights_size - sizeof(uint64_t))[0] == magic_number, "Weigths data seems corrupt"); return self_mmap; #endif } struct ParamInfo { const char* name = nullptr; }; struct ConstInfo { const char* name = nullptr; std::vector<int64_t> shape; std::vector<int64_t> stride; int32_t dtype{}; int64_t offset{}; size_t data_size{}; int32_t layout{}; std::vector<uint8_t> opaque_metadata; int64_t opaque_metadata_size{}; const char* original_fqn = nullptr; bool from_folded{}; int32_t type{}; }; std::vector<ParamInfo> inputs_info_; std::vector<ParamInfo> outputs_info_; std::vector<ConstInfo> constants_info_; std::string in_spec_; std::string out_spec_; std::shared_ptr<ConstantMap> constants_map_; std::shared_ptr<std::vector<ConstantHandle>> constants_; // Holds the blob storage for constants' at::Tensor. RAIIDataPtr constant_blob_; #ifdef USE_MMAP_SELF uint8_t* self_mmap = NULL; #endif // A directory with CUDA binary files, e.g. compiled kernels, etc. const std::optional<std::string> cubin_dir_; // This is the flag that implies whether the weight is included in the model. // If True, we would prepare the weight when loading the model, otherwise the // model will be loaded without weights, and need to be provided by the user. bool include_weights; // Record if the model finishes an inference run so that its owning // AOTModelContainer can re-use this instance. #ifdef USE_CUDA std::optional<cudaEvent_t> run_finished_; #elif defined(USE_XPU) std::optional<sycl::event*> run_finished_; #else // !USE_CUDA bool run_finished_{}; #endif // Generated model uses this device index to create CUDA guards. int32_t device_type_{}; int32_t device_idx_{}; }; // Codegen-ed classes can derive from this to keep pointers to loaded kernels. 
class AOTInductorModelKernelsBase { public: virtual ~AOTInductorModelKernelsBase() = default; }; class AOTInductorModel : public AOTInductorModelBase<AOTInductorModel> { public: AOTInductorModel( std::shared_ptr<ConstantMap> constants_map, std::shared_ptr<std::vector<ConstantHandle>> constants_array, const std::string& device_str, std::optional<std::string> cubin_dir, bool include_weights = true); std::unordered_map<std::string, AtenTensorHandle> const_run_impl( DeviceStreamType stream, AOTIProxyExecutorHandle proxy_executor, bool initialization = false); void _const_run_impl( std::vector<AtenTensorHandle>& output_handles, DeviceStreamType stream, AOTIProxyExecutorHandle proxy_executor); void run_impl( AtenTensorHandle* input_handles, // array of input AtenTensorHandle; handles // are stolen; the array itself is borrowed AtenTensorHandle* output_handles, // array for writing output AtenTensorHandle; handles // will be stolen by the caller; the array itself is // borrowed DeviceStreamType stream, AOTIProxyExecutorHandle proxy_executor); template <typename Inputs, typename Outputs> Outputs run_impl_minimal_arrayref_interface( const Inputs& inputs, DeviceStreamType stream, AOTIProxyExecutorHandle proxy_executor); static std::unique_ptr<AOTInductorModel> Create( std::shared_ptr<ConstantMap> constants_map, std::shared_ptr<std::vector<ConstantHandle>> constants_array, const std::string& device_str, std::optional<std::string> cubin_dir) { return std::make_unique<AOTInductorModel>( std::move(constants_map), std::move(constants_array), device_str, std::move(cubin_dir)); } private: std::unique_ptr<AOTInductorModelKernelsBase> kernels_; }; } // namespace torch::aot_inductor ```
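The AOTInductorModel class above only declares its interface; the definitions are emitted by AOT Inductor codegen into the compiled model.so. Below is a minimal sketch of how a caller could instantiate it, mirroring what the container in the next file does. The "cpu" device string is an illustrative value, and this will not link without a codegen-ed implementation.

```cpp
#include <torch/csrc/inductor/aoti_runtime/model.h>

using namespace torch::aot_inductor;

// Sketch only: build the shared constant structures and ask the generated
// model class to populate them from the weights embedded in the binary.
std::unique_ptr<AOTInductorModel> make_model() {
  auto constants_map = std::make_shared<ConstantMap>();
  auto constants_array = std::make_shared<std::vector<ConstantHandle>>();

  auto model = AOTInductorModel::Create(
      constants_map,
      constants_array,
      /*device_str=*/"cpu",
      /*cubin_dir=*/std::nullopt);

  // Fills constants_map/constants_array and allocates the constant blob,
  // as AOTInductorModelContainer does during construction.
  model->load_constants();
  return model;
}
```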
======================================================================================================================================================== SOURCE CODE FILE: model_container.h LINES: 2 SIZE: 22.47 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\inductor\aoti_runtime\model_container.h ENCODING: utf-8 ```h #pragma once #include <algorithm> #include <condition_variable> #include <deque> #include <mutex> #include <shared_mutex> // WARNING: Be careful when adding new includes here. This header will be used // in model.so, and should not refer to any aten/c10 headers except the stable // C ABI defined in torch/csrc/inductor/aoti_torch/c/shim.h. The same rule // applies to other files under torch/csrc/inductor/aoti_runtime/. #include <torch/csrc/inductor/aoti_runtime/model.h> namespace torch::aot_inductor { class AOTInductorModelContainer { public: AOTInductorModelContainer( size_t num_models, const std::string& device_str, const std::optional<std::string>& cubin_dir = std::nullopt) { constants_map_ = std::make_shared<ConstantMap>(); constants_array_ = std::make_shared<std::vector<ConstantHandle>>(); models_.reserve(num_models); available_models_.reserve(num_models); for (size_t i = 0; i < num_models; ++i) { models_.push_back(AOTInductorModel::Create( constants_map_, constants_array_, device_str, cubin_dir)); available_models_.push_back(models_.back().get()); } // Note that the all following fields (input_names_, output_names, // etc) can be filled in by the AOT // codegen. However, we choose to query such information from // the owned AOTInductorModel for a couple of reasons: // * simplify the codegen templates // * reduce information fragmentation and duplication // * the initialization process below is done only once when the container // is constructed, so it would have little performance impact auto* model = available_models_[0]; size_t num_inputs = model->num_inputs(); input_names_.reserve(num_inputs); for (size_t i = 0; i < num_inputs; i++) { input_names_.emplace_back(model->input_name(static_cast<int64_t>(i))); } size_t num_outputs = model->num_outputs(); output_names_.reserve(num_outputs); for (size_t i = 0; i < num_outputs; i++) { output_names_.emplace_back(model->output_name(static_cast<int64_t>(i))); } model->load_constants(); constant_blob_ = model->release_constant_blob(); constants_internal_offset_.resize(model->num_constants()); model->compute_constant_blob(blob_size_, constants_internal_offset_); for (auto& model : models_) { model->update_constants_map(constants_map_); } in_spec_ = model->get_in_spec(); out_spec_ = model->get_out_spec(); } void run( AtenTensorHandle* input_handles, // array of input AtenTensorHandle; handles // are stolen; the array itself is borrowed AtenTensorHandle* output_handles, // array for writing output AtenTensorHandle; handles // will be stolen by the caller; the array itself is // borrowed DeviceStreamType stream, AOTIProxyExecutorHandle proxy_executor) { std::shared_lock model_lk(model_exec_mutex_); auto* model = get_available_model(); if (!constant_folded_) { // At this point, constant is not ready yet. We need to call constant // folding before we execute the model. We obtain a unique lock at this // point to make sure constant is ready for all. model_lk.unlock(); std::unique_lock constants_folding_lk(model_exec_mutex_); // Double locking to make sure constant folding is only ran once. 
if (!constant_folded_) { auto folded_const_map = model->run_const_fold( stream, proxy_executor, /* initialization = */ true); update_constant_buffer( std::move(folded_const_map), /* use_inactive = */ false, /* validate_full_update = */ false); constant_folded_ = true; } constants_folding_lk.unlock(); model_lk.lock(); } try { model->run(input_handles, output_handles, stream, proxy_executor); } catch (...) { std::lock_guard lk(models_mutex_); available_models_.push_back(model); throw; } { std::lock_guard lk(models_mutex_); pending_models_.push_back(model); } pending_models_available_.notify_one(); } // Non-thread-aware variant of run(). Obviously unsafe to use in a threaded // environment :) void run_single_threaded( AtenTensorHandle* input_handles, // array of input AtenTensorHandle; handles // are stolen; the array itself is borrowed AtenTensorHandle* output_handles, // array for writing output AtenTensorHandle; handles // will be stolen by the caller; the array itself is // borrowed DeviceStreamType stream, AOTIProxyExecutorHandle proxy_executor) { auto* model = available_models_[0]; if (!constant_folded_) { auto folded_const_map = model->run_const_fold( stream, proxy_executor, /* initialization = */ true); update_constant_buffer( std::move(folded_const_map), /* use_inactive = */ false, /* validate_full_update = */ false); constant_folded_ = true; } model->run_single_threaded( input_handles, output_handles, stream, proxy_executor); } size_t num_constants() const { if (this->num_models() == 0) { throw std::runtime_error("No available models in container!"); } return models_[0]->num_constants(); } // retrieve the constant name of constants_info_[idx] const char* constant_name(size_t idx) const { if (this->num_models() == 0) { throw std::runtime_error("No available models in container!"); } return models_[0]->constant_name(static_cast<int64_t>(idx)); } // retrieve original FQN of constants_info_[idx] const char* constant_original_fqn(size_t idx) const { if (this->num_models() == 0) { throw std::runtime_error("No available models in container!"); } return models_[0]->constant_original_fqn(static_cast<int64_t>(idx)); } // retrieve whether constant is from folded of constants_info_[idx] bool constant_from_folded(size_t idx) const { if (this->num_models() == 0) { throw std::runtime_error("No available models in container!"); } return models_[0]->constant_from_folded(static_cast<int64_t>(idx)); } // retrieve type of constants_info_[idx] int32_t constant_type(size_t idx) const { if (this->num_models() == 0) { throw std::runtime_error("No available models in container!"); } return models_[0]->constant_type(static_cast<int64_t>(idx)); } // retrieve dtype of constants_info_[idx] int32_t constant_dtype(size_t idx) const { if (this->num_models() == 0) { throw std::runtime_error("No available models in container!"); } return models_[0]->constant_dtype(static_cast<int64_t>(idx)); } void run_const_fold( bool inactive_buffer, DeviceStreamType stream, AOTIProxyExecutorHandle proxy_executor) { std::shared_lock model_lk(model_exec_mutex_); auto* model = get_available_model(); if (!inactive_buffer) { // We would need to acquire a unique lock if we want to run constant // folding on the active buffer. model_lk.unlock(); std::unique_lock constants_folding_lk(model_exec_mutex_); try { auto folded_const_map = model->run_const_fold(stream, proxy_executor); update_constant_buffer( std::move(folded_const_map), /* use_inactive = */ false, /* validate_full_update = */ false); } catch (...) 
{ std::lock_guard lk(models_mutex_); available_models_.push_back(model); throw; } constants_folding_lk.unlock(); model_lk.lock(); } else { // We swap the constant mapping to the inactive buffer in the model to run // const run. auto constants_map = get_constants_map(/* get_inactive= */ true); auto constants_array = get_constants_array(/* get_inactive= */ true); try { model->update_constants_map( constants_map, /* remap_constants_array= */ false); model->update_constants_array(constants_array); auto folded_const_map = model->run_const_fold(stream, proxy_executor); update_constant_buffer( std::move(folded_const_map), /* use_inactive = */ true, /* validate_full_update = */ false); // Swap back the model's constants mapping constants_map = get_constants_map(/* get_inactive= */ false); constants_array = get_constants_array(/* get_inactive= */ false); model->update_constants_map( constants_map, /* remap_constants_array= */ false); model->update_constants_array(constants_array); } catch (...) { std::lock_guard lk(models_mutex_); available_models_.push_back(model); throw; } } { std::lock_guard lk(models_mutex_); pending_models_.push_back(model); } pending_models_available_.notify_one(); } bool _should_skip_update(const size_t idx) const { auto constant_type = models_[0]->constant_type(static_cast<int64_t>(idx)); // We should skip constants return constant_type == ConstantType::TensorConstant; } bool _could_skip_update(const size_t idx) const { auto constant_type = models_[0]->constant_type(static_cast<int64_t>(idx)); // Buffer can be optionally skipped, so if it not provided by upstream // services, it is OK to relax the check. return constant_type == ConstantType::Buffer; } void assert_all_constants( const std::unordered_map<std::string, AtenTensorHandle>& constants_map) { auto num_constants = models_[0]->num_constants(); for (size_t idx = 0; idx < num_constants; idx++) { if (models_[0]->constant_from_folded(static_cast<int64_t>(idx))) { continue; } auto constant_name = std::string(models_[0]->constant_name(static_cast<int64_t>(idx))); auto it = constants_map.find(constant_name); if (it == constants_map.end()) { if (_should_skip_update(idx) || _could_skip_update(idx)) { // tracing sometimes creates tensors that are non-existent in // original graph. We could skip those and do a direct copy. std::cerr << "[WARNING] Found constant or module state buffer " << constant_name << " in model, but not provided by user!\n"; continue; } throw std::runtime_error( std::string("Cannot find constants ") + constant_name + std::string(" in constants_map!")); } } } // We directly take ownership from AtenTensorHandle if constants are moved. 
void update_constant_buffer( std::unordered_map<std::string, AtenTensorHandle>&& constants_map, bool use_inactive, bool validate_full_update) { if (this->num_models() == 0) { throw std::runtime_error("No model available in container!"); } if (validate_full_update) { assert_all_constants(constants_map); } auto original_constants_map = get_constants_map(!use_inactive); auto constants_map_to_update = get_constants_map(use_inactive); auto num_constants = models_[0]->num_constants(); for (size_t idx = 0; idx < num_constants; idx++) { auto constant_name = std::string(models_[0]->constant_name(static_cast<int64_t>(idx))); auto it = constants_map.find(constant_name); if (it == constants_map.end() && !use_inactive) { continue; } AtenTensorHandle tensor; if (it == constants_map.end() && use_inactive) { aoti_torch_clone( original_constants_map->find(constant_name)->second.get(), &tensor); } else { tensor = it->second; } constants_map_to_update->insert_or_assign(constant_name, tensor); } // Update the inactive constant array. update_array_from_map( get_constants_array(use_inactive), constants_map_to_update); } // This function updates the buffer for storing constants. // It will update the buffer, the mapping and the array mapping. void update_constant_buffer( const std::unordered_map<std::string, AtenTensorHandle>& constants_map, bool use_inactive, bool validate_full_update) { if (this->num_models() == 0) { throw std::runtime_error("No model available in container!"); } if (validate_full_update) { assert_all_constants(constants_map); } auto original_constants_map = get_constants_map(!use_inactive); auto constants_map_to_update = get_constants_map(use_inactive); auto num_constants = models_[0]->num_constants(); for (size_t idx = 0; idx < num_constants; idx++) { auto constant_name = std::string(models_[0]->constant_name(static_cast<int64_t>(idx))); auto it = constants_map.find(constant_name); if (it == constants_map.end() && !use_inactive) { continue; } AtenTensorHandle tensor; if (it == constants_map.end() && use_inactive) { tensor = original_constants_map->find(constant_name)->second.get(); } else { tensor = it->second; } auto* constants_blob_ptr = static_cast<uint8_t*>(get_constant_blob_ptr(use_inactive)); // Move the data to container handled blob. uint8_t* internal_constants_ptr = constants_blob_ptr + constants_internal_offset_[idx]; void* user_constant_ptr; int64_t constant_size; aoti_torch_get_data_ptr(tensor, &user_constant_ptr); aoti_torch_get_storage_size(tensor, &constant_size); #ifdef USE_XPU sycl::queue* queue_ptr = nullptr; aoti_torch_get_current_sycl_queue((void**)&queue_ptr); queue_ptr ->memcpy(internal_constants_ptr, user_constant_ptr, constant_size) .wait(); #elif USE_CUDA AOTI_RUNTIME_DEVICE_CHECK(cudaMemcpy( internal_constants_ptr, user_constant_ptr, constant_size, cudaMemcpyDefault)); #else memcpy(internal_constants_ptr, user_constant_ptr, constant_size); #endif // Generate Tensor from container handled blob. // We extract stride and offset from provided Tensor since we do not // guarantee that the tensor is contiguous. 
AtenTensorHandle tensor_handle; int64_t* stride; int64_t offset; int device_type = models_[0]->get_device_type(); int device_idx = models_[0]->get_device_idx(); AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_get_strides(tensor, &stride)); AOTI_TORCH_ERROR_CODE_CHECK( aoti_torch_get_storage_offset(tensor, &offset)); AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_create_tensor_from_blob( internal_constants_ptr, models_[0]->constant_ndim(idx), models_[0]->constant_shape(idx), stride, offset, models_[0]->constant_dtype(idx), device_type, device_idx, &tensor_handle)); // Now place the tensor into constants_map. Note that at this point the ownership // of the tensor_handle will be taken over. constants_map_to_update->insert_or_assign(constant_name, tensor_handle); } // Update the inactive constant array. update_array_from_map( get_constants_array(use_inactive), constants_map_to_update); } void update_array_from_map( const std::shared_ptr<std::vector<ConstantHandle>>& constants_array, const std::shared_ptr<ConstantMap>& constants_map) { auto num_constants = models_[0]->num_constants(); for (size_t idx = 0; idx < num_constants; idx++) { if (constants_map->find(models_[0]->constant_name( static_cast<int64_t>(idx))) != constants_map->end()) { constants_array->at(idx) = ConstantHandle( constants_map ->find(models_[0]->constant_name(static_cast<int64_t>(idx))) ->second); } } } void swap_constant_buffer() { std::lock_guard unique_lk(model_exec_mutex_); auto constants_map = get_constants_map(/* get_inactive= */ true); auto constants_array = get_constants_array(/* get_inactive= */ true); for (auto& model : models_) { model->update_constants_map( constants_map, /* remap_constants_array = */ false); model->update_constants_array(constants_array); } use_secondary_ = !use_secondary_; } size_t num_inputs() const { return input_names_.size(); } size_t num_outputs() const { return output_names_.size(); } const char* input_name(size_t idx) const { return input_names_.at(idx).c_str(); } const char* output_name(size_t idx) const { return output_names_.at(idx).c_str(); } size_t num_models() const { return models_.size(); } const char* get_in_spec() const { return in_spec_; } const char* get_out_spec() const { return out_spec_; } private: std::vector<std::string> input_names_; std::vector<std::string> output_names_; const char* in_spec_; const char* out_spec_; // Holds the blob storage for constants' at::Tensor within the container. // This blob of memory will be managed by the container. RAIIDataPtr constant_blob_; RAIIDataPtr constant_blob_secondary_; size_t blob_size_; std::vector<size_t> constants_internal_offset_; // Determines which set of constants is being used for the model. // If true, // constants_map_secondary/constant_blob_secondary/constants_array_secondary // is being used. bool use_secondary_{false}; // Determines whether we have run constant folding bool constant_folded_{false}; // Holds the mapping of constants to at::Tensor. // The underlying data of at::Tensor is in either constant_blob_ (for CUDA) // or _binary_constants_bin_start (for CPU). std::shared_ptr<ConstantMap> constants_map_; std::shared_ptr<ConstantMap> constants_map_secondary_; // Holds the indexed array of constants for faster lookup during runtime. std::shared_ptr<std::vector<ConstantHandle>> constants_array_; std::shared_ptr<std::vector<ConstantHandle>> constants_array_secondary_; // Holds all the AOTInductorModel instances owned by this container.
std::vector<std::unique_ptr<AOTInductorModel>> models_; // Holds the AOTInductorModel instances available for inference. std::vector<AOTInductorModel*> available_models_; // Holds the AOTInductorModel instances that have started running // inference and can be placed onto available_models_ upon their // completion. std::deque<AOTInductorModel*> pending_models_; // Protects available_models_ and pending_models_. std::mutex models_mutex_; // Notified whenever a model is placed onto pending_models_. std::condition_variable pending_models_available_; AOTInductorModel* get_available_model() { std::unique_lock lk(models_mutex_); if (available_models_.empty()) { reclaim_finished_models(lk); } auto* result = available_models_.back(); available_models_.pop_back(); return result; } // This mutex is used to protect execution of model. // We acquire the mutex in shared mode if we allow concurrent execution. // We acquire the mutex in unique mode when we want exclusive access of the // model. One such case is when we want to do a weight swapping. We want to // make sure no one is executing the model. std::shared_mutex model_exec_mutex_; void* get_constant_blob_ptr(bool get_inactive) { if ((get_inactive && use_secondary_) || (!get_inactive && !use_secondary_)) { return constant_blob_.get(); } else { if (!constant_blob_secondary_) { #if defined(USE_CUDA) || defined(USE_XPU) constant_blob_secondary_ = RAII_gpuMalloc(blob_size_); #else constant_blob_secondary_ = RAII_cpuMalloc(blob_size_); #endif // USE_CUDA } return constant_blob_secondary_.get(); } } std::shared_ptr<ConstantMap> get_constants_map(bool get_inactive) { if ((get_inactive && use_secondary_) || (!get_inactive && !use_secondary_)) { return constants_map_; } else { if (!constants_map_secondary_) { constants_map_secondary_ = std::make_shared<ConstantMap>(); } return constants_map_secondary_; } } std::shared_ptr<std::vector<ConstantHandle>> get_constants_array( bool get_inactive) { if ((get_inactive && use_secondary_) || (!get_inactive && !use_secondary_)) { return constants_array_; } else { if (!constants_array_secondary_) { constants_array_secondary_ = std::make_shared<std::vector<ConstantHandle>>( models_[0]->num_constants()); } return constants_array_secondary_; } } void reclaim_finished_models(std::unique_lock<std::mutex>& lk) { #ifdef __aarch64__ // push finished model instances to the end of pending_models_ auto it = std::partition( pending_models_.begin(), pending_models_.end(), [](AOTInductorModel* m) { return !m->is_finished(); }); #else // push finished model instances to the end of pending_models_ auto it = std::stable_partition( pending_models_.begin(), pending_models_.end(), [](AOTInductorModel* m) { return !m->is_finished(); }); #endif if (it != pending_models_.end()) { // We have finished model instances that can be pushed into // available_models_ so that we don't have to be blocked on waiting // the pending_models_available_ condition. available_models_.insert( available_models_.end(), it, pending_models_.end()); pending_models_.erase(it, pending_models_.end()); return; } pending_models_available_.wait( lk, [this]() { return !pending_models_.empty(); }); // Let's make the schedule simple first. We always wait on the first // pending_models_ to be complete. auto* model = pending_models_.front(); pending_models_.pop_front(); lk.unlock(); try { model->wait_for_completion(); } catch (...) 
{ lk.lock(); available_models_.push_back(model); throw; } lk.lock(); available_models_.push_back(model); } }; } // namespace torch::aot_inductor ```
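A minimal usage sketch for the container, assuming a CPU-compiled model with a single input and no proxy executor; the nullptr stream and the input handle are placeholders, and the output handles become the caller's responsibility to free.

```cpp
#include <torch/csrc/inductor/aoti_runtime/model_container.h>

using namespace torch::aot_inductor;

// Sketch only: run one inference through the container's pooled models.
void run_once(AtenTensorHandle input /* stolen by run() */) {
  AOTInductorModelContainer container(/*num_models=*/1, /*device_str=*/"cpu");

  AtenTensorHandle inputs[] = {input};
  std::vector<AtenTensorHandle> outputs(container.num_outputs(), nullptr);

  // run() steals the input handles and writes new output handles into
  // `outputs`; the arrays themselves stay owned by the caller.
  container.run(
      inputs,
      outputs.data(),
      /*stream=*/nullptr,
      /*proxy_executor=*/nullptr);

  for (AtenTensorHandle out : outputs) {
    aoti_torch_delete_tensor_object(out); // or wrap in RAIIAtenTensorHandle
  }
}
```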
========================================================================================================================================================= SOURCE CODE FILE: scalar_to_tensor.h LINES: 1 SIZE: 1.59 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\inductor\aoti_runtime\scalar_to_tensor.h ENCODING: utf-8 ```h #pragma once #include <c10/util/complex.h> #include <torch/csrc/inductor/aoti_runtime/utils.h> namespace torch::aot_inductor { template <typename T> inline RAIIAtenTensorHandle scalar_to_tensor_handle(T value) { throw std::runtime_error("Unsupported scalar_to_tensor_handle"); } // Specialize for supported C++ primitive types #define AOTI_RUNTIME_SCALAR_TO_TENSOR(dtype, ctype) \ template <> \ inline RAIIAtenTensorHandle scalar_to_tensor_handle<ctype>(ctype value) { \ AtenTensorHandle tensor_handle; \ AOTI_TORCH_ERROR_CODE_CHECK( \ aoti_torch_scalar_to_tensor_##dtype(value, &tensor_handle)); \ return RAIIAtenTensorHandle(tensor_handle); \ } AOTI_RUNTIME_SCALAR_TO_TENSOR(float32, float) AOTI_RUNTIME_SCALAR_TO_TENSOR(float64, double) AOTI_RUNTIME_SCALAR_TO_TENSOR(uint8, uint8_t) AOTI_RUNTIME_SCALAR_TO_TENSOR(uint16, uint16_t) AOTI_RUNTIME_SCALAR_TO_TENSOR(uint32, uint32_t) AOTI_RUNTIME_SCALAR_TO_TENSOR(uint64, uint64_t) AOTI_RUNTIME_SCALAR_TO_TENSOR(int8, int8_t) AOTI_RUNTIME_SCALAR_TO_TENSOR(int16, int16_t) AOTI_RUNTIME_SCALAR_TO_TENSOR(int32, int32_t) AOTI_RUNTIME_SCALAR_TO_TENSOR(int64, int64_t) AOTI_RUNTIME_SCALAR_TO_TENSOR(bool, bool) AOTI_RUNTIME_SCALAR_TO_TENSOR(complex64, c10::complex<float>) AOTI_RUNTIME_SCALAR_TO_TENSOR(complex128, c10::complex<double>) #undef AOTI_RUNTIME_SCALAR_TO_TENSOR } // namespace torch::aot_inductor ```
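A short usage sketch for the specializations above; any type without a matching specialization falls through to the primary template and throws at runtime.

```cpp
#include <torch/csrc/inductor/aoti_runtime/scalar_to_tensor.h>

using torch::aot_inductor::RAIIAtenTensorHandle;
using torch::aot_inductor::scalar_to_tensor_handle;

// Sketch only: each call allocates a single-element tensor via the C shim
// and returns an owning handle that frees itself when it goes out of scope.
void make_scalar_tensors() {
  RAIIAtenTensorHandle f = scalar_to_tensor_handle<float>(1.5f);
  RAIIAtenTensorHandle i = scalar_to_tensor_handle<int64_t>(42);
  RAIIAtenTensorHandle c =
      scalar_to_tensor_handle<c10::complex<float>>({1.0f, -2.0f});
}
```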
============================================================================================================================================================== SOURCE CODE FILE: sycl_runtime_wrappers.h LINES: 1 SIZE: 6.30 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\inductor\aoti_runtime\sycl_runtime_wrappers.h ENCODING: utf-8 ```h // NOLINT #pragma once #ifdef USE_XPU #include <c10/xpu/XPUFunctions.h> #include <level_zero/ze_api.h> #include <sycl/sycl.hpp> #include <fstream> #include <iostream> #include <string> #define ZE_CHECK(status) \ { \ if (status != ZE_RESULT_SUCCESS) { \ std::stringstream ss; \ ss << "L0 runtime error: " << std::hex << std::uppercase << status; \ throw std::runtime_error(ss.str()); \ } \ } static ze_module_handle_t create_module( ze_context_handle_t context, ze_device_handle_t device, const uint8_t* binary_ptr, size_t binary_size) { const char* build_flags = ""; const ze_module_format_t format = ZE_MODULE_FORMAT_IL_SPIRV; ze_module_desc_t module_description = {}; module_description.stype = ZE_STRUCTURE_TYPE_MODULE_DESC; module_description.format = format; module_description.inputSize = binary_size; module_description.pInputModule = (uint8_t*)binary_ptr; module_description.pBuildFlags = build_flags; ze_module_build_log_handle_t buildlog = nullptr; ze_module_handle_t module = nullptr; auto context_initial = context; auto device_initial = device; auto error_no = ZE_RESULT_SUCCESS; error_no = zeModuleCreate(context, device, &module_description, &module, &buildlog); if (error_no != ZE_RESULT_SUCCESS) { size_t szLog = 0; ZE_CHECK(zeModuleBuildLogGetString(buildlog, &szLog, nullptr)); char* strLog = (char*)malloc(szLog); ZE_CHECK(zeModuleBuildLogGetString(buildlog, &szLog, strLog)); std::cerr << "L0 build module failed. 
Log: " << strLog << std::endl; free(strLog); } if (buildlog) { ZE_CHECK(zeModuleBuildLogDestroy(buildlog)); } ZE_CHECK(error_no); return module; } ze_kernel_handle_t create_function( ze_module_handle_t module, ze_kernel_flags_t flag, const std::string& func_name) { ze_kernel_handle_t kernel = nullptr; ze_kernel_desc_t kernel_description = {}; kernel_description.stype = ZE_STRUCTURE_TYPE_KERNEL_DESC; kernel_description.pNext = nullptr; kernel_description.flags = flag; kernel_description.pKernelName = func_name.c_str(); assert(module); ZE_CHECK(zeKernelCreate(module, &kernel_description, &kernel)); return kernel; } static ze_module_handle_t loadModule(std::string& spv_path) { sycl::device& sycl_device = c10::xpu::get_raw_device(c10::xpu::current_device()); auto sycl_context = sycl_device.get_platform().ext_oneapi_get_default_context(); auto l0_device = sycl::get_native<sycl::backend::ext_oneapi_level_zero>(sycl_device); auto l0_context = sycl::get_native<sycl::backend::ext_oneapi_level_zero>(sycl_context); std::ifstream IFS(spv_path.c_str(), std::ios::binary); std::ostringstream OSS; OSS << IFS.rdbuf(); std::string data(OSS.str()); return create_module( l0_context, l0_device, reinterpret_cast<const uint8_t*>(data.c_str()), data.size()); } static std::unique_ptr<sycl::kernel> getKernel( ze_module_handle_t l0_module, const char* kernel_name) { assert(l0_module); assert(kernel_name); auto l0_kernel = create_function(l0_module, ZE_KERNEL_FLAG_FORCE_RESIDENCY, kernel_name); sycl::device& sycl_device = c10::xpu::get_raw_device(c10::xpu::current_device()); auto sycl_context = sycl_device.get_platform().ext_oneapi_get_default_context(); auto mod = sycl::make_kernel_bundle< sycl::backend::ext_oneapi_level_zero, sycl::bundle_state::executable>( {l0_module, sycl::ext::oneapi::level_zero::ownership::transfer}, sycl_context); auto fun = sycl::make_kernel<sycl::backend::ext_oneapi_level_zero>( {mod, l0_kernel, sycl::ext::oneapi::level_zero::ownership::transfer}, sycl_context); return std::make_unique<sycl::kernel>(fun); } [[maybe_unused]] static std::unique_ptr<sycl::kernel> loadKernel( std::string filePath, const std::string& funcName, uint32_t sharedMemBytes, const std::optional<std::string>& binDir = std::nullopt) { if (binDir) { std::filesystem::path p1{*binDir}; std::filesystem::path p2{filePath}; filePath = (p1 / p2.filename()).string(); } auto mod = loadModule(filePath); return getKernel(mod, funcName.c_str()); } [[maybe_unused]] static void launchKernel( std::unique_ptr<sycl::kernel>& kernel_ptr, uint32_t grid_x, uint32_t grid_y, uint32_t grid_z, uint32_t num_warps, uint32_t shared_memory, void** params, sycl::queue* queue_ptr) { std::string kernel_name = kernel_ptr->get_info<sycl::info::kernel::function_name>(); // Currently threads_per_warp is hard code to 32 from torch.compile to triton // stack. int threads_per_warp = 32; uint32_t num_params = kernel_ptr->get_info<sycl::info::kernel::num_args>(); size_t global_range_x = grid_x * threads_per_warp * num_warps; size_t global_range_y = grid_y; size_t global_range_z = grid_z; size_t local_range_x = num_warps * threads_per_warp; size_t local_range_y = 1; size_t local_range_z = 1; sycl::range<3> global_range(global_range_z, global_range_y, global_range_x); sycl::range<3> local_range(local_range_z, local_range_y, local_range_x); sycl::nd_range<3> parallel_work_size(global_range, local_range); if (shared_memory) { // num_params from sycl info = user provided args + shared_memroy_buffer num_params -= 1; } // Submit the imported kernel. 
auto cgf = [&](sycl::handler& cgh) { for (uint32_t i = 0; i < num_params; ++i) { cgh.set_arg(i, *(static_cast<void**>(params[i]))); } if (shared_memory > 0) { constexpr int dimensions = 1; using share_mem_t = sycl::local_accessor<int8_t, dimensions>; share_mem_t local_buffer = share_mem_t(shared_memory, cgh); cgh.set_arg(num_params, local_buffer); cgh.parallel_for(parallel_work_size, *kernel_ptr); } else { cgh.parallel_for(parallel_work_size, *kernel_ptr); } }; auto event = queue_ptr->submit(cgf); } #endif ```
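For XPU builds, the wrappers above are typically driven as follows; the .spv path, kernel name, launch geometry, and argument are illustrative placeholders, not real artifacts.

```cpp
#ifdef USE_XPU
#include <torch/csrc/inductor/aoti_runtime/sycl_runtime_wrappers.h>

// Sketch only: load a SPIR-V module once, then launch the kernel on the
// given SYCL queue with Triton-style pointer-to-argument parameters.
static void launch_example(sycl::queue* queue, void* device_buffer) {
  auto kernel = loadKernel(
      "/tmp/example_kernel.spv", "example_kernel", /*sharedMemBytes=*/0);

  // launchKernel() dereferences each params[i] to obtain the argument value.
  void* params[] = {&device_buffer};
  launchKernel(
      kernel,
      /*grid_x=*/128,
      /*grid_y=*/1,
      /*grid_z=*/1,
      /*num_warps=*/4,
      /*shared_memory=*/0,
      params,
      queue);
}
#endif // USE_XPU
```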
===================================================================================================================================================== SOURCE CODE FILE: thread_local.h LINES: 1 SIZE: 4.41 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\inductor\aoti_runtime\thread_local.h ENCODING: utf-8 ```h #pragma once #include <torch/csrc/inductor/aoti_runtime/arrayref_tensor.h> namespace torch::aot_inductor { template <typename T> struct ThreadLocalCachedOutputTensor; template <> struct ThreadLocalCachedOutputTensor<RAIIAtenTensorHandle> { explicit ThreadLocalCachedOutputTensor(const RAIIAtenTensorHandle&) {} void copy_data_from(const RAIIAtenTensorHandle& handle) { throw std::runtime_error("can't happen"); } AtenTensorHandle tensor() const { throw std::runtime_error("can't happen"); } }; template <> struct ThreadLocalCachedOutputTensor<AtenTensorHandle> { explicit ThreadLocalCachedOutputTensor(const AtenTensorHandle&) {} void copy_data_from(const AtenTensorHandle& handle) { throw std::runtime_error("can't happen"); } AtenTensorHandle tensor() const { throw std::runtime_error("can't happen"); } }; template <> struct ThreadLocalCachedOutputTensor<ConstantHandle> { explicit ThreadLocalCachedOutputTensor(const ConstantHandle&) {} void copy_data_from(const ConstantHandle& handle) { throw std::runtime_error("can't happen"); } AtenTensorHandle tensor() const { throw std::runtime_error("can't happen"); } }; template <typename T> struct ThreadLocalCachedOutputTensor<ArrayRefTensor<T>> { explicit ThreadLocalCachedOutputTensor(const ArrayRefTensor<T>& t) { realloc(t); } void copy_data_from(const ArrayRefTensor<T>& t) { if (t.numel() > capacity_) { realloc(t); } std::copy(t.data(), t.data() + t.numel(), storage_.get()); } AtenTensorHandle tensor() const { return tensor_.get(); } private: void realloc(const ArrayRefTensor<T>& t) { capacity_ = t.numel(); // NOLINTNEXTLINE(*arrays*) storage_ = std::make_unique<T[]>(t.numel()); AtenTensorHandle handle = nullptr; AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_create_tensor_from_blob( storage_.get(), t.sizes().size(), t.sizes().data(), t.strides().data(), 0, aoti_torch_dtype<std::remove_const_t<T>>(), t.device_type(), t.device_idx(), &handle)); tensor_ = handle; } // NOLINTNEXTLINE(*arrays*) std::unique_ptr<T[]> storage_; int64_t capacity_ = 0; RAIIAtenTensorHandle tensor_; }; template <typename T> struct ThreadLocalCachedOutputArray; // Just needs to compile, doesn't need to do anything. template <> struct ThreadLocalCachedOutputArray<RAIIAtenTensorHandle> { explicit ThreadLocalCachedOutputArray(const RAIIAtenTensorHandle&) { throw std::runtime_error("can't happen"); } // Not supported yet! We would need to put contiguous() or // expect_contiguous() into the ABI. void copy_data_from(const RAIIAtenTensorHandle&) { throw std::runtime_error("can't happen"); } template <typename U> ArrayRefTensor<U> arrayref_tensor() const { throw std::runtime_error("can't happen"); } }; // Just needs to compile, doesn't need to do anything. template <> struct ThreadLocalCachedOutputArray<ConstantHandle> { explicit ThreadLocalCachedOutputArray(const ConstantHandle&) { throw std::runtime_error("can't happen"); } // Not supported yet! We would need to put contiguous() or // expect_contiguous() into the ABI. 
void copy_data_from(const ConstantHandle&) { throw std::runtime_error("can't happen"); } template <typename U> ArrayRefTensor<U> arrayref_tensor() const { throw std::runtime_error("can't happen"); } }; template <typename T> struct ThreadLocalCachedOutputArray<ArrayRefTensor<T>> { explicit ThreadLocalCachedOutputArray(const ArrayRefTensor<T>& t) {} template < typename U, std::enable_if_t< std::is_same_v<std::remove_const_t<T>, std::remove_const_t<U>>, bool> = true> ArrayRefTensor<T> arrayref_tensor() const { return tensor_; } void copy_data_from(const ArrayRefTensor<T>& t) { if (t.numel() > capacity_) { capacity_ = t.numel(); // NOLINTNEXTLINE(*arrays*) storage_ = std::make_unique<T[]>(capacity_); } std::copy(t.data(), t.data() + t.numel(), storage_.get()); tensor_ = t; tensor_.set_arrayref(MiniArrayRef<T>(storage_.get(), t.numel())); } private: // NOLINTNEXTLINE(*arrays*) std::unique_ptr<T[]> storage_; uint32_t capacity_ = 0; ArrayRefTensor<T> tensor_; }; } // namespace torch::aot_inductor ```
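A sketch of the caching pattern these helpers enable: keep one cache per thread, reallocate only when the output grows, and hand out a borrowed tensor handle. The function name and the assumption that the ArrayRefTensor comes from a minimal-arrayref-interface output are illustrative.

```cpp
#include <optional>

#include <torch/csrc/inductor/aoti_runtime/thread_local.h>

using namespace torch::aot_inductor;

// Sketch only: the first call allocates thread-local storage and wraps it in
// a tensor handle; later calls just copy into it via copy_data_from().
AtenTensorHandle cache_output(const ArrayRefTensor<float>& out) {
  static thread_local std::optional<
      ThreadLocalCachedOutputTensor<ArrayRefTensor<float>>>
      cached;
  if (!cached.has_value()) {
    cached.emplace(out);
  }
  cached->copy_data_from(out);
  return cached->tensor(); // borrowed; owned by the thread-local cache
}
```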
============================================================================================================================================== SOURCE CODE FILE: utils.h LINES: 1 SIZE: 7.21 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\inductor\aoti_runtime\utils.h ENCODING: utf-8 ```h #pragma once #include <iostream> #include <memory> #include <sstream> #include <stdexcept> #include <string> #include <vector> // WARNING: Be careful when adding new includes here. This header will be used // in model.so, and should not refer to any aten/c10 headers except the stable // C ABI defined in torch/csrc/inductor/aoti_torch/c/shim.h. The same rule // applies to other files under torch/csrc/inductor/aoti_runtime/. #include <torch/csrc/inductor/aoti_torch/c/shim.h> #if defined(__GNUC__) || defined(__clang__) #define AOTI_NOINLINE __attribute__((noinline)) #elif _MSC_VER #define AOTI_NOINLINE __declspec(noinline) #else #define AOTI_NOINLINE #endif AOTI_NOINLINE static void throw_exception( const char* call, const char* file, int64_t line) { std::stringstream ss; ss << call << " API call failed at " << file << ", line " << line; throw std::runtime_error(ss.str()); } #define AOTI_TORCH_ERROR_CODE_CHECK(call) \ if ((call) != AOTI_TORCH_SUCCESS) { \ throw_exception(#call, __FILE__, __LINE__); \ } using AOTIRuntimeError = int32_t; #define AOTI_RUNTIME_SUCCESS 0 #define AOTI_RUNTIME_FAILURE 1 #define AOTI_RUNTIME_ERROR_CODE_CHECK(call) \ if ((call) != AOTI_RUNTIME_SUCCESS) { \ throw_exception(#call, __FILE__, __LINE__); \ } namespace torch::aot_inductor { using DeleterFnPtr = void (*)(void*); inline void noop_deleter(void*) {} inline void delete_tensor_object(void* ptr) { AOTI_TORCH_ERROR_CODE_CHECK( aoti_torch_delete_tensor_object(reinterpret_cast<AtenTensorHandle>(ptr))); } // RAIIAtenTensorHandle steals the tensor objects created by the libtorch C ABI class RAIIAtenTensorHandle { public: RAIIAtenTensorHandle() : handle_(nullptr, noop_deleter) {} RAIIAtenTensorHandle(const RAIIAtenTensorHandle& other) = delete; RAIIAtenTensorHandle& operator=(const RAIIAtenTensorHandle& other) = delete; // Steal the ownership from another RAIIAtenTensorHandle using std::move RAIIAtenTensorHandle(RAIIAtenTensorHandle&& other) = default; RAIIAtenTensorHandle& operator=(RAIIAtenTensorHandle&& other) = default; // Steal the ownership from raw AtenTensorHandle RAIIAtenTensorHandle(AtenTensorHandle handle) : handle_(handle, delete_tensor_object) {} ~RAIIAtenTensorHandle() { handle_.reset(); } // Return a raw AtenTensorHandle to be used by aoti_torch functions // Note: this function does NOT transfer the ownership of the handle operator AtenTensorHandle() const { return handle_.get(); } AtenTensorHandle release() { return handle_.release(); } AtenTensorHandle get() const { return handle_.get(); } void reset() { handle_.reset(); } int64_t size(int64_t d) { int64_t size = 0; AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_get_size(handle_.get(), d, &size)); return size; } int64_t stride(int64_t d) { int64_t stride = 0; AOTI_TORCH_ERROR_CODE_CHECK( aoti_torch_get_stride(handle_.get(), d, &stride)); return stride; } int64_t storage_offset() { int64_t storage_offset = 0; AOTI_TORCH_ERROR_CODE_CHECK( aoti_torch_get_storage_offset(handle_.get(), &storage_offset)); return storage_offset; } void* data_ptr() const { void* result = nullptr; AOTI_TORCH_ERROR_CODE_CHECK( aoti_torch_get_data_ptr(handle_.get(), &result)); return result; } int64_t* sizes() const { int64_t* result = nullptr; 
AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_get_sizes(handle_.get(), &result)); return result; } int64_t* strides() const { int64_t* result = nullptr; AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_get_strides(handle_.get(), &result)); return result; } private: std::unique_ptr<AtenTensorOpaque, DeleterFnPtr> handle_; }; // Steal the ownership from raw AtenTensorHandle to RAIIAtenTensorHandle inline std::vector<RAIIAtenTensorHandle> steal_from_raw_handles_to_raii_handles( AtenTensorHandle* handles, size_t size) { std::vector<RAIIAtenTensorHandle> result; result.reserve(size); for (size_t i = 0; i < size; i++) { result.emplace_back(handles[i]); handles[i] = nullptr; } return result; } inline AtenTensorHandle reinterpret_tensor_wrapper( AtenTensorHandle self, int64_t ndim, const int64_t* sizes_ptr, const int64_t* strides_ptr, int64_t storage_offset) { AtenTensorHandle result = nullptr; AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch__reinterpret_tensor( self, ndim, sizes_ptr, strides_ptr, storage_offset, &result)); return result; } inline void* get_data_ptr_wrapper(AtenTensorHandle tensor) { void* result = nullptr; AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_get_data_ptr(tensor, &result)); return result; } inline AtenTensorHandle unwrap_raii_handle_if_needed( const RAIIAtenTensorHandle& handle) { return handle.get(); } inline RAIIAtenTensorHandle wrap_with_raii_handle_if_needed( AtenTensorHandle handle) { return RAIIAtenTensorHandle(handle); } class ConstantHandle { public: ConstantHandle() = default; explicit ConstantHandle(AtenTensorHandle handle) : handle_(handle) { AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_get_data_ptr(handle_, &data_)); } operator AtenTensorHandle() const { return handle_; } AtenTensorHandle tensor() const { return handle_; } AtenTensorHandle get() const { return handle_; } void* data_ptr() const { return data_; } int64_t* sizes() const { int64_t* result = nullptr; AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_get_sizes(handle_, &result)); return result; } int64_t* strides() const { int64_t* result = nullptr; AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_get_strides(handle_, &result)); return result; } private: AtenTensorHandle handle_{}; void* data_ = nullptr; }; inline void* get_data_ptr_wrapper(const ConstantHandle& constant) { return constant.data_ptr(); } inline const ConstantHandle& unwrap_raii_handle_if_needed( const ConstantHandle& handle) { return handle; } // Shouldn't be called. inline AtenTensorHandle wrap_with_raii_handle_if_needed( const ConstantHandle& handle) = delete; // DANGEROUS. Do not call unless you explicitly intend to get a reference to a // temporary value, which will expire at the end of the current expression. // This should only be called in cases where the C-shim API expects an optional // input argument (passed by pointer), and a temporary needs to be passed to it. template <class T> T& temporary_reference(T&& t) { return t; } #define CACHE_TORCH_DTYPE(typename) \ static auto cached_torch_dtype_##typename = aoti_torch_dtype_##typename() #define CACHE_TORCH_DEVICE(device) \ static auto cached_torch_device_type_##device = \ aoti_torch_device_type_##device() #define CACHE_TORCH_LAYOUT(layout) \ static auto cached_torch_layout_##layout = aoti_torch_layout_##layout() #define CACHE_TORCH_MEMORY_FORMAT(format) \ static auto cached_torch_memory_format_##format = \ aoti_torch_memory_format_##format() } // namespace torch::aot_inductor ```
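A minimal sketch tying the shim and the RAII wrapper together; the 2x3 shape and CPU device are arbitrary example values.

```cpp
#include <torch/csrc/inductor/aoti_runtime/utils.h>

using torch::aot_inductor::RAIIAtenTensorHandle;

// Sketch only: create a tensor through the C shim, then hand ownership to
// RAIIAtenTensorHandle so the error-checked delete happens automatically.
RAIIAtenTensorHandle make_empty_2x3_float_cpu() {
  const int64_t sizes[] = {2, 3};
  const int64_t strides[] = {3, 1};
  AtenTensorHandle raw = nullptr;
  AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_empty_strided(
      /*ndim=*/2,
      sizes,
      strides,
      aoti_torch_dtype_float32(),
      aoti_torch_device_type_cpu(),
      /*device_index=*/0,
      &raw));
  // RAIIAtenTensorHandle steals `raw` and calls
  // aoti_torch_delete_tensor_object when it goes out of scope.
  return RAIIAtenTensorHandle(raw);
}
```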
=================================================================================================================================================== SOURCE CODE FILE: utils_cuda.h LINES: 1 SIZE: 1.75 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\inductor\aoti_runtime\utils_cuda.h ENCODING: utf-8 ```h #pragma once #ifdef USE_CUDA // WARNING: Be careful when adding new includes here. This header will be used // in model.so, and should not refer to any aten/c10 headers except the stable // C ABI defined in torch/csrc/inductor/aoti_torch/c/shim.h. The same rule // applies to other files under torch/csrc/inductor/aoti_runtime/. #include <torch/csrc/inductor/aoti_runtime/utils.h> #include <cuda.h> #include <cuda_runtime.h> namespace torch::aot_inductor { inline void delete_cuda_guard(void* ptr) { AOTI_TORCH_ERROR_CODE_CHECK( aoti_torch_delete_cuda_guard(reinterpret_cast<CUDAGuardHandle>(ptr))); } inline void delete_cuda_stream_guard(void* ptr) { AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_delete_cuda_stream_guard( reinterpret_cast<CUDAStreamGuardHandle>(ptr))); } class AOTICudaGuard { public: AOTICudaGuard(int32_t device_index) : guard_(nullptr, delete_cuda_guard) { CUDAGuardHandle ptr = nullptr; AOTI_TORCH_ERROR_CODE_CHECK( aoti_torch_create_cuda_guard(device_index, &ptr)); guard_.reset(ptr); } void set_index(int32_t device_index) { AOTI_TORCH_ERROR_CODE_CHECK( aoti_torch_cuda_guard_set_index(guard_.get(), device_index)); } private: std::unique_ptr<CUDAGuardOpaque, DeleterFnPtr> guard_; }; class AOTICudaStreamGuard { public: AOTICudaStreamGuard(cudaStream_t stream, int32_t device_index) : guard_(nullptr, delete_cuda_stream_guard) { CUDAStreamGuardHandle ptr = nullptr; AOTI_TORCH_ERROR_CODE_CHECK( aoti_torch_create_cuda_stream_guard(stream, device_index, &ptr)); guard_.reset(ptr); } private: std::unique_ptr<CUDAStreamGuardOpaque, DeleterFnPtr> guard_; }; } // namespace torch::aot_inductor #endif // USE_CUDA ```
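For CUDA builds, a short sketch of how generated code typically brackets work with these guards; the stream and device index are caller-supplied placeholders.

```cpp
#ifdef USE_CUDA
#include <torch/csrc/inductor/aoti_runtime/utils_cuda.h>

using namespace torch::aot_inductor;

// Sketch only: both guards are RAII wrappers over the C shim; destroying
// them releases the underlying c10 guard objects, which restore the
// previously current device and stream.
void launch_on(cudaStream_t stream, int32_t device_index) {
  AOTICudaGuard device_guard(device_index);
  AOTICudaStreamGuard stream_guard(stream, device_index);
  // ... enqueue kernels on `stream` here ...
}
#endif // USE_CUDA
```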
================================================================================================================================================== SOURCE CODE FILE: utils_xpu.h LINES: 1 SIZE: 1.73 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\inductor\aoti_runtime\utils_xpu.h ENCODING: utf-8 ```h #pragma once #ifdef USE_XPU // WARNING: Be careful when adding new includes here. This header will be used // in model.so, and should not refer to any aten/c10 headers except the stable // C ABI defined in torch/csrc/inductor/aoti_torch/c/shim.h. The same rule // applies to other files under torch/csrc/inductor/aoti_runtime/. #include <torch/csrc/inductor/aoti_runtime/utils.h> #include <torch/csrc/inductor/aoti_torch/c/shim_xpu.h> namespace torch::aot_inductor { inline void delete_xpu_guard(void* ptr) { AOTI_TORCH_ERROR_CODE_CHECK( aoti_torch_delete_xpu_guard(reinterpret_cast<XPUGuardHandle>(ptr))); } inline void delete_xpu_stream_guard(void* ptr) { AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_delete_xpu_stream_guard( reinterpret_cast<XPUStreamGuardHandle>(ptr))); } class AOTIXpuGuard { public: AOTIXpuGuard(int32_t device_index) : guard_(nullptr, delete_xpu_guard) { XPUGuardHandle ptr = nullptr; AOTI_TORCH_ERROR_CODE_CHECK( aoti_torch_create_xpu_guard(device_index, &ptr)); guard_.reset(ptr); } void set_index(int32_t device_index) { AOTI_TORCH_ERROR_CODE_CHECK( aoti_torch_xpu_guard_set_index(guard_.get(), device_index)); } private: std::unique_ptr<XPUGuardOpaque, DeleterFnPtr> guard_; }; class AOTIXpuStreamGuard { public: AOTIXpuStreamGuard(void* stream, int32_t device_index) : guard_(nullptr, delete_xpu_stream_guard) { XPUStreamGuardHandle ptr = nullptr; AOTI_TORCH_ERROR_CODE_CHECK( aoti_torch_create_xpu_stream_guard(stream, device_index, &ptr)); guard_.reset(ptr); } private: std::unique_ptr<XPUStreamGuardOpaque, DeleterFnPtr> guard_; }; } // namespace torch::aot_inductor #endif // USE_XPU ```
============================================================================================================================================= SOURCE CODE FILE: shim.h LINES: 1 SIZE: 29.41 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\inductor\aoti_torch\c\shim.h ENCODING: utf-8 ```h #ifndef AOTI_TORCH_SHIM #define AOTI_TORCH_SHIM #include <stddef.h> #include <stdint.h> // This header defines a stable C API for certain ATen functionality in // libtorch. The AOTInductor compiled model.so will only refer to this header // instead of other headers from aten/c10, which means it will NOT be able to // directly use any data structures or call functions from libtorch. // // What problems are we trying to solve here? Direct use of aten/c10 APIs // means use of C++ APIs on a library that doesn't have any ABI compatibility // guarantees. However, we want model.so to remain usable across updates // to the PyTorch C++ libraries, which requires a stable ABI. By introducing // a C shim layer, we can minimize the surface that will cause breakage. The // corresponding software stack can be illustrated as follows: // // |--------------------------------| // | inference service code | // |--------------------------------| // | model.so | // |--------------|-----------------| // | <c shim> | // | libtorch.so | // |--------------------------------| // // The general guidelines for the C API: // // - No exceptions, return an explicit error code to be checked at call site // - Only pointers (AtenTensorHandle counts), integers and floats in headers // // If you want to make changes to this header, you MUST MAINTAIN ABI // compatibility. Typically, this means you will have to add a _v2 version // of a function that you, e.g., want to add a new function parameter to, and // maintain the old and new versions of the APIs until all old model.so // go out of use. #ifdef __GNUC__ #define AOTI_TORCH_EXPORT __attribute__((__visibility__("default"))) #else // !__GNUC__ #ifdef _WIN32 // PyTorch2 doesn't currently work on Windows. Exporting these APIs can lead // to symbol clashes at link time if libtorch is included in a DLL and binary // that depends on the DLL. As a short term fix, we don't export the symbols. // In the long term, this will need to be addressed when Windows is supported. #ifdef OVRSOURCE // Do not export AOTI on Windows for internal builds #define AOTI_TORCH_EXPORT #else /* OVRSOURCE */ #ifdef EXPORT_AOTI_FUNCTIONS #define AOTI_TORCH_EXPORT __declspec(dllexport) #else #define AOTI_TORCH_EXPORT __declspec(dllimport) #endif #endif /* OVRSOURCE */ #else // !_WIN32 #define AOTI_TORCH_EXPORT #endif // _WIN32 #endif // __GNUC__ // The following files are implemented in a header-only way and are guarded by // test/cpp/aoti_abi_check #include <c10/util/BFloat16.h> #include <c10/util/Half.h> #include <c10/util/complex.h> #ifdef __cplusplus extern "C" { #endif // AtenTensorHandle represents an abstract notion of Tensor that can be passed // between model.so and libtorch.so. The contents of the structure itself // are private; model.so is not allowed to access any fields directly, it must // go through functions defined in this ABI. Under the hood, this is // represented as at::Tensor*, but we reserve the right to change this (and in // fact, we probably should change it to at::TensorImpl* at least). // // An AtenTensorHandle can be owning (please check the API reference for exact // ownership/borrow semantics). 
If you have an owning AtenTensorHandle // in model.so, you are obligated to aoti_torch_delete_tensor_object when you // are done. You can use the helper C++ class RAIIAtenTensorHandle // (see aot_runtime/model.h) to ensure the deallocator is called in RAII style // (note that RAIIAtenTensorHandle is private to model.so, and never crosses // the ABI boundary.) struct AtenTensorOpaque; using AtenTensorHandle = AtenTensorOpaque*; struct AtenGeneratorOpaque; using AtenGeneratorHandle = AtenGeneratorOpaque*; struct AOTIProxyExecutorOpaque; using AOTIProxyExecutorHandle = AOTIProxyExecutorOpaque*; using AOTITorchError = int32_t; #define AOTI_TORCH_SUCCESS 0 #define AOTI_TORCH_FAILURE 1 // Getter functions for retrieving various constants from the runtime, that // can subsequently be passed to other aoti_* functions. By hiding these // behind functions, the precise value of device/dtype is NOT part of the // ABI contract. (In practice, aten/c10 is pretty good about not renumbering // these, so we probably could later switch to having these in the ABI, if // desired for perf reasons.) AOTI_TORCH_EXPORT int32_t aoti_torch_device_type_cpu(); AOTI_TORCH_EXPORT int32_t aoti_torch_device_type_cuda(); AOTI_TORCH_EXPORT int32_t aoti_torch_device_type_meta(); AOTI_TORCH_EXPORT int32_t aoti_torch_device_type_xpu(); AOTI_TORCH_EXPORT int32_t aoti_torch_device_type_privateuse1(); AOTI_TORCH_EXPORT int32_t aoti_torch_dtype_float8_e5m2(); AOTI_TORCH_EXPORT int32_t aoti_torch_dtype_float8_e4m3fn(); AOTI_TORCH_EXPORT int32_t aoti_torch_dtype_float8_e5m2fnuz(); AOTI_TORCH_EXPORT int32_t aoti_torch_dtype_float8_e4m3fnuz(); AOTI_TORCH_EXPORT int32_t aoti_torch_dtype_bfloat16(); AOTI_TORCH_EXPORT int32_t aoti_torch_dtype_float16(); AOTI_TORCH_EXPORT int32_t aoti_torch_dtype_float32(); AOTI_TORCH_EXPORT int32_t aoti_torch_dtype_float64(); AOTI_TORCH_EXPORT int32_t aoti_torch_dtype_uint8(); AOTI_TORCH_EXPORT int32_t aoti_torch_dtype_uint16(); AOTI_TORCH_EXPORT int32_t aoti_torch_dtype_uint32(); AOTI_TORCH_EXPORT int32_t aoti_torch_dtype_uint64(); AOTI_TORCH_EXPORT int32_t aoti_torch_dtype_int8(); AOTI_TORCH_EXPORT int32_t aoti_torch_dtype_int16(); AOTI_TORCH_EXPORT int32_t aoti_torch_dtype_int32(); AOTI_TORCH_EXPORT int32_t aoti_torch_dtype_int64(); AOTI_TORCH_EXPORT int32_t aoti_torch_dtype_bool(); AOTI_TORCH_EXPORT int32_t aoti_torch_dtype_complex32(); AOTI_TORCH_EXPORT int32_t aoti_torch_dtype_complex64(); AOTI_TORCH_EXPORT int32_t aoti_torch_dtype_complex128(); AOTI_TORCH_EXPORT int32_t aoti_torch_layout_strided(); AOTI_TORCH_EXPORT int32_t aoti_torch_layout_sparse_coo(); AOTI_TORCH_EXPORT int32_t aoti_torch_layout_sparse_csr(); AOTI_TORCH_EXPORT int32_t aoti_torch_layout_sparse_csc(); AOTI_TORCH_EXPORT int32_t aoti_torch_layout_sparse_bsr(); AOTI_TORCH_EXPORT int32_t aoti_torch_layout_sparse_bsc(); AOTI_TORCH_EXPORT int32_t aoti_torch_layout__mkldnn(); AOTI_TORCH_EXPORT int32_t aoti_torch_layout_jagged(); AOTI_TORCH_EXPORT int32_t aoti_torch_memory_format_contiguous_format(); AOTI_TORCH_EXPORT int32_t aoti_torch_memory_format_channels_last(); AOTI_TORCH_EXPORT int32_t aoti_torch_memory_format_channels_last_3d(); AOTI_TORCH_EXPORT int32_t aoti_torch_memory_format_preserve_format(); // Get TORCH_ABI_VERSION of the built libtorch.so AOTI_TORCH_EXPORT uint64_t aoti_torch_abi_version(); // Functions for converting a single-element tensor to a scalar value AOTI_TORCH_EXPORT AOTITorchError aoti_torch_item_float16(AtenTensorHandle tensor, c10::Half* ret_value); AOTI_TORCH_EXPORT AOTITorchError 
aoti_torch_item_float32(AtenTensorHandle tensor, float* ret_value); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_item_float64(AtenTensorHandle tensor, double* ret_value); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_item_uint8(AtenTensorHandle tensor, uint8_t* ret_value); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_item_uint16(AtenTensorHandle tensor, uint16_t* ret_value); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_item_uint32(AtenTensorHandle tensor, uint32_t* ret_value); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_item_uint64(AtenTensorHandle tensor, uint64_t* ret_value); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_item_int8(AtenTensorHandle tensor, int8_t* ret_value); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_item_int16(AtenTensorHandle tensor, int16_t* ret_value); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_item_int32(AtenTensorHandle tensor, int32_t* ret_value); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_item_int64(AtenTensorHandle tensor, int64_t* ret_value); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_item_bool(AtenTensorHandle tensor, bool* ret_value); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_item_bfloat16(AtenTensorHandle tensor, c10::BFloat16* ret_value); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_item_complex64( AtenTensorHandle tensor, c10::complex<float>* ret_value); // Functions for wrapping a scalar value to a single-element tensor AOTI_TORCH_EXPORT AOTITorchError aoti_torch_scalar_to_tensor_float32( float value, AtenTensorHandle* ret_new_tensor); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_scalar_to_tensor_float64( double value, AtenTensorHandle* ret_new_tensor); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_scalar_to_tensor_uint8( uint8_t value, AtenTensorHandle* ret_new_tensor); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_scalar_to_tensor_uint16( uint16_t value, AtenTensorHandle* ret_new_tensor); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_scalar_to_tensor_uint32( uint32_t value, AtenTensorHandle* ret_new_tensor); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_scalar_to_tensor_uint64( uint64_t value, AtenTensorHandle* ret_new_tensor); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_scalar_to_tensor_int8( int8_t value, AtenTensorHandle* ret_new_tensor); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_scalar_to_tensor_int16( int16_t value, AtenTensorHandle* ret_new_tensor); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_scalar_to_tensor_int32( int32_t value, AtenTensorHandle* ret_new_tensor); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_scalar_to_tensor_int64( int64_t value, AtenTensorHandle* ret_new_tensor); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_scalar_to_tensor_bool(bool value, AtenTensorHandle* ret_new_tensor); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_scalar_to_tensor_complex64( c10::complex<float> value, AtenTensorHandle* ret_new_tensor); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_scalar_to_tensor_complex128( c10::complex<double> value, AtenTensorHandle* ret_new_tensor); AOTI_TORCH_EXPORT bool aoti_torch_grad_mode_is_enabled(); AOTI_TORCH_EXPORT void aoti_torch_grad_mode_set_enabled(bool enabled); // Free the tensor object AOTI_TORCH_EXPORT AOTITorchError aoti_torch_delete_tensor_object(AtenTensorHandle tensor); // Get a pointer to the underlying storage data AOTI_TORCH_EXPORT AOTITorchError aoti_torch_get_data_ptr( AtenTensorHandle tensor, void** ret_data_ptr // returns borrowed reference ); // Get the nbytes of the underlying storage AOTI_TORCH_EXPORT AOTITorchError aoti_torch_get_storage_size(AtenTensorHandle tensor, int64_t* ret_size); AOTI_TORCH_EXPORT 
AOTITorchError aoti_torch_get_dim(AtenTensorHandle tensor, int64_t* ret_dim); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_get_numel(AtenTensorHandle tensor, int64_t* ret_numel); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_get_storage_numel(AtenTensorHandle tensor, int64_t* ret_numel); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_get_sizes( AtenTensorHandle tensor, int64_t** ret_sizes // returns borrowed reference ); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_get_size(AtenTensorHandle tensor, int64_t d, int64_t* ret_size); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_get_strides( AtenTensorHandle tensor, int64_t** ret_strides // returns borrowed reference ); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_get_stride(AtenTensorHandle tensor, int64_t d, int64_t* ret_stride); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_get_dtype(AtenTensorHandle tensor, int32_t* ret_dtype); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_get_device_type(AtenTensorHandle tensor, int32_t* ret_device_type); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_get_device_index(AtenTensorHandle tensor, int32_t* ret_device_index); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_get_storage_offset( AtenTensorHandle tensor, int64_t* ret_storage_offset); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_new_tensor_handle( AtenTensorHandle orig_handle, AtenTensorHandle* new_handle); AOTI_TORCH_EXPORT AOTITorchError aoti_torch__alloc_from_pool( AtenTensorHandle self, int64_t offset_bytes, int32_t dtype, int64_t ndim, const int64_t* sizes_ptr, const int64_t* strides_ptr, AtenTensorHandle* ret_new_tensor); // This function will create a new tensor object and its pointer is returned // through *out. The caller is responsible for wrapping the tensor pointer // with RAIIAtenTensorHandle which will call aoti_torch_delete_tensor_object // when going out of scope. AOTI_TORCH_EXPORT AOTITorchError aoti_torch__reinterpret_tensor( AtenTensorHandle self, int64_t ndim, const int64_t* sizes_ptr, const int64_t* strides_ptr, int64_t storage_offset, AtenTensorHandle* ret_new_tensor // returns new reference ); // This function will create a new tensor object and its pointer is returned // through *out. The caller is responsible for wrapping the tensor pointer // with RAIIAtenTensorHandle which will call aoti_torch_delete_tensor_object // when going out of scope. 
AOTI_TORCH_EXPORT AOTITorchError aoti_torch_empty_strided( int64_t ndim, const int64_t* sizes_ptr, const int64_t* strides_ptr, int32_t dtype, int32_t device_type, int32_t device_index, AtenTensorHandle* ret_new_tensor // returns new reference ); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_as_strided( AtenTensorHandle self, const int64_t* sizes_ptr, const int64_t* strides_ptr, AtenTensorHandle* ret); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_create_tensor_from_blob( void* data, int64_t ndim, const int64_t* sizes_ptr, const int64_t* strides_ptr, int64_t storage_offset, int32_t dtype, int32_t device_type, int32_t device_index, AtenTensorHandle* ret // returns new reference ); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_create_tensor_from_blob_v2( void* data, int64_t ndim, const int64_t* sizes_ptr, const int64_t* strides_ptr, int64_t storage_offset, int32_t dtype, int32_t device_type, int32_t device_index, AtenTensorHandle* ret, // returns new reference int32_t layout, const uint8_t* opaque_metadata, int64_t opaque_metadata_size); AOTI_TORCH_EXPORT AOTITorchError aoti_torch__embedding_bag( AtenTensorHandle weight, AtenTensorHandle indices, AtenTensorHandle offsets, int32_t scale_grad_by_freq, int32_t mode, int32_t sparse, AtenTensorHandle per_sample_weights, // optional argument int32_t include_last_offset, int32_t padding_idx, AtenTensorHandle* ret0, // returns new reference AtenTensorHandle* ret1, // returns new reference AtenTensorHandle* ret2, // returns new reference AtenTensorHandle* ret3 // returns new reference ); AOTI_TORCH_EXPORT AOTITorchError aoti_torch__fft_c2c( AtenTensorHandle self, const int64_t* dim_ptr, int64_t dim_size, int64_t normalization, int32_t forward, AtenTensorHandle* ret // returns new reference ); // This version is deprecated. 
We will remove it later AOTI_TORCH_EXPORT AOTITorchError aoti_torch__scaled_dot_product_flash_attention( AtenTensorHandle query, AtenTensorHandle key, AtenTensorHandle value, double dropout_p, bool is_causal, bool return_debug_mask, double scale, AtenTensorHandle* ret0, // returns new reference AtenTensorHandle* ret1, // returns new reference AtenTensorHandle* ret2, // returns new reference AtenTensorHandle* ret3, // returns new reference int64_t* ret4, int64_t* ret5, AtenTensorHandle* ret6, // returns new reference AtenTensorHandle* ret7, // returns new reference AtenTensorHandle* ret8 // returns new reference ); AOTI_TORCH_EXPORT AOTITorchError aoti_torch__scaled_dot_product_flash_attention_v2( AtenTensorHandle query, AtenTensorHandle key, AtenTensorHandle value, double dropout_p, int is_causal, int return_debug_mask, double* scale, // optional argument AtenTensorHandle* ret0, // returns new reference AtenTensorHandle* ret1, // returns new reference AtenTensorHandle* ret2, // returns new reference AtenTensorHandle* ret3, // returns new reference int64_t* ret4, int64_t* ret5, AtenTensorHandle* ret6, // returns new reference AtenTensorHandle* ret7, // returns new reference AtenTensorHandle* ret8 // returns new reference ); AOTI_TORCH_EXPORT AOTITorchError aoti_torch__scaled_dot_product_efficient_attention( AtenTensorHandle query, AtenTensorHandle key, AtenTensorHandle value, AtenTensorHandle attn_bias, // optional argument int compute_log_sumexp, double dropout_p, int is_causal, double* scale, // optional argument AtenTensorHandle* ret0, // returns new reference AtenTensorHandle* ret1, // returns new reference AtenTensorHandle* ret2, // returns new reference AtenTensorHandle* ret3 // returns new reference ); AOTI_TORCH_EXPORT AOTITorchError aoti_torch__scaled_mm( AtenTensorHandle self, AtenTensorHandle mat2, AtenTensorHandle bias, int32_t* out_dtype, AtenTensorHandle scale_a, AtenTensorHandle scale_b, AtenTensorHandle scale_result, int8_t use_fast_accum, AtenTensorHandle* ret0, AtenTensorHandle* ret1); AOTI_TORCH_EXPORT AOTITorchError aoti_torch__scaled_mm_v2( AtenTensorHandle self, AtenTensorHandle mat2, AtenTensorHandle scale_a, AtenTensorHandle scale_b, AtenTensorHandle bias, AtenTensorHandle scale_result, int32_t* out_dtype, int8_t use_fast_accum, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_convolution( AtenTensorHandle input, AtenTensorHandle weight, AtenTensorHandle bias, // optional argument const int64_t* stride_ptr, int64_t stride_size, const int64_t* padding_ptr, int64_t padding_size, const int64_t* dilation_ptr, int64_t dilation_size, int transposed, const int64_t* output_padding_ptr, int64_t output_padding_size, int64_t groups, AtenTensorHandle* ret // returns new reference ); // This function will create a new uninitialized tensor object // and its pointer is returned through *ret. AOTI_TORCH_EXPORT AOTITorchError aoti_torch_new_uninitialized_tensor(AtenTensorHandle* ret); // WARNING: This will be deprecated. Use aoti_torch_copy_ instead. AOTI_TORCH_EXPORT AOTITorchError aoti_torch_tensor_copy_(AtenTensorHandle src, AtenTensorHandle dst); // Make the tensor referred to by dst an alias for the tensor referred // to by src. The two tensors must still be deleted with // aoti_torch_delete_tensor separately (or not) as before the call. AOTI_TORCH_EXPORT AOTITorchError aoti_torch_assign_tensors(AtenTensorHandle src, AtenTensorHandle dst); // Make a shallow copy of the tensor referred to by src and assign // it to the handle in the ret_dst. 
This is similar to the above // aoti_torch_assign_tensors function, but creates and sets the // ret_dst from within. AOTI_TORCH_EXPORT AOTITorchError aoti_torch_assign_tensors_out(AtenTensorHandle src, AtenTensorHandle* ret_dst); // This function will create a new tensor object and its pointer is returned // through *ret. The caller is responsible for wrapping the tensor pointer // with RAIIAtenTensorHandle which will call aoti_torch_delete_tensor_object // when going out of scope. AOTI_TORCH_EXPORT AOTITorchError aoti_torch_clone(AtenTensorHandle self, AtenTensorHandle* ret); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_clone_preserve_strides(AtenTensorHandle self, AtenTensorHandle* ret); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_addmm_out( AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle mat1, AtenTensorHandle mat2, float beta, float alpha); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_bmm_out( AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle mat2); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_copy_( AtenTensorHandle self, AtenTensorHandle src, int32_t non_blocking); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_mm_out( AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle mat2); AOTI_TORCH_EXPORT AOTITorchError aoti_torch__mm_plus_mm_out( AtenTensorHandle out, AtenTensorHandle a, AtenTensorHandle b, AtenTensorHandle c, AtenTensorHandle d); // This will soon be deprecated after ao_quantization is complete. // Please refrain from using this or increasing callsites. AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_wrapped_fbgemm_pack_gemm_matrix_fp16( AtenTensorHandle weight, AtenTensorHandle* out); // This will soon be deprecated after ao_quantization is complete. // Please refrain from using this or increasing callsites. AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__wrapped_linear_prepack( AtenTensorHandle weight, AtenTensorHandle weight_scale, AtenTensorHandle weight_zero_point, AtenTensorHandle bias, AtenTensorHandle* out); // This will soon be deprecated after ao_quantization is complete. // Please refrain from using this or increasing callsites. AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_wrapped_fbgemm_linear_fp16_weight( AtenTensorHandle input, AtenTensorHandle weight, AtenTensorHandle bias, int64_t out_channel, AtenTensorHandle* out); // This will soon be deprecated after ao_quantization is complete. // Please refrain from using this or increasing callsites. 
AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__wrapped_quantized_linear_prepacked( AtenTensorHandle input, AtenTensorHandle input_scale, AtenTensorHandle input_zero_point, AtenTensorHandle weight, AtenTensorHandle out_scale, AtenTensorHandle out_zeropoint, int64_t out_channel, AtenTensorHandle* out); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_nonzero(AtenTensorHandle self, AtenTensorHandle* out); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_zero_(AtenTensorHandle self); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_repeat_interleave_Tensor( AtenTensorHandle repeats, int64_t* output_size, AtenTensorHandle* out); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_check_inf_and_nan(const char* tensor_name, AtenTensorHandle tensor); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_scatter_out( AtenTensorHandle out, AtenTensorHandle self, int64_t dim, AtenTensorHandle index, AtenTensorHandle src); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_scatter_reduce_out( AtenTensorHandle out, AtenTensorHandle self, int64_t dim, AtenTensorHandle index, AtenTensorHandle src, const char* reduce, int32_t include_self); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_index_put_out( AtenTensorHandle out, AtenTensorHandle self, const AtenTensorHandle* indices, const uint32_t num_indices, const AtenTensorHandle values, bool accumulate); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_view_as_real( AtenTensorHandle self, AtenTensorHandle* ret // returns new reference ); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_view_dtype( AtenTensorHandle self, int32_t dtype, AtenTensorHandle* ret // returns new reference ); AOTI_TORCH_EXPORT void aoti_torch_print_tensor_handle( AtenTensorHandle self, const char* msg); // When AOTI debug printer option is enabled, this function will be invoked to // torch pickle save the intermediate tensor for debugging purpose. AOTI_TORCH_EXPORT void aoti_torch_save_tensor_handle( AtenTensorHandle self, const char* tensor_name, const char* launch_prefix, const char* kernel_name); // helpers for converting between StableIValue and actual IValues using StableIValue = uint64_t; class TorchLibraryOpaque; using TorchLibraryHandle = TorchLibraryOpaque*; // stable corollary to torch::Library constructor with Kind::IMPL // will create a new torch::Library object on the heap AOTI_TORCH_EXPORT AOTITorchError aoti_torch_library_init_impl( const char* ns, const char* k, const char* file, uint32_t line, TorchLibraryHandle* ret_new_torch_lib); // stable corollary to torch::Library constructor with Kind::DEF // will create a new torch::Library object on the heap AOTI_TORCH_EXPORT AOTITorchError aoti_torch_library_init_def( const char* ns, const char* file, uint32_t line, TorchLibraryHandle* ret_new_torch_lib); // stable corollary to torch::Library constructor with Kind::FRAGMENT // will create a new torch::Library object on the heap AOTI_TORCH_EXPORT AOTITorchError aoti_torch_library_init_fragment( const char* ns, const char* file, uint32_t line, TorchLibraryHandle* ret_new_torch_lib); // stable corollary to torch::Library method m.impl(), should be // called from StableLibrary AOTI_TORCH_EXPORT AOTITorchError aoti_torch_library_impl( TorchLibraryHandle self, const char* name, void (*fn)(StableIValue*, uint64_t, uint64_t)); // stable corollary to torch::Library method m.def(), should be // called from StableLibrary AOTI_TORCH_EXPORT AOTITorchError aoti_torch_library_def(TorchLibraryHandle self, const char* schema); // the above stable constructors for torch::Library add Library objects // to the heap. 
if you are calling those functions directly, please use // this function to free the Library's memory. The more user friendly // alternative is to use StableLibrary, which will free its handle upon // destruction AOTI_TORCH_EXPORT AOTITorchError aoti_torch_delete_library_object(TorchLibraryHandle tlh); // calls the op overload defined by a given opName, overloadName, and a // stack of StableIValues. This call will populate any return values of the // op into the stack in their StableIValue form, with ret0 at index 0, ret1 // at index 1, and so on. AOTI_TORCH_EXPORT AOTITorchError aoti_torch_call_dispatcher( const char* opName, const char* overloadName, StableIValue* stack); #ifdef USE_CUDA struct CUDAGuardOpaque; using CUDAGuardHandle = CUDAGuardOpaque*; AOTI_TORCH_EXPORT AOTITorchError aoti_torch_create_cuda_guard( int32_t device_index, CUDAGuardHandle* ret_guard // returns new reference ); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_delete_cuda_guard(CUDAGuardHandle guard); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_guard_set_index(CUDAGuardHandle guard, int32_t device_index); struct CUDAStreamGuardOpaque; using CUDAStreamGuardHandle = CUDAStreamGuardOpaque*; AOTI_TORCH_EXPORT AOTITorchError aoti_torch_create_cuda_stream_guard( void* stream, int32_t device_index, CUDAStreamGuardHandle* ret_guard // returns new reference ); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_delete_cuda_stream_guard(CUDAStreamGuardHandle guard); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_get_current_cuda_stream(int32_t device_index, void** ret_stream); #endif // USE_CUDA // See `ProxyExecutor Design Note` in ir.py for more details AOTI_TORCH_EXPORT AOTITorchError aoti_torch_proxy_executor_call_function( AOTIProxyExecutorHandle proxy_executor, int extern_node_index, int num_ints, int64_t* flatten_int_args, int num_tensors, AtenTensorHandle* flatten_tensor_args); AOTI_TORCH_EXPORT void aoti_torch_check( bool cond, const char* func, const char* file, uint32_t line, const char* msg); #ifdef STRIP_ERROR_MESSAGES #define AOTI_TORCH_CHECK(cond, ...) \ if (!(cond)) { \ aoti_torch_check( \ false, \ __func__, \ __FILE__, \ static_cast<uint32_t>(__LINE__), \ TORCH_CHECK_MSG(cond, "", __VA_ARGS__)); \ } #else #define AOTI_TORCH_CHECK(cond, ...) \ if (!(cond)) { \ aoti_torch_check( \ false, \ __func__, \ __FILE__, \ static_cast<uint32_t>(__LINE__), \ TORCH_CHECK_MSG(cond, "", ##__VA_ARGS__)); \ } #endif AOTI_TORCH_EXPORT void aoti_torch_warn( const char* func, const char* file, uint32_t line, const char* msg); #ifdef DISABLE_WARN #define AOTI_TORCH_WARN(...) ((void)0); #else #define AOTI_TORCH_WARN(...) 
  \
  aoti_torch_warn(                                                        \
      __func__, __FILE__, static_cast<uint32_t>(__LINE__), #__VA_ARGS__);
#endif

#ifdef __cplusplus
} // extern "C"

template <typename T>
int32_t aoti_torch_dtype() = delete;

#define DEFINE_DTYPE_SPECIALIZATION(ctype, typename) \
  template <>                                        \
  inline int32_t aoti_torch_dtype<ctype>() {         \
    return aoti_torch_dtype_##typename();            \
  }

namespace c10 {
struct BFloat16;
struct Half;
} // namespace c10

DEFINE_DTYPE_SPECIALIZATION(c10::BFloat16, bfloat16)
DEFINE_DTYPE_SPECIALIZATION(c10::Half, float16)
DEFINE_DTYPE_SPECIALIZATION(c10::complex<float>, complex64)
DEFINE_DTYPE_SPECIALIZATION(float, float32)
DEFINE_DTYPE_SPECIALIZATION(double, float64)
DEFINE_DTYPE_SPECIALIZATION(uint8_t, uint8)
DEFINE_DTYPE_SPECIALIZATION(int8_t, int8)
DEFINE_DTYPE_SPECIALIZATION(int16_t, int16)
DEFINE_DTYPE_SPECIALIZATION(int32_t, int32)
DEFINE_DTYPE_SPECIALIZATION(int64_t, int64)
DEFINE_DTYPE_SPECIALIZATION(bool, bool)

#endif

#endif // AOTI_TORCH_SHIM
```
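The header above only declares the ABI; to make the calling convention concrete, here is a minimal usage sketch written against the declarations in shim.h. It allocates a contiguous float32 CPU tensor with aoti_torch_empty_strided, touches its storage through aoti_torch_get_data_ptr, queries metadata, and releases the owning handle with aoti_torch_delete_tensor_object. The CHECK_OK macro and main() harness are hypothetical scaffolding for this example only; generated model.so code would instead use its own error-check macro and RAIIAtenTensorHandle, as the ownership comment at the top of the header describes.

```cpp
// Minimal usage sketch of the stable C ABI declared above. CHECK_OK is a
// hypothetical helper for this example, not part of the shim.
#include <cstdint>
#include <cstdio>
#include <cstdlib>

#include <torch/csrc/inductor/aoti_torch/c/shim.h>

#define CHECK_OK(call)                                          \
  do {                                                          \
    if ((call) != AOTI_TORCH_SUCCESS) {                         \
      std::fprintf(stderr, "shim call failed: %s\n", #call);    \
      std::exit(1);                                             \
    }                                                           \
  } while (0)

int main() {
  // Sizes/strides for a contiguous 2x3 float32 CPU tensor.
  const int64_t sizes[2] = {2, 3};
  const int64_t strides[2] = {3, 1};

  AtenTensorHandle t = nullptr;
  CHECK_OK(aoti_torch_empty_strided(
      /*ndim=*/2,
      sizes,
      strides,
      aoti_torch_dtype_float32(),      // dtype constant hidden behind a getter
      aoti_torch_device_type_cpu(),    // same for the device type
      /*device_index=*/0,
      &t)); // owning handle: must be deleted below

  // Write through the raw data pointer (borrowed; the tensor keeps ownership).
  void* data = nullptr;
  CHECK_OK(aoti_torch_get_data_ptr(t, &data));
  static_cast<float*>(data)[0] = 42.0f;

  // Only primitive/handle types cross the ABI boundary.
  int64_t numel = 0;
  CHECK_OK(aoti_torch_get_numel(t, &numel));
  std::printf("numel = %lld\n", static_cast<long long>(numel));

  // Owning handles are released explicitly (or via an RAII wrapper).
  CHECK_OK(aoti_torch_delete_tensor_object(t));
  return 0;
}
```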
==================================================================================================================================================== SOURCE CODE FILE: shim_mkldnn.h LINES: 1 SIZE: 7.07 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\inductor\aoti_torch\c\shim_mkldnn.h ENCODING: utf-8 ```h #ifndef AOTI_TORCH_SHIM_MKLDNN #define AOTI_TORCH_SHIM_MKLDNN #include <ATen/Config.h> #include <torch/csrc/inductor/aoti_torch/c/shim.h> #ifdef __cplusplus extern "C" { #endif #if AT_MKLDNN_ENABLED() AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_mkldnn__convolution_pointwise_binary( AtenTensorHandle X, AtenTensorHandle other, AtenTensorHandle W, AtenTensorHandle* B, const int64_t* padding, int64_t padding_len_, const int64_t* stride, int64_t stride_len_, const int64_t* dilation, int64_t dilation_len_, int64_t groups, const char* binary_attr, double* alpha, const char** unary_attr, const double** unary_scalars, int64_t unary_scalars_len_, const char** unary_algorithm, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_mkldnn__convolution_pointwise_binary_( AtenTensorHandle other, AtenTensorHandle X, AtenTensorHandle W, AtenTensorHandle* B, const int64_t* padding, int64_t padding_len_, const int64_t* stride, int64_t stride_len_, const int64_t* dilation, int64_t dilation_len_, int64_t groups, const char* binary_attr, double* alpha, const char** unary_attr, const double** unary_scalars, int64_t unary_scalars_len_, const char** unary_algorithm, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_mkldnn__convolution_pointwise( AtenTensorHandle X, AtenTensorHandle W, AtenTensorHandle* B, const int64_t* padding, int64_t padding_len_, const int64_t* stride, int64_t stride_len_, const int64_t* dilation, int64_t dilation_len_, int64_t groups, const char* attr, const double** scalars, int64_t scalars_len_, const char** algorithm, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_mkldnn__convolution_transpose_pointwise( AtenTensorHandle X, AtenTensorHandle W, AtenTensorHandle* B, const int64_t* padding, int64_t padding_len_, const int64_t* output_padding, int64_t output_padding_len_, const int64_t* stride, int64_t stride_len_, const int64_t* dilation, int64_t dilation_len_, int64_t groups, const char* attr, const double** scalars, int64_t scalars_len_, const char** algorithm, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_mkldnn_rnn_layer( AtenTensorHandle input, AtenTensorHandle weight0, AtenTensorHandle weight1, AtenTensorHandle weight2, AtenTensorHandle weight3, AtenTensorHandle hx_, AtenTensorHandle cx_, int32_t reverse, const int64_t* batch_sizes, int64_t batch_sizes_len_, int64_t mode, int64_t hidden_size, int64_t num_layers, int32_t has_biases, int32_t bidirectional, int32_t batch_first, int32_t train, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2, AtenTensorHandle* ret3); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__linear_pointwise( AtenTensorHandle X, AtenTensorHandle W, AtenTensorHandle* B, const char* attr, const double** scalars, int64_t scalars_len_, const char** algorithm, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__linear_pointwise_binary( AtenTensorHandle X, AtenTensorHandle other, AtenTensorHandle W, AtenTensorHandle* B, const char* attr, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__qlinear_pointwise_tensor( AtenTensorHandle X, AtenTensorHandle act_scale, AtenTensorHandle 
act_zero_point, AtenTensorHandle onednn_weight, AtenTensorHandle weight_scales, AtenTensorHandle weight_zero_points, AtenTensorHandle* B, double output_scale, int64_t output_zero_point, const int32_t* output_dtype, const char* post_op_name, const double** post_op_args, int64_t post_op_args_len_, const char* post_op_algorithm, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__qlinear_pointwise_binary_tensor( AtenTensorHandle X, AtenTensorHandle act_scale, AtenTensorHandle act_zero_point, AtenTensorHandle onednn_weight, AtenTensorHandle weight_scales, AtenTensorHandle weight_zero_points, AtenTensorHandle* other, AtenTensorHandle* B, double output_scale, int64_t output_zero_point, const int32_t* output_dtype, double other_scale, int64_t other_zero_point, const char* binary_post_op, double binary_alpha, const char* unary_post_op, const double** unary_post_op_args, int64_t unary_post_op_args_len_, const char* unary_post_op_algorithm, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__qconv2d_pointwise_tensor( AtenTensorHandle X, AtenTensorHandle act_scale, AtenTensorHandle act_zero_point, AtenTensorHandle onednn_weight, AtenTensorHandle weight_scales, AtenTensorHandle weight_zero_points, AtenTensorHandle* B, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int64_t groups, double output_scale, int64_t output_zero_point, const int32_t* output_dtype, const char* attr, const double** post_op_args, int64_t post_op_args_len_, const char** algorithm, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__qconv2d_pointwise_binary_tensor( AtenTensorHandle X, AtenTensorHandle act_scale, AtenTensorHandle act_zero_point, AtenTensorHandle onednn_weight, AtenTensorHandle weight_scales, AtenTensorHandle weight_zero_points, AtenTensorHandle accum, AtenTensorHandle* B, const int64_t* stride_args, int64_t stride_len_, const int64_t* padding_args, int64_t padding_len_, const int64_t* dilation_args, int64_t dilation_len_, int64_t groups, double output_scale, int64_t output_zero_point, const int32_t* output_dtype, double accum_scale, int64_t accum_zero_point, const char* binary_attr, double* alpha, const char** unary_attr, const double** unary_scalars, int64_t unary_scalars_len_, const char** unary_algorithm, AtenTensorHandle* ret0); #if AT_MKL_ENABLED() AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__mkl_linear( AtenTensorHandle X, AtenTensorHandle W, AtenTensorHandle origin_W, AtenTensorHandle* B, int64_t prepack_batch_size, AtenTensorHandle* ret0); #endif // AT_MKL_ENABLED #endif // AT_MKLDNN_ENABLED() AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__weight_int4pack_mm_cpu_tensor( AtenTensorHandle X, AtenTensorHandle w, AtenTensorHandle qGroupSize, AtenTensorHandle qScaleAndZeros, AtenTensorHandle* ret0); #ifdef __cplusplus } // extern "C" #endif #endif // AOTI_TORCH_SHIM_MKLDNN ```
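These fused oneDNN entry points are normally emitted by Inductor's CPU backend rather than written by hand, but the sketch below illustrates the calling convention of aoti_torch_cpu__linear_pointwise: the optional bias travels as an AtenTensorHandle* (assumed here to accept nullptr for "no bias"), and the post-op is described by the attr/scalars/algorithm parameters. The attribute string "none" and the helper run_linear_no_postop are assumptions made for illustration, not guarantees from this header.

```cpp
// Hypothetical sketch of the calling convention for one mkldnn shim entry
// point; in practice these calls are generated by Inductor, not hand-written.
#include <torch/csrc/inductor/aoti_torch/c/shim_mkldnn.h>

#if AT_MKLDNN_ENABLED()
// Assumes X and W are valid handles (W packed as the kernel expects), that a
// null bias pointer means "no bias", and that "none" is an accepted post-op
// attribute; these are assumptions, not part of the header's contract.
AOTITorchError run_linear_no_postop(
    AtenTensorHandle X,
    AtenTensorHandle W,
    AtenTensorHandle* maybe_bias,
    AtenTensorHandle* out) {
  return aoti_torch_cpu__linear_pointwise(
      X,
      W,
      maybe_bias,
      /*attr=*/"none",
      /*scalars=*/nullptr,
      /*scalars_len_=*/0,
      /*algorithm=*/nullptr,
      out);
}
#endif // AT_MKLDNN_ENABLED()
```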
================================================================================================================================================= SOURCE CODE FILE: shim_xpu.h LINES: 1 SIZE: 1.42 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\inductor\aoti_torch\c\shim_xpu.h ENCODING: utf-8
```h
#ifndef AOTI_TORCH_SHIM_XPU
#define AOTI_TORCH_SHIM_XPU

#include <torch/csrc/inductor/aoti_torch/c/shim.h>

#ifdef USE_XPU

#ifdef __cplusplus
extern "C" {
#endif

struct XPUGuardOpaque;
using XPUGuardHandle = XPUGuardOpaque*;

AOTI_TORCH_EXPORT AOTITorchError aoti_torch_create_xpu_guard(
    int32_t device_index,
    XPUGuardHandle* ret_guard // returns new reference
);

AOTI_TORCH_EXPORT AOTITorchError
aoti_torch_delete_xpu_guard(XPUGuardHandle guard);

AOTI_TORCH_EXPORT AOTITorchError
aoti_torch_xpu_guard_set_index(XPUGuardHandle guard, int32_t device_index);

struct XPUStreamGuardOpaque;
using XPUStreamGuardHandle = XPUStreamGuardOpaque*;

AOTI_TORCH_EXPORT AOTITorchError aoti_torch_create_xpu_stream_guard(
    void* stream,
    int32_t device_index,
    XPUStreamGuardHandle* ret_guard // returns new reference
);

AOTI_TORCH_EXPORT AOTITorchError
aoti_torch_delete_xpu_stream_guard(XPUStreamGuardHandle guard);

AOTI_TORCH_EXPORT AOTITorchError
aoti_torch_get_current_xpu_stream(int32_t device_index, void** ret_stream);

AOTI_TORCH_EXPORT AOTITorchError
aoti_torch_get_current_xpu_device(int32_t* device_index);

AOTI_TORCH_EXPORT AOTITorchError
aoti_torch_set_current_xpu_device(const int32_t& device_index);

AOTI_TORCH_EXPORT AOTITorchError aoti_torch_get_current_sycl_queue(void** ret);

#ifdef __cplusplus
} // extern "C"
#endif

#endif // USE_XPU
#endif // AOTI_TORCH_SHIM_XPU
```
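The XPU guard handles are opaque and must be deleted explicitly, so callers typically wrap them in a small RAII helper, analogous to how the CUDA guards in shim.h are used. The XpuGuardRAII class below is a hypothetical sketch of such a wrapper built only on the functions declared above; it is not part of the shim.

```cpp
// Hypothetical RAII wrapper over the opaque XPU guard handle; illustrative
// only, not part of the shim.
#include <torch/csrc/inductor/aoti_torch/c/shim_xpu.h>

#ifdef USE_XPU
class XpuGuardRAII {
 public:
  explicit XpuGuardRAII(int32_t device_index) {
    // On failure guard_ stays null; real code would surface the error
    // instead of discarding it.
    (void)aoti_torch_create_xpu_guard(device_index, &guard_);
  }
  ~XpuGuardRAII() {
    if (guard_ != nullptr) {
      (void)aoti_torch_delete_xpu_guard(guard_);
    }
  }
  void set_index(int32_t device_index) {
    (void)aoti_torch_xpu_guard_set_index(guard_, device_index);
  }
  XpuGuardRAII(const XpuGuardRAII&) = delete;
  XpuGuardRAII& operator=(const XpuGuardRAII&) = delete;

 private:
  XPUGuardHandle guard_ = nullptr;
};

// Usage: hold device 0 for the current scope, then query its stream.
inline void* current_stream_on_device0() {
  XpuGuardRAII guard(/*device_index=*/0);
  void* stream = nullptr;
  (void)aoti_torch_get_current_xpu_stream(/*device_index=*/0, &stream);
  return stream; // opaque stream pointer, per the shim's void* convention
}
#endif // USE_XPU
```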
=========================================================================================================================================================== SOURCE CODE FILE: c_shim_cpu.h LINES: 1 SIZE: 28.45 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\inductor\aoti_torch\generated\c_shim_cpu.h ENCODING: utf-8 ```h // WARNING: THIS FILE IS AUTOGENERATED BY torchgen. DO NOT MODIFY BY HAND. // See https://github.com/pytorch/pytorch/blob/7e86a7c0155295539996e0cf422883571126073e/torchgen/gen.py#L2424-L2436 for details #pragma once #include <torch/csrc/inductor/aoti_torch/c/shim.h> #ifdef __cplusplus extern "C" { #endif AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__adaptive_avg_pool2d(AtenTensorHandle self, const int64_t* output_size, int64_t output_size_len_, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__adaptive_avg_pool2d_backward(AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__adaptive_avg_pool3d(AtenTensorHandle self, const int64_t* output_size, int64_t output_size_len_, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__adaptive_avg_pool3d_backward(AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__addmm_activation(AtenTensorHandle self, AtenTensorHandle mat1, AtenTensorHandle mat2, double beta, double alpha, int32_t use_gelu, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__cdist_backward(AtenTensorHandle grad, AtenTensorHandle x1, AtenTensorHandle x2, double p, AtenTensorHandle cdist, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__cdist_forward(AtenTensorHandle x1, AtenTensorHandle x2, double p, int64_t* compute_mode, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__dyn_quant_matmul_4bit(AtenTensorHandle inp, AtenTensorHandle packed_weights, int64_t block_size, int64_t in_features, int64_t out_features, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__dyn_quant_pack_4bit_weight(AtenTensorHandle weights, AtenTensorHandle scales_zeros, AtenTensorHandle* bias, int64_t block_size, int64_t in_features, int64_t out_features, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__efficientzerotensor(const int64_t* size, int64_t size_len_, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__embedding_bag(AtenTensorHandle weight, AtenTensorHandle indices, AtenTensorHandle offsets, int32_t scale_grad_by_freq, int64_t mode, int32_t sparse, AtenTensorHandle* per_sample_weights, int32_t include_last_offset, int64_t padding_idx, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2, AtenTensorHandle* ret3); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__embedding_bag_dense_backward(AtenTensorHandle grad, AtenTensorHandle indices, AtenTensorHandle offset2bag, AtenTensorHandle bag_size, AtenTensorHandle maximum_indices, int64_t num_weights, int32_t scale_grad_by_freq, int64_t mode, AtenTensorHandle* per_sample_weights, int64_t padding_idx, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__embedding_bag_forward_only(AtenTensorHandle weight, AtenTensorHandle indices, AtenTensorHandle offsets, int32_t scale_grad_by_freq, int64_t mode, int32_t sparse, AtenTensorHandle* per_sample_weights, int32_t 
include_last_offset, int64_t padding_idx, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2, AtenTensorHandle* ret3); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__embedding_bag_per_sample_weights_backward(AtenTensorHandle grad, AtenTensorHandle weight, AtenTensorHandle indices, AtenTensorHandle offsets, AtenTensorHandle offset2bag, int64_t mode, int64_t padding_idx, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__fft_c2c(AtenTensorHandle self, const int64_t* dim, int64_t dim_len_, int64_t normalization, int32_t forward, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__fft_r2c(AtenTensorHandle self, const int64_t* dim, int64_t dim_len_, int64_t normalization, int32_t onesided, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__fused_moving_avg_obs_fq_helper(AtenTensorHandle self, AtenTensorHandle observer_on, AtenTensorHandle fake_quant_on, AtenTensorHandle running_min, AtenTensorHandle running_max, AtenTensorHandle scale, AtenTensorHandle zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, int32_t per_row_fake_quant, int32_t symmetric_quant, AtenTensorHandle* ret0, AtenTensorHandle* ret1); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__fused_moving_avg_obs_fq_helper_functional(AtenTensorHandle self, AtenTensorHandle observer_on, AtenTensorHandle fake_quant_on, AtenTensorHandle running_min, AtenTensorHandle running_max, AtenTensorHandle scale, AtenTensorHandle zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, int32_t per_row_fake_quant, int32_t symmetric_quant, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2, AtenTensorHandle* ret3, AtenTensorHandle* ret4, AtenTensorHandle* ret5); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__histogramdd_from_bin_cts(AtenTensorHandle self, const int64_t* bins, int64_t bins_len_, const double** range, int64_t range_len_, AtenTensorHandle* weight, int32_t density, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__int_mm_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle mat2); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__pdist_backward(AtenTensorHandle grad, AtenTensorHandle self, double p, AtenTensorHandle pdist, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__pdist_forward(AtenTensorHandle self, double p, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__scaled_dot_product_flash_attention_for_cpu(AtenTensorHandle query, AtenTensorHandle key, AtenTensorHandle value, double dropout_p, int32_t is_causal, AtenTensorHandle* attn_mask, double* scale, AtenTensorHandle* ret0, AtenTensorHandle* ret1); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__scaled_dot_product_flash_attention_for_cpu_backward(AtenTensorHandle grad_out, AtenTensorHandle query, AtenTensorHandle key, AtenTensorHandle value, AtenTensorHandle out, AtenTensorHandle logsumexp, double dropout_p, int32_t is_causal, AtenTensorHandle* attn_mask, double* scale, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__scaled_dot_product_fused_attention_overrideable(AtenTensorHandle query, AtenTensorHandle key, AtenTensorHandle value, AtenTensorHandle* attn_bias, double dropout_p, int32_t is_causal, int32_t return_debug_mask, double* scale, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2, AtenTensorHandle* ret3, int64_t* 
ret4, int64_t* ret5, AtenTensorHandle* ret6, AtenTensorHandle* ret7, AtenTensorHandle* ret8); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__scaled_dot_product_fused_attention_overrideable_backward(AtenTensorHandle grad_out, AtenTensorHandle query, AtenTensorHandle key, AtenTensorHandle value, AtenTensorHandle attn_bias, const int32_t* grad_input_mask, int64_t grad_input_mask_len_, AtenTensorHandle out, AtenTensorHandle logsumexp, AtenTensorHandle cum_seq_q, AtenTensorHandle cum_seq_k, int64_t max_q, int64_t max_k, double dropout_p, int32_t is_causal, AtenTensorHandle philox_seed, AtenTensorHandle philox_offset, double* scale, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2, AtenTensorHandle* ret3); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__segment_reduce_backward(AtenTensorHandle grad, AtenTensorHandle output, AtenTensorHandle data, const char* reduce, AtenTensorHandle* lengths, AtenTensorHandle* offsets, int64_t axis, double* initial, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__to_sparse(AtenTensorHandle self, int32_t* layout, const int64_t** blocksize, int64_t blocksize_len_, int64_t* dense_dim, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__trilinear(AtenTensorHandle i1, AtenTensorHandle i2, AtenTensorHandle i3, const int64_t* expand1, int64_t expand1_len_, const int64_t* expand2, int64_t expand2_len_, const int64_t* expand3, int64_t expand3_len_, const int64_t* sumdim, int64_t sumdim_len_, int64_t unroll_dim, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__weight_int8pack_mm(AtenTensorHandle self, AtenTensorHandle mat2, AtenTensorHandle scales, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_adaptive_max_pool2d(AtenTensorHandle self, const int64_t* output_size, int64_t output_size_len_, AtenTensorHandle* ret0, AtenTensorHandle* ret1); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_adaptive_max_pool2d_backward(AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle indices, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_adaptive_max_pool3d(AtenTensorHandle self, const int64_t* output_size, int64_t output_size_len_, AtenTensorHandle* ret0, AtenTensorHandle* ret1); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_adaptive_max_pool3d_backward(AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle indices, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_add_Scalar(AtenTensorHandle self, double other, double alpha, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_add_Tensor(AtenTensorHandle self, AtenTensorHandle other, double alpha, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_addbmm(AtenTensorHandle self, AtenTensorHandle batch1, AtenTensorHandle batch2, double beta, double alpha, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_addmm_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle mat1, AtenTensorHandle mat2, double beta, double alpha); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_addmv(AtenTensorHandle self, AtenTensorHandle mat, AtenTensorHandle vec, double beta, double alpha, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_angle(AtenTensorHandle self, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_avg_pool2d(AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t 
stride_len_, const int64_t* padding, int64_t padding_len_, int32_t ceil_mode, int32_t count_include_pad, int64_t* divisor_override, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_avg_pool2d_backward(AtenTensorHandle grad_output, AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, int32_t ceil_mode, int32_t count_include_pad, int64_t* divisor_override, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_avg_pool3d(AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, int32_t ceil_mode, int32_t count_include_pad, int64_t* divisor_override, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_avg_pool3d_backward(AtenTensorHandle grad_output, AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, int32_t ceil_mode, int32_t count_include_pad, int64_t* divisor_override, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_baddbmm_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle batch1, AtenTensorHandle batch2, double beta, double alpha); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_bernoulli__Tensor(AtenTensorHandle self, AtenTensorHandle p, AtenGeneratorHandle* generator); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_bernoulli__float(AtenTensorHandle self, double p, AtenGeneratorHandle* generator); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_bmm_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle mat2); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_bucketize_Tensor(AtenTensorHandle self, AtenTensorHandle boundaries, int32_t out_int32, int32_t right, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_cat(const AtenTensorHandle* tensors, int64_t tensors_len_, int64_t dim, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_cholesky_inverse(AtenTensorHandle self, int32_t upper, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_cholesky_solve(AtenTensorHandle self, AtenTensorHandle input2, int32_t upper, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_convolution(AtenTensorHandle input, AtenTensorHandle weight, AtenTensorHandle* bias, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t transposed, const int64_t* output_padding, int64_t output_padding_len_, int64_t groups, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_convolution_backward(AtenTensorHandle grad_output, AtenTensorHandle input, AtenTensorHandle weight, const int64_t** bias_sizes, int64_t bias_sizes_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t transposed, const int64_t* output_padding, int64_t output_padding_len_, int64_t groups, const int32_t* output_mask, int64_t output_mask_len_, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_cummax(AtenTensorHandle self, int64_t dim, AtenTensorHandle* ret0, AtenTensorHandle* ret1); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_cummin(AtenTensorHandle 
self, int64_t dim, AtenTensorHandle* ret0, AtenTensorHandle* ret1); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_cumprod(AtenTensorHandle self, int64_t dim, int32_t* dtype, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_cumsum(AtenTensorHandle self, int64_t dim, int32_t* dtype, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_exponential(AtenTensorHandle self, double lambd, AtenGeneratorHandle* generator, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_fractional_max_pool2d(AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* output_size, int64_t output_size_len_, AtenTensorHandle random_samples, AtenTensorHandle* ret0, AtenTensorHandle* ret1); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_fractional_max_pool2d_backward(AtenTensorHandle grad_output, AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* output_size, int64_t output_size_len_, AtenTensorHandle indices, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_fractional_max_pool3d(AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* output_size, int64_t output_size_len_, AtenTensorHandle random_samples, AtenTensorHandle* ret0, AtenTensorHandle* ret1); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_fractional_max_pool3d_backward(AtenTensorHandle grad_output, AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* output_size, int64_t output_size_len_, AtenTensorHandle indices, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_gcd(AtenTensorHandle self, AtenTensorHandle other, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_geqrf(AtenTensorHandle self, AtenTensorHandle* ret0, AtenTensorHandle* ret1); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_grid_sampler_2d_backward(AtenTensorHandle grad_output, AtenTensorHandle input, AtenTensorHandle grid, int64_t interpolation_mode, int64_t padding_mode, int32_t align_corners, const int32_t* output_mask, int64_t output_mask_len_, AtenTensorHandle* ret0, AtenTensorHandle* ret1); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_histc(AtenTensorHandle self, int64_t bins, double min, double max, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_histogram_bin_ct(AtenTensorHandle self, int64_t bins, const double** range, int64_t range_len_, AtenTensorHandle* weight, int32_t density, AtenTensorHandle* ret0, AtenTensorHandle* ret1); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_index_Tensor(AtenTensorHandle self, const AtenTensorHandle** indices, int64_t indices_len_, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_index_put(AtenTensorHandle self, const AtenTensorHandle** indices, int64_t indices_len_, AtenTensorHandle values, int32_t accumulate, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_index_reduce(AtenTensorHandle self, int64_t dim, AtenTensorHandle index, AtenTensorHandle source, const char* reduce, int32_t include_self, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_kthvalue(AtenTensorHandle self, int64_t k, int64_t dim, int32_t keepdim, AtenTensorHandle* ret0, AtenTensorHandle* ret1); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_logcumsumexp(AtenTensorHandle self, int64_t dim, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_lu_unpack(AtenTensorHandle LU_data, 
AtenTensorHandle LU_pivots, int32_t unpack_data, int32_t unpack_pivots, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_masked_scatter(AtenTensorHandle self, AtenTensorHandle mask, AtenTensorHandle source, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_masked_scatter_backward(AtenTensorHandle grad_output, AtenTensorHandle mask, const int64_t* sizes, int64_t sizes_len_, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_masked_select(AtenTensorHandle self, AtenTensorHandle mask, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_max_pool2d_with_indices(AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t ceil_mode, AtenTensorHandle* ret0, AtenTensorHandle* ret1); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_max_pool2d_with_indices_backward(AtenTensorHandle grad_output, AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t ceil_mode, AtenTensorHandle indices, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_max_pool3d_with_indices(AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t ceil_mode, AtenTensorHandle* ret0, AtenTensorHandle* ret1); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_max_pool3d_with_indices_backward(AtenTensorHandle grad_output, AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t ceil_mode, AtenTensorHandle indices, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_max_unpool2d(AtenTensorHandle self, AtenTensorHandle indices, const int64_t* output_size, int64_t output_size_len_, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_max_unpool3d(AtenTensorHandle self, AtenTensorHandle indices, const int64_t* output_size, int64_t output_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_median(AtenTensorHandle self, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_mm_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle mat2); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_mode(AtenTensorHandle self, int64_t dim, int32_t keepdim, AtenTensorHandle* ret0, AtenTensorHandle* ret1); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_mul_Scalar(AtenTensorHandle self, double other, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_mul_Tensor(AtenTensorHandle self, AtenTensorHandle other, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_nanmedian(AtenTensorHandle self, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_native_dropout(AtenTensorHandle input, double p, int32_t* train, AtenTensorHandle* ret0, AtenTensorHandle* ret1); AOTI_TORCH_EXPORT AOTITorchError 
aoti_torch_cpu_nonzero(AtenTensorHandle self, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_normal_functional(AtenTensorHandle self, double mean, double std, AtenGeneratorHandle* generator, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_ormqr(AtenTensorHandle self, AtenTensorHandle input2, AtenTensorHandle input3, int32_t left, int32_t transpose, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_polar(AtenTensorHandle abs, AtenTensorHandle angle, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_pow_Scalar(double self, AtenTensorHandle exponent, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_pow_Tensor_Scalar(AtenTensorHandle self, double exponent, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_pow_Tensor_Tensor(AtenTensorHandle self, AtenTensorHandle exponent, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_rand(const int64_t* size, int64_t size_len_, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_rand_generator(const int64_t* size, int64_t size_len_, AtenGeneratorHandle* generator, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_randint(int64_t high, const int64_t* size, int64_t size_len_, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_randint_generator(int64_t high, const int64_t* size, int64_t size_len_, AtenGeneratorHandle* generator, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_randint_low(int64_t low, int64_t high, const int64_t* size, int64_t size_len_, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_randint_low_out(AtenTensorHandle out, int64_t low, int64_t high, const int64_t* size, int64_t size_len_); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_randn(const int64_t* size, int64_t size_len_, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_randn_generator(const int64_t* size, int64_t size_len_, AtenGeneratorHandle* generator, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_randperm(int64_t n, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_repeat_interleave_Tensor(AtenTensorHandle repeats, int64_t* output_size, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_replication_pad1d_backward(AtenTensorHandle grad_output, AtenTensorHandle self, const int64_t* padding, int64_t padding_len_, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_replication_pad2d_backward(AtenTensorHandle grad_output, AtenTensorHandle self, const int64_t* padding, int64_t padding_len_, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError 
aoti_torch_cpu_reshape(AtenTensorHandle self, const int64_t* shape, int64_t shape_len_, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_resize_(AtenTensorHandle self, const int64_t* size, int64_t size_len_, int32_t* memory_format); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_resize_as_(AtenTensorHandle self, AtenTensorHandle the_template, int32_t* memory_format); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_scatter_src_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, AtenTensorHandle index, AtenTensorHandle src); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_scatter_value_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, AtenTensorHandle index, double value); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_scatter_reduce_two_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, AtenTensorHandle index, AtenTensorHandle src, const char* reduce, int32_t include_self); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_searchsorted_Scalar(AtenTensorHandle sorted_sequence, double self, int32_t out_int32, int32_t right, const char** side, AtenTensorHandle* sorter, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_searchsorted_Tensor(AtenTensorHandle sorted_sequence, AtenTensorHandle self, int32_t out_int32, int32_t right, const char** side, AtenTensorHandle* sorter, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_segment_reduce(AtenTensorHandle data, const char* reduce, AtenTensorHandle* lengths, AtenTensorHandle* indices, AtenTensorHandle* offsets, int64_t axis, int32_t unsafe, double* initial, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_set__source_Tensor(AtenTensorHandle self, AtenTensorHandle source); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_slice_Tensor(AtenTensorHandle self, int64_t dim, int64_t* start, int64_t* end, int64_t step, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_soft_margin_loss_backward(AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle target, int64_t reduction, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_sort(AtenTensorHandle self, int64_t dim, int32_t descending, AtenTensorHandle* ret0, AtenTensorHandle* ret1); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_sort_stable(AtenTensorHandle self, int32_t* stable, int64_t dim, int32_t descending, AtenTensorHandle* ret0, AtenTensorHandle* ret1); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_to_sparse(AtenTensorHandle self, int32_t* layout, const int64_t** blocksize, int64_t blocksize_len_, int64_t* dense_dim, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_topk(AtenTensorHandle self, int64_t k, int64_t dim, int32_t largest, int32_t sorted, AtenTensorHandle* ret0, AtenTensorHandle* ret1); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_triangular_solve(AtenTensorHandle self, AtenTensorHandle A, int32_t upper, int32_t transpose, int32_t unitriangular, AtenTensorHandle* ret0, AtenTensorHandle* ret1); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_uniform(AtenTensorHandle self, double from, double to, AtenGeneratorHandle* generator, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_upsample_bicubic2d_backward(AtenTensorHandle grad_output, const int64_t* output_size, int64_t output_size_len_, const int64_t* input_size, int64_t input_size_len_, int32_t align_corners, double* scales_h, double* scales_w, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError 
aoti_torch_cpu_upsample_linear1d_backward(AtenTensorHandle grad_output, const int64_t* output_size, int64_t output_size_len_, const int64_t* input_size, int64_t input_size_len_, int32_t align_corners, double* scales, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_upsample_trilinear3d_backward(AtenTensorHandle grad_output, const int64_t* output_size, int64_t output_size_len_, const int64_t* input_size, int64_t input_size_len_, int32_t align_corners, double* scales_d, double* scales_h, double* scales_w, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_view_dtype(AtenTensorHandle self, int32_t dtype, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_view_as_complex(AtenTensorHandle self, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_view_as_real(AtenTensorHandle self, AtenTensorHandle* ret0); #ifdef __cplusplus } // extern "C" #endif ```
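The generated CPU shim follows two recurring signature patterns: out-variant ops (e.g. aoti_torch_cpu_addmm_out) write into a caller-owned handle and return only an error code, while functional ops (e.g. aoti_torch_cpu_nonzero) hand back a new handle through their trailing AtenTensorHandle* parameter. A minimal sketch combining the two, assuming the input handles are valid CPU tensors with compatible shapes:

```cpp
// Sketch of the two common signatures in the generated CPU shim: an
// out-variant (writes into an existing tensor) and a functional op
// (returns a new handle). Error handling is reduced to early returns.
#include <torch/csrc/inductor/aoti_torch/generated/c_shim_cpu.h>

// Assumes out/self/mat1/mat2 are valid CPU tensor handles with compatible
// shapes; that is the caller's responsibility, as with the ATen ops wrapped.
AOTITorchError addmm_then_nonzero(
    AtenTensorHandle out,
    AtenTensorHandle self,
    AtenTensorHandle mat1,
    AtenTensorHandle mat2,
    AtenTensorHandle* nonzero_result /* new handle on success */) {
  // out = beta * self + alpha * (mat1 @ mat2), written in place into `out`.
  AOTITorchError err = aoti_torch_cpu_addmm_out(
      out, self, mat1, mat2, /*beta=*/1.0, /*alpha=*/1.0);
  if (err != AOTI_TORCH_SUCCESS) {
    return err;
  }
  // Functional op: the result comes back as a fresh handle in *nonzero_result.
  return aoti_torch_cpu_nonzero(out, nonzero_result);
}
```

Per the ownership comments in shim.h, the handle returned in nonzero_result is assumed to be a new reference that the caller must eventually release with aoti_torch_delete_tensor_object (or wrap in an RAII helper).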
============================================================================================================================================================ SOURCE CODE FILE: c_shim_cuda.h LINES: 1 SIZE: 33.32 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\inductor\aoti_torch\generated\c_shim_cuda.h ENCODING: utf-8 ```h // WARNING: THIS FILE IS AUTOGENERATED BY torchgen. DO NOT MODIFY BY HAND. // See https://github.com/pytorch/pytorch/blob/7e86a7c0155295539996e0cf422883571126073e/torchgen/gen.py#L2424-L2436 for details #pragma once #include <torch/csrc/inductor/aoti_torch/c/shim.h> #ifdef __cplusplus extern "C" { #endif AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__adaptive_avg_pool2d(AtenTensorHandle self, const int64_t* output_size, int64_t output_size_len_, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__adaptive_avg_pool2d_backward(AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__adaptive_avg_pool3d(AtenTensorHandle self, const int64_t* output_size, int64_t output_size_len_, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__adaptive_avg_pool3d_backward(AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__addmm_activation(AtenTensorHandle self, AtenTensorHandle mat1, AtenTensorHandle mat2, double beta, double alpha, int32_t use_gelu, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__cdist_backward(AtenTensorHandle grad, AtenTensorHandle x1, AtenTensorHandle x2, double p, AtenTensorHandle cdist, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__cdist_forward(AtenTensorHandle x1, AtenTensorHandle x2, double p, int64_t* compute_mode, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__cudnn_rnn(AtenTensorHandle input, const AtenTensorHandle* weight, int64_t weight_len_, int64_t weight_stride0, AtenTensorHandle* weight_buf, AtenTensorHandle hx, AtenTensorHandle* cx, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, int32_t batch_first, double dropout, int32_t train, int32_t bidirectional, const int64_t* batch_sizes, int64_t batch_sizes_len_, AtenTensorHandle* dropout_state, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2, AtenTensorHandle* ret3, AtenTensorHandle* ret4); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__efficient_attention_backward(AtenTensorHandle grad_out_, AtenTensorHandle query, AtenTensorHandle key, AtenTensorHandle value, AtenTensorHandle* bias, AtenTensorHandle out, AtenTensorHandle* cu_seqlens_q, AtenTensorHandle* cu_seqlens_k, int64_t max_seqlen_q, int64_t max_seqlen_k, AtenTensorHandle logsumexp, double dropout_p, AtenTensorHandle philox_seed, AtenTensorHandle philox_offset, int64_t custom_mask_type, int32_t bias_requires_grad, double* scale, int64_t* num_splits_key, int64_t* window_size, int32_t shared_storage_dqdkdv, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2, AtenTensorHandle* ret3); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__efficient_attention_forward(AtenTensorHandle query, AtenTensorHandle key, AtenTensorHandle value, AtenTensorHandle* bias, AtenTensorHandle* cu_seqlens_q, AtenTensorHandle* cu_seqlens_k, int64_t* max_seqlen_q, int64_t* max_seqlen_k, double dropout_p, int64_t custom_mask_type, int32_t compute_log_sumexp, double* scale, AtenTensorHandle* seqlen_k, int64_t* 
window_size, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2, AtenTensorHandle* ret3, int64_t* ret4, int64_t* ret5); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__efficientzerotensor(const int64_t* size, int64_t size_len_, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__embedding_bag(AtenTensorHandle weight, AtenTensorHandle indices, AtenTensorHandle offsets, int32_t scale_grad_by_freq, int64_t mode, int32_t sparse, AtenTensorHandle* per_sample_weights, int32_t include_last_offset, int64_t padding_idx, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2, AtenTensorHandle* ret3); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__embedding_bag_dense_backward(AtenTensorHandle grad, AtenTensorHandle indices, AtenTensorHandle offset2bag, AtenTensorHandle bag_size, AtenTensorHandle maximum_indices, int64_t num_weights, int32_t scale_grad_by_freq, int64_t mode, AtenTensorHandle* per_sample_weights, int64_t padding_idx, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__embedding_bag_forward_only(AtenTensorHandle weight, AtenTensorHandle indices, AtenTensorHandle offsets, int32_t scale_grad_by_freq, int64_t mode, int32_t sparse, AtenTensorHandle* per_sample_weights, int32_t include_last_offset, int64_t padding_idx, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2, AtenTensorHandle* ret3); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__embedding_bag_per_sample_weights_backward(AtenTensorHandle grad, AtenTensorHandle weight, AtenTensorHandle indices, AtenTensorHandle offsets, AtenTensorHandle offset2bag, int64_t mode, int64_t padding_idx, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__fft_c2c(AtenTensorHandle self, const int64_t* dim, int64_t dim_len_, int64_t normalization, int32_t forward, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__fft_r2c(AtenTensorHandle self, const int64_t* dim, int64_t dim_len_, int64_t normalization, int32_t onesided, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__flash_attention_backward(AtenTensorHandle grad_out, AtenTensorHandle query, AtenTensorHandle key, AtenTensorHandle value, AtenTensorHandle out, AtenTensorHandle logsumexp, AtenTensorHandle cum_seq_q, AtenTensorHandle cum_seq_k, int64_t max_q, int64_t max_k, double dropout_p, int32_t is_causal, AtenTensorHandle rng_state, AtenTensorHandle unused, double* scale, int64_t* window_size_left, int64_t* window_size_right, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__flash_attention_forward(AtenTensorHandle query, AtenTensorHandle key, AtenTensorHandle value, AtenTensorHandle* cum_seq_q, AtenTensorHandle* cum_seq_k, int64_t max_q, int64_t max_k, double dropout_p, int32_t is_causal, int32_t return_debug_mask, double* scale, int64_t* window_size_left, int64_t* window_size_right, AtenTensorHandle* seqused_k, AtenTensorHandle* alibi_slopes, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2, AtenTensorHandle* ret3, AtenTensorHandle* ret4); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__fused_moving_avg_obs_fq_helper(AtenTensorHandle self, AtenTensorHandle observer_on, AtenTensorHandle fake_quant_on, AtenTensorHandle running_min, AtenTensorHandle running_max, AtenTensorHandle scale, AtenTensorHandle zero_point, double 
averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, int32_t per_row_fake_quant, int32_t symmetric_quant, AtenTensorHandle* ret0, AtenTensorHandle* ret1); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__fused_moving_avg_obs_fq_helper_functional(AtenTensorHandle self, AtenTensorHandle observer_on, AtenTensorHandle fake_quant_on, AtenTensorHandle running_min, AtenTensorHandle running_max, AtenTensorHandle scale, AtenTensorHandle zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, int32_t per_row_fake_quant, int32_t symmetric_quant, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2, AtenTensorHandle* ret3, AtenTensorHandle* ret4, AtenTensorHandle* ret5); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__int_mm_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle mat2); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__pdist_backward(AtenTensorHandle grad, AtenTensorHandle self, double p, AtenTensorHandle pdist, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__pdist_forward(AtenTensorHandle self, double p, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__scaled_dot_product_cudnn_attention(AtenTensorHandle query, AtenTensorHandle key, AtenTensorHandle value, AtenTensorHandle* attn_bias, int32_t compute_log_sumexp, double dropout_p, int32_t is_causal, int32_t return_debug_mask, double* scale, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2, AtenTensorHandle* ret3, int64_t* ret4, int64_t* ret5, AtenTensorHandle* ret6, AtenTensorHandle* ret7, AtenTensorHandle* ret8); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__scaled_dot_product_cudnn_attention_backward(AtenTensorHandle grad_out, AtenTensorHandle query, AtenTensorHandle key, AtenTensorHandle value, AtenTensorHandle out, AtenTensorHandle logsumexp, AtenTensorHandle philox_seed, AtenTensorHandle philox_offset, AtenTensorHandle attn_bias, AtenTensorHandle cum_seq_q, AtenTensorHandle cum_seq_k, int64_t max_q, int64_t max_k, double dropout_p, int32_t is_causal, double* scale, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__scaled_dot_product_efficient_attention(AtenTensorHandle query, AtenTensorHandle key, AtenTensorHandle value, AtenTensorHandle* attn_bias, int32_t compute_log_sumexp, double dropout_p, int32_t is_causal, double* scale, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2, AtenTensorHandle* ret3); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__scaled_dot_product_efficient_attention_backward(AtenTensorHandle grad_out_, AtenTensorHandle query, AtenTensorHandle key, AtenTensorHandle value, AtenTensorHandle attn_bias, AtenTensorHandle out, AtenTensorHandle logsumexp, AtenTensorHandle philox_seed, AtenTensorHandle philox_offset, double dropout_p, const int32_t* grad_input_mask, int64_t grad_input_mask_len_, int32_t is_causal, double* scale, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2, AtenTensorHandle* ret3); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__scaled_dot_product_flash_attention(AtenTensorHandle query, AtenTensorHandle key, AtenTensorHandle value, double dropout_p, int32_t is_causal, int32_t return_debug_mask, double* scale, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2, AtenTensorHandle* ret3, int64_t* ret4, int64_t* ret5, AtenTensorHandle* ret6, AtenTensorHandle* ret7, AtenTensorHandle* ret8); 
AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__scaled_dot_product_flash_attention_backward(AtenTensorHandle grad_out, AtenTensorHandle query, AtenTensorHandle key, AtenTensorHandle value, AtenTensorHandle out, AtenTensorHandle logsumexp, AtenTensorHandle cum_seq_q, AtenTensorHandle cum_seq_k, int64_t max_q, int64_t max_k, double dropout_p, int32_t is_causal, AtenTensorHandle philox_seed, AtenTensorHandle philox_offset, double* scale, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__scaled_dot_product_fused_attention_overrideable(AtenTensorHandle query, AtenTensorHandle key, AtenTensorHandle value, AtenTensorHandle* attn_bias, double dropout_p, int32_t is_causal, int32_t return_debug_mask, double* scale, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2, AtenTensorHandle* ret3, int64_t* ret4, int64_t* ret5, AtenTensorHandle* ret6, AtenTensorHandle* ret7, AtenTensorHandle* ret8); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__scaled_dot_product_fused_attention_overrideable_backward(AtenTensorHandle grad_out, AtenTensorHandle query, AtenTensorHandle key, AtenTensorHandle value, AtenTensorHandle attn_bias, const int32_t* grad_input_mask, int64_t grad_input_mask_len_, AtenTensorHandle out, AtenTensorHandle logsumexp, AtenTensorHandle cum_seq_q, AtenTensorHandle cum_seq_k, int64_t max_q, int64_t max_k, double dropout_p, int32_t is_causal, AtenTensorHandle philox_seed, AtenTensorHandle philox_offset, double* scale, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2, AtenTensorHandle* ret3); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__scaled_mm(AtenTensorHandle self, AtenTensorHandle mat2, AtenTensorHandle scale_a, AtenTensorHandle scale_b, AtenTensorHandle* bias, AtenTensorHandle* scale_result, int32_t* out_dtype, int32_t use_fast_accum, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__scaled_mm_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle mat2, AtenTensorHandle scale_a, AtenTensorHandle scale_b, AtenTensorHandle* bias, AtenTensorHandle* scale_result, int32_t* out_dtype, int32_t use_fast_accum); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__segment_reduce_backward(AtenTensorHandle grad, AtenTensorHandle output, AtenTensorHandle data, const char* reduce, AtenTensorHandle* lengths, AtenTensorHandle* offsets, int64_t axis, double* initial, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__thnn_fused_lstm_cell(AtenTensorHandle input_gates, AtenTensorHandle hidden_gates, AtenTensorHandle cx, AtenTensorHandle* input_bias, AtenTensorHandle* hidden_bias, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__to_sparse(AtenTensorHandle self, int32_t* layout, const int64_t** blocksize, int64_t blocksize_len_, int64_t* dense_dim, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__trilinear(AtenTensorHandle i1, AtenTensorHandle i2, AtenTensorHandle i3, const int64_t* expand1, int64_t expand1_len_, const int64_t* expand2, int64_t expand2_len_, const int64_t* expand3, int64_t expand3_len_, const int64_t* sumdim, int64_t sumdim_len_, int64_t unroll_dim, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_adaptive_max_pool2d(AtenTensorHandle self, const int64_t* output_size, int64_t output_size_len_, AtenTensorHandle* ret0, AtenTensorHandle* ret1); AOTI_TORCH_EXPORT AOTITorchError 
aoti_torch_cuda_adaptive_max_pool2d_backward(AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle indices, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_adaptive_max_pool3d(AtenTensorHandle self, const int64_t* output_size, int64_t output_size_len_, AtenTensorHandle* ret0, AtenTensorHandle* ret1); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_adaptive_max_pool3d_backward(AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle indices, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_add_Scalar(AtenTensorHandle self, double other, double alpha, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_add_Tensor(AtenTensorHandle self, AtenTensorHandle other, double alpha, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_addbmm(AtenTensorHandle self, AtenTensorHandle batch1, AtenTensorHandle batch2, double beta, double alpha, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_addmm_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle mat1, AtenTensorHandle mat2, double beta, double alpha); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_addmv(AtenTensorHandle self, AtenTensorHandle mat, AtenTensorHandle vec, double beta, double alpha, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_angle(AtenTensorHandle self, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_avg_pool2d(AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, int32_t ceil_mode, int32_t count_include_pad, int64_t* divisor_override, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_avg_pool2d_backward(AtenTensorHandle grad_output, AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, int32_t ceil_mode, int32_t count_include_pad, int64_t* divisor_override, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_avg_pool3d(AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, int32_t ceil_mode, int32_t count_include_pad, int64_t* divisor_override, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_avg_pool3d_backward(AtenTensorHandle grad_output, AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, int32_t ceil_mode, int32_t count_include_pad, int64_t* divisor_override, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_baddbmm_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle batch1, AtenTensorHandle batch2, double beta, double alpha); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_bernoulli__Tensor(AtenTensorHandle self, AtenTensorHandle p, AtenGeneratorHandle* generator); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_bernoulli__float(AtenTensorHandle self, double p, AtenGeneratorHandle* generator); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_bmm_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle mat2); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_bucketize_Tensor(AtenTensorHandle self, AtenTensorHandle boundaries, int32_t out_int32, int32_t 
right, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_cat(const AtenTensorHandle* tensors, int64_t tensors_len_, int64_t dim, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_cholesky_inverse(AtenTensorHandle self, int32_t upper, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_cholesky_solve(AtenTensorHandle self, AtenTensorHandle input2, int32_t upper, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_convolution(AtenTensorHandle input, AtenTensorHandle weight, AtenTensorHandle* bias, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t transposed, const int64_t* output_padding, int64_t output_padding_len_, int64_t groups, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_convolution_backward(AtenTensorHandle grad_output, AtenTensorHandle input, AtenTensorHandle weight, const int64_t** bias_sizes, int64_t bias_sizes_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t transposed, const int64_t* output_padding, int64_t output_padding_len_, int64_t groups, const int32_t* output_mask, int64_t output_mask_len_, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_cummax(AtenTensorHandle self, int64_t dim, AtenTensorHandle* ret0, AtenTensorHandle* ret1); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_cummin(AtenTensorHandle self, int64_t dim, AtenTensorHandle* ret0, AtenTensorHandle* ret1); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_cumprod(AtenTensorHandle self, int64_t dim, int32_t* dtype, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_cumsum(AtenTensorHandle self, int64_t dim, int32_t* dtype, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_exponential(AtenTensorHandle self, double lambd, AtenGeneratorHandle* generator, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_fractional_max_pool2d(AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* output_size, int64_t output_size_len_, AtenTensorHandle random_samples, AtenTensorHandle* ret0, AtenTensorHandle* ret1); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_fractional_max_pool2d_backward(AtenTensorHandle grad_output, AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* output_size, int64_t output_size_len_, AtenTensorHandle indices, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_fractional_max_pool3d(AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* output_size, int64_t output_size_len_, AtenTensorHandle random_samples, AtenTensorHandle* ret0, AtenTensorHandle* ret1); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_fractional_max_pool3d_backward(AtenTensorHandle grad_output, AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* output_size, int64_t output_size_len_, AtenTensorHandle indices, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_gcd(AtenTensorHandle self, AtenTensorHandle other, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_geqrf(AtenTensorHandle self, AtenTensorHandle* ret0, AtenTensorHandle* ret1); AOTI_TORCH_EXPORT 
AOTITorchError aoti_torch_cuda_grid_sampler_2d_backward(AtenTensorHandle grad_output, AtenTensorHandle input, AtenTensorHandle grid, int64_t interpolation_mode, int64_t padding_mode, int32_t align_corners, const int32_t* output_mask, int64_t output_mask_len_, AtenTensorHandle* ret0, AtenTensorHandle* ret1); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_histc(AtenTensorHandle self, int64_t bins, double min, double max, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_index_Tensor(AtenTensorHandle self, const AtenTensorHandle** indices, int64_t indices_len_, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_index_put(AtenTensorHandle self, const AtenTensorHandle** indices, int64_t indices_len_, AtenTensorHandle values, int32_t accumulate, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_index_reduce(AtenTensorHandle self, int64_t dim, AtenTensorHandle index, AtenTensorHandle source, const char* reduce, int32_t include_self, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_kthvalue(AtenTensorHandle self, int64_t k, int64_t dim, int32_t keepdim, AtenTensorHandle* ret0, AtenTensorHandle* ret1); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_logcumsumexp(AtenTensorHandle self, int64_t dim, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_lu_unpack(AtenTensorHandle LU_data, AtenTensorHandle LU_pivots, int32_t unpack_data, int32_t unpack_pivots, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_masked_scatter(AtenTensorHandle self, AtenTensorHandle mask, AtenTensorHandle source, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_masked_scatter_backward(AtenTensorHandle grad_output, AtenTensorHandle mask, const int64_t* sizes, int64_t sizes_len_, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_masked_select(AtenTensorHandle self, AtenTensorHandle mask, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_max_pool2d_with_indices(AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t ceil_mode, AtenTensorHandle* ret0, AtenTensorHandle* ret1); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_max_pool2d_with_indices_backward(AtenTensorHandle grad_output, AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t ceil_mode, AtenTensorHandle indices, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_max_pool3d_with_indices(AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t ceil_mode, AtenTensorHandle* ret0, AtenTensorHandle* ret1); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_max_pool3d_with_indices_backward(AtenTensorHandle grad_output, AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t ceil_mode, AtenTensorHandle indices, AtenTensorHandle* ret0); 
AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_max_unpool2d(AtenTensorHandle self, AtenTensorHandle indices, const int64_t* output_size, int64_t output_size_len_, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_max_unpool3d(AtenTensorHandle self, AtenTensorHandle indices, const int64_t* output_size, int64_t output_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_median(AtenTensorHandle self, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_mm_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle mat2); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_mode(AtenTensorHandle self, int64_t dim, int32_t keepdim, AtenTensorHandle* ret0, AtenTensorHandle* ret1); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_mul_Scalar(AtenTensorHandle self, double other, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_mul_Tensor(AtenTensorHandle self, AtenTensorHandle other, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_nanmedian(AtenTensorHandle self, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_native_dropout(AtenTensorHandle input, double p, int32_t* train, AtenTensorHandle* ret0, AtenTensorHandle* ret1); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_nonzero(AtenTensorHandle self, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_normal_functional(AtenTensorHandle self, double mean, double std, AtenGeneratorHandle* generator, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_ormqr(AtenTensorHandle self, AtenTensorHandle input2, AtenTensorHandle input3, int32_t left, int32_t transpose, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_polar(AtenTensorHandle abs, AtenTensorHandle angle, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_pow_Scalar(double self, AtenTensorHandle exponent, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_pow_Tensor_Scalar(AtenTensorHandle self, double exponent, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_pow_Tensor_Tensor(AtenTensorHandle self, AtenTensorHandle exponent, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_rand(const int64_t* size, int64_t size_len_, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_rand_generator(const int64_t* size, int64_t size_len_, AtenGeneratorHandle* generator, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_randint(int64_t high, const int64_t* size, int64_t size_len_, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_randint_generator(int64_t high, const int64_t* size, int64_t size_len_, AtenGeneratorHandle* generator, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_randint_low(int64_t low, int64_t high, const int64_t* size, int64_t size_len_, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* 
pin_memory, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_randint_low_out(AtenTensorHandle out, int64_t low, int64_t high, const int64_t* size, int64_t size_len_); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_randn(const int64_t* size, int64_t size_len_, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_randn_generator(const int64_t* size, int64_t size_len_, AtenGeneratorHandle* generator, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_randperm(int64_t n, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_repeat_interleave_Tensor(AtenTensorHandle repeats, int64_t* output_size, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_replication_pad1d_backward(AtenTensorHandle grad_output, AtenTensorHandle self, const int64_t* padding, int64_t padding_len_, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_replication_pad2d_backward(AtenTensorHandle grad_output, AtenTensorHandle self, const int64_t* padding, int64_t padding_len_, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_reshape(AtenTensorHandle self, const int64_t* shape, int64_t shape_len_, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_resize_(AtenTensorHandle self, const int64_t* size, int64_t size_len_, int32_t* memory_format); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_resize_as_(AtenTensorHandle self, AtenTensorHandle the_template, int32_t* memory_format); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_scatter_src_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, AtenTensorHandle index, AtenTensorHandle src); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_scatter_value_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, AtenTensorHandle index, double value); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_scatter_reduce_two_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, AtenTensorHandle index, AtenTensorHandle src, const char* reduce, int32_t include_self); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_searchsorted_Scalar(AtenTensorHandle sorted_sequence, double self, int32_t out_int32, int32_t right, const char** side, AtenTensorHandle* sorter, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_searchsorted_Tensor(AtenTensorHandle sorted_sequence, AtenTensorHandle self, int32_t out_int32, int32_t right, const char** side, AtenTensorHandle* sorter, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_segment_reduce(AtenTensorHandle data, const char* reduce, AtenTensorHandle* lengths, AtenTensorHandle* indices, AtenTensorHandle* offsets, int64_t axis, int32_t unsafe, double* initial, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_set__source_Tensor(AtenTensorHandle self, AtenTensorHandle source); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_slice_Tensor(AtenTensorHandle self, int64_t dim, int64_t* start, int64_t* end, int64_t step, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_soft_margin_loss_backward(AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle target, int64_t reduction, 
AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_sort(AtenTensorHandle self, int64_t dim, int32_t descending, AtenTensorHandle* ret0, AtenTensorHandle* ret1); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_sort_stable(AtenTensorHandle self, int32_t* stable, int64_t dim, int32_t descending, AtenTensorHandle* ret0, AtenTensorHandle* ret1); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_to_sparse(AtenTensorHandle self, int32_t* layout, const int64_t** blocksize, int64_t blocksize_len_, int64_t* dense_dim, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_topk(AtenTensorHandle self, int64_t k, int64_t dim, int32_t largest, int32_t sorted, AtenTensorHandle* ret0, AtenTensorHandle* ret1); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_triangular_solve(AtenTensorHandle self, AtenTensorHandle A, int32_t upper, int32_t transpose, int32_t unitriangular, AtenTensorHandle* ret0, AtenTensorHandle* ret1); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_uniform(AtenTensorHandle self, double from, double to, AtenGeneratorHandle* generator, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_upsample_bicubic2d_backward(AtenTensorHandle grad_output, const int64_t* output_size, int64_t output_size_len_, const int64_t* input_size, int64_t input_size_len_, int32_t align_corners, double* scales_h, double* scales_w, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_upsample_linear1d_backward(AtenTensorHandle grad_output, const int64_t* output_size, int64_t output_size_len_, const int64_t* input_size, int64_t input_size_len_, int32_t align_corners, double* scales, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_upsample_trilinear3d_backward(AtenTensorHandle grad_output, const int64_t* output_size, int64_t output_size_len_, const int64_t* input_size, int64_t input_size_len_, int32_t align_corners, double* scales_d, double* scales_h, double* scales_w, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_view_dtype(AtenTensorHandle self, int32_t dtype, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_view_as_complex(AtenTensorHandle self, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_view_as_real(AtenTensorHandle self, AtenTensorHandle* ret0); #ifdef __cplusplus } // extern "C" #endif ```
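Optional scalar and dtype arguments in these shims are passed as pointers, with a null pointer standing in for `std::nullopt` (see `pointer_to_optional` in `aoti_torch/utils.h` further below). A small sketch using `aoti_torch_cuda_cumsum`; the assumption that the `int32_t` dtype code is the integer value of the corresponding `at::ScalarType` mirrors how `pointer_to_optional` casts it, but treat this as an illustration, not a guarantee.

```cpp
#include <c10/core/ScalarType.h>
#include <torch/csrc/inductor/aoti_torch/generated/c_shim_cuda.h>

// dtype == nullptr corresponds to dtype=None in the ATen op signature.
AOTITorchError cumsum_keep_dtype(AtenTensorHandle self, AtenTensorHandle* out) {
  return aoti_torch_cuda_cumsum(self, /*dim=*/0, /*dtype=*/nullptr, out);
}

// A non-null pointer supplies an explicit dtype (assumed to be the integer
// value of at::ScalarType::Double here).
AOTITorchError cumsum_as_double(AtenTensorHandle self, AtenTensorHandle* out) {
  int32_t dtype = static_cast<int32_t>(at::ScalarType::Double);
  return aoti_torch_cuda_cumsum(self, /*dim=*/0, &dtype, out);
}
```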
=========================================================================================================================================================== SOURCE CODE FILE: c_shim_xpu.h LINES: 1 SIZE: 9.62 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\inductor\aoti_torch\generated\c_shim_xpu.h ENCODING: utf-8 ```h // WARNING: THIS FILE IS AUTOGENERATED BY torchgen. DO NOT MODIFY BY HAND. // See https://github.com/pytorch/pytorch/blob/7e86a7c0155295539996e0cf422883571126073e/torchgen/gen.py#L2424-L2436 for details #pragma once #include <torch/csrc/inductor/aoti_torch/c/shim.h> #ifdef __cplusplus extern "C" { #endif AOTI_TORCH_EXPORT AOTITorchError aoti_torch_xpu__addmm_activation(AtenTensorHandle self, AtenTensorHandle mat1, AtenTensorHandle mat2, double beta, double alpha, int32_t use_gelu, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_xpu__fused_moving_avg_obs_fq_helper_functional(AtenTensorHandle self, AtenTensorHandle observer_on, AtenTensorHandle fake_quant_on, AtenTensorHandle running_min, AtenTensorHandle running_max, AtenTensorHandle scale, AtenTensorHandle zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, int32_t per_row_fake_quant, int32_t symmetric_quant, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2, AtenTensorHandle* ret3, AtenTensorHandle* ret4, AtenTensorHandle* ret5); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_xpu__scaled_dot_product_fused_attention_overrideable(AtenTensorHandle query, AtenTensorHandle key, AtenTensorHandle value, AtenTensorHandle* attn_bias, double dropout_p, int32_t is_causal, int32_t return_debug_mask, double* scale, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2, AtenTensorHandle* ret3, int64_t* ret4, int64_t* ret5, AtenTensorHandle* ret6, AtenTensorHandle* ret7, AtenTensorHandle* ret8); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_xpu__scaled_dot_product_fused_attention_overrideable_backward(AtenTensorHandle grad_out, AtenTensorHandle query, AtenTensorHandle key, AtenTensorHandle value, AtenTensorHandle attn_bias, const int32_t* grad_input_mask, int64_t grad_input_mask_len_, AtenTensorHandle out, AtenTensorHandle logsumexp, AtenTensorHandle cum_seq_q, AtenTensorHandle cum_seq_k, int64_t max_q, int64_t max_k, double dropout_p, int32_t is_causal, AtenTensorHandle philox_seed, AtenTensorHandle philox_offset, double* scale, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2, AtenTensorHandle* ret3); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_xpu__trilinear(AtenTensorHandle i1, AtenTensorHandle i2, AtenTensorHandle i3, const int64_t* expand1, int64_t expand1_len_, const int64_t* expand2, int64_t expand2_len_, const int64_t* expand3, int64_t expand3_len_, const int64_t* sumdim, int64_t sumdim_len_, int64_t unroll_dim, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_xpu_add_Scalar(AtenTensorHandle self, double other, double alpha, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_xpu_addbmm(AtenTensorHandle self, AtenTensorHandle batch1, AtenTensorHandle batch2, double beta, double alpha, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_xpu_addmm_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle mat1, AtenTensorHandle mat2, double beta, double alpha); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_xpu_addmv(AtenTensorHandle self, AtenTensorHandle mat, AtenTensorHandle vec, double beta, double alpha, AtenTensorHandle* ret0); 
AOTI_TORCH_EXPORT AOTITorchError aoti_torch_xpu_baddbmm_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle batch1, AtenTensorHandle batch2, double beta, double alpha); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_xpu_bmm_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle mat2); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_xpu_cholesky_solve(AtenTensorHandle self, AtenTensorHandle input2, int32_t upper, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_xpu_convolution(AtenTensorHandle input, AtenTensorHandle weight, AtenTensorHandle* bias, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t transposed, const int64_t* output_padding, int64_t output_padding_len_, int64_t groups, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_xpu_convolution_backward(AtenTensorHandle grad_output, AtenTensorHandle input, AtenTensorHandle weight, const int64_t** bias_sizes, int64_t bias_sizes_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t transposed, const int64_t* output_padding, int64_t output_padding_len_, int64_t groups, const int32_t* output_mask, int64_t output_mask_len_, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_xpu_cummax(AtenTensorHandle self, int64_t dim, AtenTensorHandle* ret0, AtenTensorHandle* ret1); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_xpu_cummin(AtenTensorHandle self, int64_t dim, AtenTensorHandle* ret0, AtenTensorHandle* ret1); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_xpu_exponential(AtenTensorHandle self, double lambd, AtenGeneratorHandle* generator, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_xpu_index_put(AtenTensorHandle self, const AtenTensorHandle** indices, int64_t indices_len_, AtenTensorHandle values, int32_t accumulate, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_xpu_kthvalue(AtenTensorHandle self, int64_t k, int64_t dim, int32_t keepdim, AtenTensorHandle* ret0, AtenTensorHandle* ret1); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_xpu_logcumsumexp(AtenTensorHandle self, int64_t dim, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_xpu_masked_scatter(AtenTensorHandle self, AtenTensorHandle mask, AtenTensorHandle source, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_xpu_masked_scatter_backward(AtenTensorHandle grad_output, AtenTensorHandle mask, const int64_t* sizes, int64_t sizes_len_, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_xpu_mm_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle mat2); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_xpu_mul_Scalar(AtenTensorHandle self, double other, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_xpu_normal_functional(AtenTensorHandle self, double mean, double std, AtenGeneratorHandle* generator, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_xpu_polar(AtenTensorHandle abs, AtenTensorHandle angle, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_xpu_rand(const int64_t* size, int64_t size_len_, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_xpu_rand_generator(const int64_t* size, int64_t size_len_, 
AtenGeneratorHandle* generator, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_xpu_randint(int64_t high, const int64_t* size, int64_t size_len_, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_xpu_randint_generator(int64_t high, const int64_t* size, int64_t size_len_, AtenGeneratorHandle* generator, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_xpu_randint_low(int64_t low, int64_t high, const int64_t* size, int64_t size_len_, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_xpu_randint_low_out(AtenTensorHandle out, int64_t low, int64_t high, const int64_t* size, int64_t size_len_); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_xpu_randn(const int64_t* size, int64_t size_len_, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_xpu_randn_generator(const int64_t* size, int64_t size_len_, AtenGeneratorHandle* generator, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_xpu_randperm(int64_t n, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_xpu_reshape(AtenTensorHandle self, const int64_t* shape, int64_t shape_len_, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_xpu_resize_as_(AtenTensorHandle self, AtenTensorHandle the_template, int32_t* memory_format); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_xpu_slice_Tensor(AtenTensorHandle self, int64_t dim, int64_t* start, int64_t* end, int64_t step, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_xpu_soft_margin_loss_backward(AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle target, int64_t reduction, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_xpu_sort(AtenTensorHandle self, int64_t dim, int32_t descending, AtenTensorHandle* ret0, AtenTensorHandle* ret1); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_xpu_to_sparse(AtenTensorHandle self, int32_t* layout, const int64_t** blocksize, int64_t blocksize_len_, int64_t* dense_dim, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_xpu_uniform(AtenTensorHandle self, double from, double to, AtenGeneratorHandle* generator, AtenTensorHandle* ret0); AOTI_TORCH_EXPORT AOTITorchError aoti_torch_xpu_view_dtype(AtenTensorHandle self, int32_t dtype, AtenTensorHandle* ret0); #ifdef __cplusplus } // extern "C" #endif ```
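The factory-style entry points (`rand`, `randn`, `randint`, `randperm`, ...) spell out `TensorOptions` as four optional pointer arguments plus a device index. A sketch under the assumption, consistent with `pointer_to_optional_device` in `aoti_torch/utils.h` below, that the device code is the integer value of `c10::DeviceType`; the function name is illustrative only.

```cpp
#include <c10/core/DeviceType.h>
#include <torch/csrc/inductor/aoti_torch/generated/c_shim_xpu.h>

// Creates a 2x3 random tensor on XPU device 0; nullptr options mean
// "use the default" (dtype, layout, pin_memory).
AOTITorchError randn_2x3_on_xpu(AtenTensorHandle* out) {
  const int64_t size[] = {2, 3};
  int32_t device_type = static_cast<int32_t>(c10::DeviceType::XPU);
  return aoti_torch_xpu_randn(
      size, /*size_len_=*/2,
      /*dtype=*/nullptr, /*layout=*/nullptr,
      &device_type, /*device_index_=*/0,
      /*pin_memory=*/nullptr, out);
}
```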
==================================================================================================================================== SOURCE CODE FILE: mkldnn_tensor.h LINES: 1 SIZE: 0.38 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\inductor\aoti_torch\mkldnn_tensor.h ENCODING: utf-8
```h
#pragma once

#include <ATen/Tensor.h>

namespace torch::aot_inductor {

void* data_ptr_from_mkldnn(at::Tensor* mkldnn_tensor);

at::Tensor mkldnn_tensor_from_data_ptr(
    void* data_ptr,
    at::IntArrayRef dims,
    at::ScalarType dtype,
    at::Device device,
    const uint8_t* opaque_metadata,
    int64_t opaque_metadata_size);

} // namespace torch::aot_inductor
```
========================================================================================================================================= SOURCE CODE FILE: oss_proxy_executor.h LINES: 1 SIZE: 2.90 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\inductor\aoti_torch\oss_proxy_executor.h ENCODING: utf-8
```h
#pragma once

#include <ATen/core/dispatch/Dispatcher.h>
#include <ATen/core/ivalue.h>
#include <c10/macros/Export.h>
#include <nlohmann/json.hpp>
#include <torch/csrc/inductor/aoti_torch/c/shim.h>
#include <torch/csrc/inductor/aoti_torch/proxy_executor.h>

#include <iostream>
#include <utility>

namespace torch::aot_inductor {

enum class DynamicArgType : int {
  TensorType = 0,
  ListTensorType = 1,
  ListOptionalTensorType = 2,
  IntType = 3,
  ListIntType = 4,
};

inline std::ostream& operator<<(std::ostream& os, DynamicArgType arg_type) {
  os << static_cast<int>(arg_type);
  return os;
}

inline bool isTensorType(DynamicArgType arg_type) {
  return arg_type == DynamicArgType::TensorType ||
      arg_type == DynamicArgType::ListTensorType ||
      arg_type == DynamicArgType::ListOptionalTensorType;
}

struct OSSDynamicArg {
  OSSDynamicArg(
      int arg_index,
      DynamicArgType arg_type,
      int length,
      std::optional<std::vector<std::string>> list_item_types = std::nullopt)
      : arg_index(arg_index),
        arg_type(arg_type),
        length(length),
        list_item_types(std::move(list_item_types)) {}
  int arg_index;
  DynamicArgType arg_type;
  int length;
  std::optional<std::vector<std::string>>
      list_item_types; // only used for parsing list of optional tensors
};

struct OSSOpKernel {
  OSSOpKernel(std::string target, c10::OperatorHandle op_handle)
      : target_(std::move(target)), op_handle_(std::move(op_handle)) {}

  std::string target_;
  c10::OperatorHandle op_handle_;
  std::vector<OSSDynamicArg> dynamic_args_;
  std::vector<OSSDynamicArg> outputs_;
  std::vector<c10::IValue> stack_;

  int num_output_tensors() const {
    int num_output_tensors = 0;
    for (const auto& output : outputs_) {
      if (isTensorType(output.arg_type)) {
        num_output_tensors += output.length;
      }
    }
    return num_output_tensors;
  }
};

class OSSProxyExecutor : public ProxyExecutor {
 public:
  explicit OSSProxyExecutor(const std::string& json_path, bool is_cpu);

  void call_function(
      int extern_node_index,
      int num_ints,
      int64_t* flatten_int_args,
      int num_tensors,
      AtenTensorHandle* flatten_tensor_args) override;

 private:
  void prefill_stack_with_static_arguments(
      size_t index,
      const at::TypePtr& schema_arg_type,
      const nlohmann::json& serialized_arg,
      OSSOpKernel& op_kernel);

  void get_input_info_from_serialized(
      const std::vector<c10::Argument>& schema_args,
      const nlohmann::json& serialized_node,
      OSSOpKernel& op_kernel);

  void get_output_info_from_serialized(
      const std::vector<c10::Argument>& schema_returns,
      const nlohmann::json& serialized_node,
      OSSOpKernel& op_kernel);

  std::vector<OSSOpKernel> op_kernels_;
  std::unique_ptr<c10::Device> device_;
};

} // namespace torch::aot_inductor
```
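A small sketch of how the bookkeeping types above fit together outside of their normal use inside `OSSProxyExecutor`; the operator choice and the manual population of `outputs_` are purely illustrative.

```cpp
#include <torch/csrc/inductor/aoti_torch/oss_proxy_executor.h>

using namespace torch::aot_inductor;

int count_outputs_example() {
  // Look up a real dispatcher entry to obtain an OperatorHandle.
  auto op =
      c10::Dispatcher::singleton().findSchemaOrThrow("aten::add", "Tensor");
  OSSOpKernel kernel("aten.add.Tensor", op);
  // One plain tensor output at return index 0.
  kernel.outputs_.emplace_back(
      /*arg_index=*/0, DynamicArgType::TensorType, /*length=*/1);
  // num_output_tensors() counts only isTensorType() entries, so this is 1.
  return kernel.num_output_tensors();
}
```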
===================================================================================================================================== SOURCE CODE FILE: proxy_executor.h LINES: 1 SIZE: 0.49 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\inductor\aoti_torch\proxy_executor.h ENCODING: utf-8
```h
#pragma once

#include <ATen/core/ivalue.h>
#include <c10/macros/Export.h>
#include <torch/csrc/inductor/aoti_torch/c/shim.h>

namespace torch::aot_inductor {

class ProxyExecutor {
 public:
  ProxyExecutor() = default;
  virtual ~ProxyExecutor() = default;

  virtual void call_function(
      int extern_node_index,
      int num_ints,
      int64_t* flatten_int_args,
      int num_tensors,
      AtenTensorHandle* flatten_tensor_args) = 0;
};

} // namespace torch::aot_inductor
```
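`ProxyExecutor` is the abstract hook through which AOTInductor-generated code hands extern kernel calls back to the host, with all integer and tensor arguments flattened into two arrays. A minimal sketch of a custom implementation; the class name and logging body are illustrative only.

```cpp
#include <torch/csrc/inductor/aoti_torch/proxy_executor.h>

#include <iostream>

namespace example {

class LoggingProxyExecutor : public torch::aot_inductor::ProxyExecutor {
 public:
  void call_function(
      int extern_node_index,
      int num_ints,
      int64_t* flatten_int_args,
      int num_tensors,
      AtenTensorHandle* flatten_tensor_args) override {
    // A real executor would look up the extern kernel registered for
    // `extern_node_index` and dispatch it with the flattened arguments.
    std::cout << "extern node " << extern_node_index << ": " << num_ints
              << " ints, " << num_tensors << " tensors\n";
  }
};

} // namespace example
```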
======================================================================================================================================= SOURCE CODE FILE: tensor_converter.h LINES: 1 SIZE: 0.96 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\inductor\aoti_torch\tensor_converter.h ENCODING: utf-8
```h
#pragma once

#include <ATen/Tensor.h>
#include <torch/csrc/inductor/aoti_torch/c/shim.h>

namespace torch::aot_inductor {

// Functions declared here are not meant to be called from the AOTInductor
// generated model.so

// unsafe_alloc_new_handles_from_tensors is used for allocating new aten
// tensor objects and returning them as a vector of AtenTensorHandle (raw
// pointers), and those pointers will be stolen by model.so.
TORCH_API std::vector<AtenTensorHandle> unsafe_alloc_new_handles_from_tensors(
    const std::vector<at::Tensor>& tensors);

// alloc_tensors_by_stealing_from_handles is used for creating a vector of aten
// tensors by stealing from an array of handles. Only the handles are stolen,
// and the array itself is borrowed.
//
// WARNING: Can NOT be called in model.so
TORCH_API std::vector<at::Tensor> alloc_tensors_by_stealing_from_handles(
    AtenTensorHandle* handles,
    size_t length);

} // namespace torch::aot_inductor
```
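A minimal sketch of the ownership round trip these two helpers describe: handles minted by the first call are consumed ("stolen") by the second, after which the tensors are ordinary `at::Tensor` values again. The function name is mine; in the real flow the two halves run on opposite sides of the model.so boundary.

```cpp
#include <ATen/ATen.h>
#include <torch/csrc/inductor/aoti_torch/tensor_converter.h>

using namespace torch::aot_inductor;

void round_trip_example() {
  std::vector<at::Tensor> tensors = {at::zeros({2, 3}), at::ones({4})};
  // Each handle owns a freshly heap-allocated at::Tensor; the receiver
  // (model.so in the real flow) becomes responsible for it.
  std::vector<AtenTensorHandle> handles =
      unsafe_alloc_new_handles_from_tensors(tensors);
  // Stealing converts the handles back into at::Tensor values; only the
  // handles are consumed, the handle array itself is merely borrowed.
  std::vector<at::Tensor> back =
      alloc_tensors_by_stealing_from_handles(handles.data(), handles.size());
}
```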
============================================================================================================================ SOURCE CODE FILE: utils.h LINES: 1 SIZE: 7.35 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\inductor\aoti_torch\utils.h ENCODING: utf-8
```h
#pragma once

#include <ATen/Generator.h>
#include <ATen/Tensor.h>
#include <ATen/core/List.h>
#include <c10/core/DeviceType.h>
#include <c10/core/SymIntArrayRef.h>
#include <c10/util/ArrayRef.h>
#include <c10/util/Logging.h>
#include <c10/util/OptionalArrayRef.h>
#include <torch/csrc/inductor/aoti_torch/c/shim.h>

#include <optional>

#define AOTI_TORCH_CONVERT_EXCEPTION_TO_ERROR_CODE(...)      \
  try {                                                      \
    __VA_ARGS__                                              \
  } catch (const std::exception& e) {                        \
    LOG(ERROR) << "Exception in aoti_torch: " << e.what();   \
    return AOTI_TORCH_FAILURE;                               \
  } catch (...) {                                            \
    LOG(ERROR) << "Exception in aoti_torch: UNKNOWN";        \
    return AOTI_TORCH_FAILURE;                               \
  }                                                          \
  return AOTI_TORCH_SUCCESS;

namespace torch::aot_inductor {

inline at::Tensor* tensor_handle_to_tensor_pointer(AtenTensorHandle handle) {
  return reinterpret_cast<at::Tensor*>(handle);
}

inline AtenTensorHandle tensor_pointer_to_tensor_handle(at::Tensor* tensor) {
  return reinterpret_cast<AtenTensorHandle>(tensor);
}

inline at::Tensor resolve_tensor_dispatch_flags(AtenTensorHandle handle) {
  at::Tensor* tensor{tensor_handle_to_tensor_pointer(handle)};
  if (tensor->is_conj() || tensor->is_neg()) {
    // If the conjugation or negation dispatch flags are set, runtime dispatch
    // handles them by cloning the tensor before passing them to the native ATen
    // function. Since the C-shim calls the native function directly, we have
    // to handle the flags ourselves, or results will be silently incorrect.
    return tensor->clone();
  }
  return *tensor;
}

inline std::optional<at::Tensor> resolve_tensor_dispatch_flags(
    const AtenTensorHandle* handle) {
  return handle ? std::make_optional(resolve_tensor_dispatch_flags(*handle))
                : std::nullopt;
}

inline std::vector<at::Tensor> resolve_tensor_list_dispatch_flags(
    const AtenTensorHandle* handle,
    int64_t len) {
  std::vector<at::Tensor> ret{};
  ret.reserve(len);
  for (int64_t i{0}; i < len; ++i) {
    ret.emplace_back(resolve_tensor_dispatch_flags(handle[i]));
  }
  return ret;
}

inline std::vector<std::optional<at::Tensor>> resolve_tensor_list_dispatch_flags(
    const AtenTensorHandle** handle,
    int64_t len) {
  std::vector<std::optional<at::Tensor>> ret{};
  ret.reserve(len);
  for (int64_t i{0}; i < len; ++i) {
    ret.emplace_back(resolve_tensor_dispatch_flags(handle[i]));
  }
  return ret;
}

inline at::Generator* generator_handle_to_generator_pointer(
    AtenGeneratorHandle handle) {
  return reinterpret_cast<at::Generator*>(handle);
}

inline AtenGeneratorHandle generator_pointer_to_generator_handle(
    at::Generator* generator) {
  return reinterpret_cast<AtenGeneratorHandle>(generator);
}

inline AtenTensorHandle new_tensor_handle(at::Tensor&& tensor) {
  at::Tensor* new_tensor = new at::Tensor(std::move(tensor));
  return tensor_pointer_to_tensor_handle(new_tensor);
}

inline void assert_inf_and_nan(
    const std::string& tensor_name,
    at::Tensor& check_tensor) {
  auto isnan_tensor = check_tensor.isnan();
  if (isnan_tensor.any().item<bool>()) {
    throw std::runtime_error("At least one NaN in " + tensor_name);
  }
  auto isinf_tensor = check_tensor.isinf();
  if (isinf_tensor.any().item<bool>()) {
    throw std::runtime_error("At least one INF in " + tensor_name);
  }
}

// utility functions to convert a pointer to an optional value
template <class T>
inline std::optional<T> pointer_to_optional(T* ptr) {
  return ptr ? std::make_optional(*ptr) : std::nullopt;
}

template <class T, class U, typename = std::enable_if_t<!std::is_same_v<T, U>>>
inline std::optional<T> pointer_to_optional(U* ptr) {
  return ptr ? std::make_optional<T>(T(*ptr)) : std::nullopt;
}

template <>
inline std::optional<at::Tensor> pointer_to_optional(AtenTensorHandle* ptr) {
  return ptr ? std::make_optional(*tensor_handle_to_tensor_pointer(*ptr))
             : std::nullopt;
}

template <>
inline std::optional<at::Tensor> pointer_to_optional(
    const AtenTensorHandle* ptr) {
  return ptr ? std::make_optional(*tensor_handle_to_tensor_pointer(*ptr))
             : std::nullopt;
}

template <>
inline std::optional<at::Generator> pointer_to_optional(
    AtenGeneratorHandle* ptr) {
  return ptr ? std::make_optional(*generator_handle_to_generator_pointer(*ptr))
             : std::nullopt;
}

inline std::optional<c10::Device> pointer_to_optional_device(
    int32_t* device_type,
    int32_t device_index) {
  return device_type ? std::make_optional(c10::Device(
                           static_cast<c10::DeviceType>(*device_type),
                           static_cast<c10::DeviceIndex>(device_index)))
                     : std::nullopt;
}

// utility functions to convert a pointer to a list
template <typename T>
struct is_optional : std::false_type {};
template <typename T>
struct is_optional<std::optional<T>> : std::true_type {};

template <class T>
inline c10::ArrayRef<T> pointer_to_list(T* ptr, int64_t len) {
  return c10::ArrayRef<T>(ptr, len);
}

template <
    class T,
    class U,
    typename = std::enable_if_t<!std::is_same_v<T, U>>,
    typename = std::enable_if_t<!is_optional<T>::value>>
inline std::vector<T> pointer_to_list(U* ptr, int64_t len) {
  // std::vector<T> will be implicitly converted to c10::ArrayRef<T> at the call
  // site
  std::vector<T> result;
  result.reserve(len);
  for (int64_t i = 0; i < len; i++) {
    result.emplace_back(T(ptr[i]));
  }
  return result;
}

template <class T, class U, typename = std::enable_if_t<is_optional<T>::value>>
inline std::vector<T> pointer_to_list(U** ptr, int64_t len) {
  // Here U** denotes a list of optional arguments
  // std::vector<T> will be implicitly converted to c10::ArrayRef<T> at the call
  // site
  std::vector<T> result;
  result.reserve(len);
  for (int64_t i = 0; i < len; i++) {
    result.emplace_back(pointer_to_optional(ptr[i]));
  }
  return result;
}

template <>
inline std::vector<at::Tensor> pointer_to_list(
    const AtenTensorHandle* ptr,
    int64_t len) {
  std::vector<at::Tensor> result;
  result.reserve(len);
  for (int64_t i = 0; i < len; i++) {
    result.emplace_back(*tensor_handle_to_tensor_pointer(ptr[i]));
  }
  return result;
}

template <>
inline std::vector<std::optional<at::Tensor>> pointer_to_list(
    const AtenTensorHandle** ptr,
    int64_t len) {
  std::vector<std::optional<at::Tensor>> result;
  result.reserve(len);
  for (int64_t i = 0; i < len; i++) {
    result.emplace_back(pointer_to_optional<at::Tensor>(ptr[i]));
  }
  return result;
}

template <int N>
inline std::array<bool, N> pointer_to_list(const int32_t* ptr) {
  std::array<bool, N> result;
  std::copy(ptr, ptr + N, result.begin());
  return result;
}

// Utility function to convert a pointer to an optional list of values
template <class T, class U>
inline std::optional<c10::ArrayRef<T>> pointer_to_optional_list(
    U** ptr,
    int64_t len) {
  return ptr
      ? std::make_optional<c10::ArrayRef<T>>(pointer_to_list<T>(*ptr, len))
      : std::nullopt;
}

} // namespace torch::aot_inductor
```
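These helpers are what map the pointer-based C ABI of the generated shims onto the C++ types ATen expects. A minimal sketch, with hypothetical function and argument names, of how a shim implementation might unpack its arguments using them:

```cpp
#include <torch/csrc/inductor/aoti_torch/utils.h>

using namespace torch::aot_inductor;

void unpack_shim_args(
    double* scale,        // optional scalar: nullptr means "not provided"
    int32_t* device_type, // optional device, split into type code + index
    int32_t device_index,
    int64_t* sizes,       // array argument passed as pointer + length
    int64_t sizes_len) {
  // nullptr -> std::nullopt, non-null -> the pointed-to value.
  std::optional<double> opt_scale = pointer_to_optional(scale);
  // Recombines the (type, index) pair into an optional c10::Device.
  std::optional<c10::Device> opt_device =
      pointer_to_optional_device(device_type, device_index);
  // Wraps the raw pointer + length as a non-owning c10::ArrayRef view.
  c10::ArrayRef<int64_t> size_ref = pointer_to_list(sizes, sizes_len);
  (void)opt_scale;
  (void)opt_device;
  (void)size_ref;
}
```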
========================================================================================================================== SOURCE CODE FILE: array_ref_impl.h LINES: 1 SIZE: 3.01 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\inductor\array_ref_impl.h ENCODING: utf-8
```h
#pragma once

#include <torch/csrc/inductor/aoti_runtime/arrayref_tensor.h>
#include <torch/csrc/inductor/aoti_runtime/scalar_to_tensor.h>
#include <torch/csrc/inductor/aoti_runtime/thread_local.h>
#include <torch/csrc/inductor/aoti_torch/utils.h>

namespace torch::aot_inductor {

template <typename T>
void convert_output_to_handle(
    const ArrayRefTensor<T>& output,
    AtenTensorHandle& handle) {
  handle = output.expensiveCopyToTensor();
}

template <typename... Ts, std::size_t... Is>
void convert_outputs_to_handles_helper(
    const std::tuple<ArrayRefTensor<Ts>...>& outputs,
    AtenTensorHandle* output_handles,
    std::index_sequence<Is...>) {
  (convert_output_to_handle(std::get<Is>(outputs), output_handles[Is]), ...);
}

template <typename... Ts>
void convert_outputs_to_handles(
    const std::tuple<ArrayRefTensor<Ts>...>& outputs,
    AtenTensorHandle* output_handles) {
  convert_outputs_to_handles_helper(
      outputs, output_handles, std::make_index_sequence<sizeof...(Ts)>());
}

template <typename T>
void convert_handle_to_arrayref_tensor(
    AtenTensorHandle handle,
    ArrayRefTensor<T>& input) {
  void* data_ptr;
  AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_get_data_ptr(handle, &data_ptr));
  int64_t dim;
  AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_get_dim(handle, &dim));
  int64_t numel;
  AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_get_numel(handle, &numel));
  int64_t* sizes;
  AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_get_sizes(handle, &sizes));
  int64_t* strides;
  AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_get_strides(handle, &strides));
  int32_t dtype;
  AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_get_dtype(handle, &dtype));
  int32_t device_type;
  AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_get_device_type(handle, &device_type));
  int32_t device_index;
  AOTI_TORCH_ERROR_CODE_CHECK(
      aoti_torch_get_device_index(handle, &device_index));

  input = ArrayRefTensor<T>(
      MiniArrayRef<T>(reinterpret_cast<T*>(data_ptr), numel),
      MiniArrayRef<const int64_t>(sizes, dim),
      MiniArrayRef<const int64_t>(strides, dim),
      device_type,
      device_index);
}

template <typename... Ts, std::size_t... Is>
void convert_handles_to_inputs_helper(
    AtenTensorHandle* input_handles,
    std::tuple<ArrayRefTensor<Ts>...>& inputs,
    std::index_sequence<Is...>) {
  (convert_handle_to_arrayref_tensor(input_handles[Is], std::get<Is>(inputs)),
   ...);
}

template <typename... Ts>
void convert_handles_to_inputs(
    AtenTensorHandle* input_handles,
    std::tuple<ArrayRefTensor<Ts>...>& inputs) {
  convert_handles_to_inputs_helper(
      input_handles, inputs, std::make_index_sequence<sizeof...(Ts)>());
}

template <typename T>
void assert_numel(const ArrayRefTensor<T>& tensor, uint64_t numel) {
  if (tensor.numel() != numel) {
    std::stringstream err;
    err << "incorrect numel for input tensor. expected " << numel << ", got "
        << tensor.numel();
    throw std::runtime_error(err.str());
  }
}

} // namespace torch::aot_inductor
```
================================================================================================================================= SOURCE CODE FILE: array_ref.h LINES: 1 SIZE: 0.30 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\inductor\cpp_wrapper\array_ref.h ENCODING: utf-8
```h
#pragma once

#include <torch/csrc/inductor/aoti_runtime/arrayref_tensor.h>
#include <torch/csrc/inductor/aoti_runtime/thread_local.h>
#include <torch/csrc/inductor/array_ref_impl.h>
#include <torch/csrc/inductor/cpp_wrapper/common.h>
#include <torch/csrc/inductor/cpp_wrapper/device_internal/cpu.h>
```
============================================================================================================================================== SOURCE CODE FILE: common.h LINES: 1 SIZE: 1.10 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\inductor\cpp_wrapper\common.h ENCODING: utf-8 ```h #pragma once #include <array> #include <filesystem> #include <optional> #include <Python.h> #define PYBIND11_SIMPLE_GIL_MANAGEMENT #include <pybind11/gil.h> namespace py = pybind11; class RAIIPyObject { public: RAIIPyObject() : obj_(nullptr) {} RAIIPyObject(PyObject* obj) : obj_(obj) {} ~RAIIPyObject() { Py_XDECREF(obj_); } RAIIPyObject& operator=(const RAIIPyObject& other) { if (this != &other) { Py_XDECREF(obj_); obj_ = other.obj_; Py_XINCREF(obj_); } return *this; } operator PyObject*() { return obj_; } PyObject* get() { return obj_; } private: PyObject* obj_; }; #include <torch/csrc/inductor/aoti_runtime/device_utils.h> #include <torch/csrc/inductor/aoti_runtime/utils.h> using namespace torch::aot_inductor; #include <c10/util/generic_math.h> #include <torch/csrc/inductor/aoti_runtime/scalar_to_tensor.h> using half = at::Half; using bfloat16 = at::BFloat16; // Round up to the nearest multiple of 64 [[maybe_unused]] inline int64_t align(int64_t nbytes) { return (nbytes + 64 - 1) & -64; } ```
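A hypothetical sketch of the two small utilities defined in this header: `RAIIPyObject` taking ownership of a newly created Python reference, and `align()` rounding a byte count up to the next multiple of 64. It assumes `common.h` is already included in the translation unit and that the caller holds the GIL; the function name and literal values are illustrative.

```cpp
void raii_and_align_example() {
  // New reference from the CPython C-API; Py_XDECREF runs automatically
  // when `num` goes out of scope.
  RAIIPyObject num(PyLong_FromLong(42));
  if (num.get() != nullptr) {
    long value = PyLong_AsLong(num);  // implicit conversion to PyObject*
    (void)value;
  }
  // align() rounds up to the next multiple of 64: (100 + 63) & -64 == 128.
  int64_t padded = align(100);
  (void)padded;
}
```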
=========================================================================================================================================== SOURCE CODE FILE: cpu.h LINES: 1 SIZE: 0.13 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\inductor\cpp_wrapper\cpu.h ENCODING: utf-8 ```h #pragma once #include <torch/csrc/inductor/cpp_wrapper/common.h> #include <torch/csrc/inductor/cpp_wrapper/device_internal/cpu.h> ```
============================================================================================================================================ SOURCE CODE FILE: cuda.h LINES: 1 SIZE: 0.13 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\inductor\cpp_wrapper\cuda.h ENCODING: utf-8 ```h #pragma once #include <torch/csrc/inductor/cpp_wrapper/common.h> #include <torch/csrc/inductor/cpp_wrapper/device_internal/cuda.h> ```
=========================================================================================================================================================== SOURCE CODE FILE: cpu.h LINES: 1 SIZE: 0.08 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\inductor\cpp_wrapper\device_internal\cpu.h ENCODING: utf-8 ```h #pragma once #include <torch/csrc/inductor/aoti_torch/generated/c_shim_cpu.h> ```
============================================================================================================================================================ SOURCE CODE FILE: cuda.h LINES: 1 SIZE: 0.14 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\inductor\cpp_wrapper\device_internal\cuda.h ENCODING: utf-8 ```h #pragma once #include <torch/csrc/inductor/aoti_runtime/utils_cuda.h> #include <torch/csrc/inductor/aoti_torch/generated/c_shim_cuda.h> ```
=========================================================================================================================================================== SOURCE CODE FILE: xpu.h LINES: 1 SIZE: 0.20 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\inductor\cpp_wrapper\device_internal\xpu.h ENCODING: utf-8 ```h #pragma once #include <torch/csrc/inductor/aoti_runtime/sycl_runtime_wrappers.h> #include <torch/csrc/inductor/aoti_runtime/utils_xpu.h> #include <torch/csrc/inductor/aoti_torch/generated/c_shim_xpu.h> ```
=========================================================================================================================================== SOURCE CODE FILE: xpu.h LINES: 1 SIZE: 0.13 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\inductor\cpp_wrapper\xpu.h ENCODING: utf-8 ```h #pragma once #include <torch/csrc/inductor/cpp_wrapper/common.h> #include <torch/csrc/inductor/cpp_wrapper/device_internal/xpu.h> ```
======================================================================================================================================== SOURCE CODE FILE: inductor_ops.h LINES: 1 SIZE: 1.11 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\inductor\inductor_ops.h ENCODING: utf-8 ```h #pragma once #include <ATen/Tensor.h> namespace torch::inductor { TORCH_API at::Tensor _mm_plus_mm_out( at::Tensor& out, const at::Tensor& a, const at::Tensor& b, const at::Tensor& c, const at::Tensor& d); // After adding _mm_plus_mm_out, this should not be exposed and called by model // code. Keeping it around for backward compatibility. Will be deprecated later. TORCH_API at::Tensor _mm_plus_mm( const at::Tensor& a, const at::Tensor& b, const at::Tensor& c, const at::Tensor& d, at::Tensor& out); TORCH_API at::Tensor _alloc_from_pool( const at::Tensor& self, int64_t offset_bytes, at::ScalarType dtype, at::IntArrayRef size, at::IntArrayRef stride); // Similar to as_strided with the following differences // - offset is added to the existing offset (rather than replacing it) // - view tracking is disabled similar to unsafe_view TORCH_API at::Tensor _reinterpret_tensor( const at::Tensor& self, at::IntArrayRef size, at::IntArrayRef stride, int64_t offset_increment = 0); } // namespace torch::inductor ```
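A hedged sketch of `_reinterpret_tensor`, matching the signature declared above: the buffer, the 2x4 shape, and the strides are illustrative. Per the comment in the header, the offset argument is added to the tensor's existing storage offset rather than replacing it.

```cpp
#include <torch/csrc/inductor/inductor_ops.h>

// Views the first 8 elements of `buf` as a contiguous 2x4 matrix,
// without adjusting the existing storage offset.
at::Tensor as_2x4_view(const at::Tensor& buf) {
  return torch::inductor::_reinterpret_tensor(
      buf, /*size=*/{2, 4}, /*stride=*/{4, 1}, /*offset_increment=*/0);
}
```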
============================================================================================================================== SOURCE CODE FILE: itt_wrapper.h LINES: 1 SIZE: 0.32 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\itt_wrapper.h ENCODING: utf-8 ```h #ifndef PROFILER_ITT_H #define PROFILER_ITT_H #include <c10/macros/Export.h> namespace torch::profiler { TORCH_API bool itt_is_available(); TORCH_API void itt_range_push(const char* msg); TORCH_API void itt_range_pop(); TORCH_API void itt_mark(const char* msg); } // namespace torch::profiler #endif // PROFILER_ITT_H ```
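A minimal sketch of annotating a region of work with the ITT wrappers declared above (useful under VTune); the range name, mark label, and the work inside the region are illustrative.

```cpp
#include <torch/csrc/itt_wrapper.h>

void annotated_work() {
  if (torch::profiler::itt_is_available()) {
    torch::profiler::itt_range_push("my_region");
    // ... do the work to be profiled ...
    torch::profiler::itt_mark("halfway");
    torch::profiler::itt_range_pop();
  }
}
```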
=========================================================================================================================================== SOURCE CODE FILE: compilation_unit.h LINES: 1 SIZE: 11.76 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\api\compilation_unit.h ENCODING: utf-8 ```h #pragma once #include <ATen/core/function.h> #include <c10/util/Exception.h> #include <torch/csrc/jit/api/function_impl.h> #include <torch/csrc/jit/frontend/name_mangler.h> #include <torch/csrc/jit/frontend/source_range.h> #include <torch/csrc/jit/ir/ir.h> #include <torch/csrc/jit/runtime/graph_executor.h> #include <torch/csrc/Export.h> #include <ATen/core/function_schema.h> #include <ATen/core/qualified_name.h> #include <c10/util/ArrayRef.h> #include <optional> #include <functional> #include <memory> #include <mutex> #include <ostream> #include <string> #include <unordered_map> #include <vector> namespace torch::jit { struct Def; struct Property; struct ClassDef; struct SugaredValue; struct Resolver; using ResolverPtr = std::shared_ptr<Resolver>; struct Self { virtual ~Self() = default; virtual std::shared_ptr<SugaredValue> makeSugared(Value* v) const = 0; virtual ClassTypePtr getClassType() const = 0; }; // A CompilationUnit is a list of named Functions // with helper methods to iterate the list or invoke the function. // Classes have a CompilationUnit holding the class methods, // and Modules have a CompilationUnit holding the Functions that // are used to implement their Methods struct TORCH_API CompilationUnit { enum class FunctionType { Method, Hook, PreHook }; // constructor that takes a set of functions to compile using the native // resolver explicit CompilationUnit(const std::string& source); CompilationUnit() = default; CompilationUnit& operator=(CompilationUnit&&) = default; CompilationUnit(CompilationUnit&&) = default; CompilationUnit& operator=(const CompilationUnit&) = delete; CompilationUnit(const CompilationUnit&) = delete; Function* find_function(const c10::QualifiedName& name) const { auto it = dict_.find(name); if (it == dict_.end()) { return nullptr; } return functions_[it->second].get(); } Function& get_function(const c10::QualifiedName& name) const { if (auto r = find_function(name)) { return *r; } TORCH_CHECK(false, "attempted to get undefined function ", name.name()); } void set_optimized(bool o) { TORCH_WARN( "CompilationUnit::set_optimized() is deprecated and has no effect. " "Please use setGraphExecutorOptimize()"); } bool is_optimized() const { TORCH_WARN( "CompilationUnit::is_optimized() is deprecated and always returns true. " "Please use getGraphExecutorOptimize()"); return true; } // for historic reasons, these are defined in ir_emitter.cpp // Returns the list of Functions just defined. 
std::vector<Function*> define( const std::optional<c10::QualifiedName>& prefix, const std::vector<Property>& properties, const std::vector<ResolverPtr>& propResolvers, const std::vector<Def>& definitions, const std::vector<ResolverPtr>& defResolvers, /* determines how we handle free variables in each definition*/ // if non-null, the first argument to each def, is bound to this value const Self* self, // see [name mangling] bool shouldMangle = false, std::optional<size_t> operator_set_version = std::nullopt); void define_hooks( const std::optional<c10::QualifiedName>& prefix, const std::vector<Def>& hookDefs, const std::vector<ResolverPtr>& hookResolvers, const std::vector<Def>& preHookDefs, const std::vector<ResolverPtr>& preHookResolvers, const Self* self, bool shouldMangle = false); // same as above but parse the definitions from source // Returns the list of Functions just defined. std::vector<Function*> define( // prefix namespace to put all the defined functions into const std::optional<c10::QualifiedName>& prefix, const std::string& source, const ResolverPtr& resolver, const Self* self); void define_interface( const c10::QualifiedName& qualifiedName, const ClassDef& classDef, ResolverPtr rcb, bool is_module = false); Function* create_function( c10::QualifiedName name, std::shared_ptr<Graph> graph, bool shouldMangle = false) { if (shouldMangle) { name = mangle(name); } auto fn = std::make_unique<GraphFunction>( std::move(name), std::move(graph), nullptr); auto ret = fn.get(); register_function(std::move(fn)); return ret; } std::vector<Function*> get_functions() const { return fmap(functions_, [](const std::unique_ptr<Function>& fn) { return fn.get(); }); } /// Run a method from this compilation. /// /// For example: /// @code /// IValue output = module->run("relu_script", a, b); /// @endcode /// /// To get a compile a module from a source string, see torch::jit::compile /// /// @param method_name The name of the method to run /// @param args Arguments to be passed to the method /// @return An IValue containing the return value (or values if it is a tuple) /// from the method template <typename... Types> IValue run_method(const c10::QualifiedName& method_name, Types&&... args) { return get_function(method_name)({IValue(std::forward<Types>(args))...}); } void drop_all_functions() { dict_.clear(); functions_.clear(); } /** * Register a class as being owned by this compilation unit. */ void register_type(c10::NamedTypePtr namedType) { // TODO: class types cannot be redefined because we have no way right now // of invalidating their methods. NamedTuples are fine though, since they // don't have methods. 
TORCH_CHECK( 0 == classDict_.count(*namedType->name()), "class '", namedType->name()->qualifiedName(), "' already defined."); classes_.push_back(std::move(namedType)); classDict_[*classes_.back()->name()] = classes_.size() - 1; } c10::ClassTypePtr get_class(const c10::QualifiedName& name) const { auto type = get_type(name); if (!type) { return nullptr; } return type->cast<c10::ClassType>(); } c10::InterfaceTypePtr get_interface(const c10::QualifiedName& name) const { auto type = get_type(name); if (!type) { return nullptr; } return type->cast<c10::InterfaceType>(); } c10::TupleTypePtr get_named_tuple(const c10::QualifiedName& name) const { for (const auto& cls : classes_) { if (cls->name()->qualifiedName() == name.qualifiedName()) { return cls->expect<TupleType>(); } } return nullptr; } c10::NamedTypePtr get_type(const c10::QualifiedName& name) const { auto it = classDict_.find(name); if (it == classDict_.end()) { return nullptr; } return classes_[it->second]; } // For testing: clear all Python-defined classes to ensure that unit tests // have isolation. void _clear_python_cu() { // Delete all the associated class methods for (const auto& type : classes_) { if (auto cls = type->cast<ClassType>()) { for (auto method : cls->methods()) { // Tombstone the method in the compilation unit. // Don't erase because the dict_ auto it = dict_.find(method->qualname()); if (it != dict_.end()) { functions_[it->second] = nullptr; // Erase in our big lookup table dict_.erase(it); } } // Classes can have multiple pointers to the same hook, // need to make sure to not delete it twice std::unordered_set<Function*> hooks_to_delete; for (const auto& hook : cls->getForwardHooks()) { hooks_to_delete.insert(hook); } for (const auto& pre_hook : cls->getForwardPreHooks()) { hooks_to_delete.insert(pre_hook); } for (const auto& hook : hooks_to_delete) { // Tombstone the hook in the compilation unit. auto it = dict_.find(hook->qualname()); if (it != dict_.end()) { functions_[it->second] = nullptr; // Erase in our big lookup table dict_.erase(it); } } } } classes_.clear(); classDict_.clear(); } // [Internal Only] Remove method. // Note Used for freezing. void unsafeRemoveMethod(const c10::QualifiedName& method_name) { auto it = dict_.find(method_name); TORCH_CHECK( it != dict_.end(), "method '", method_name.qualifiedName(), "' does not exist."); functions_[it->second] = nullptr; dict_.erase(it); } // [name mangling] All code objects must have a unique qualified name in a // CompilationUnit. In Python, sometimes functions won't have unique qualified // name (for example, nested functions). So we mangle Python functions to // ensure that they are uniquely named. // // We also use mangling to distinguish different Module instances. Since each // Module is a singleton class instance, different instances of the same // Python Module will have different types but the same qualified name. c10::QualifiedName mangle(const c10::QualifiedName& name) const { auto mangled = name; while (get_type(mangled) || find_function(mangled)) { mangled = mangler_.mangle(mangled); } return mangled; } private: std::unique_ptr<Function> define( const std::optional<c10::QualifiedName>& prefix, const Def& def, const ResolverPtr& resolver, const Self* self, const std::unordered_map<std::string, Function*>& function_table, bool shouldMangle = false, FunctionType type = FunctionType::Method, std::optional<size_t> version = std::nullopt) const; // Define a property on \p self. 
struct PropertyPair; PropertyPair define_property( const std::optional<c10::QualifiedName>& prefix, const Property& prop, const ResolverPtr& resolver, const Self* self, const std::unordered_map<std::string, Function*>& function_table, bool shouldMangle = false) const; Function& register_function(std::unique_ptr<Function> fn) { TORCH_CHECK( 0 == dict_.count(fn->qualname().qualifiedName()), "method '", fn->qualname().qualifiedName(), "' already defined."); functions_.emplace_back(std::move(fn)); dict_[functions_.back()->qualname()] = functions_.size() - 1; return *functions_.back(); } std::vector<std::unique_ptr<Function>> functions_; // for fast lookup std::unordered_map<c10::QualifiedName, size_t> dict_; std::unordered_map<c10::QualifiedName, size_t> classDict_; // [class ownership] Right now there are two relationships between classes // and compilation units: // 1. Classes have compilation units internally that hold their methods. // 2. On load, the TypePtrs of any imported classes are owned by the main // module's compilation unit. std::vector<c10::NamedTypePtr> classes_; mutable NameMangler mangler_; }; // An owning pointer to a Function. Just a pair of a raw Function ptr and it's // owning CU. We need this because pybind requires a ref-counted way to refer to // Functions. struct StrongFunctionPtr { StrongFunctionPtr(std::shared_ptr<CompilationUnit> cu, Function* function) : cu_(std::move(cu)), function_(function) { TORCH_INTERNAL_ASSERT(cu_); TORCH_INTERNAL_ASSERT(function_); } std::shared_ptr<CompilationUnit> cu_; Function* function_; }; namespace script { // We once had a `script::` namespace that was deleted. This is for backcompat // of the public API; new code should not use this type alias. using CompilationUnit = ::torch::jit::CompilationUnit; } // namespace script } // namespace torch::jit ```
======================================================================================================================================== SOURCE CODE FILE: function_impl.h LINES: 1 SIZE: 5.69 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\api\function_impl.h ENCODING: utf-8 ```h #pragma once #include <ATen/core/function.h> #include <torch/csrc/jit/ir/ir.h> #include <torch/csrc/jit/runtime/graph_executor.h> namespace torch::jit { struct TORCH_API GraphFunction : public Function { GraphFunction( c10::QualifiedName name, std::shared_ptr<Graph> graph, std::function<void(GraphFunction&)> function_creator, std::optional<ExecutorExecutionMode> executor_execution_mode = std::nullopt) : name_(std::move(name)), graph_(std::move(graph)), executor_execution_mode_(executor_execution_mode), function_creator_(std::move(function_creator)) {} bool isGraphFunction() const override { return true; } void run(Stack& stack) override; std::function<void(GraphFunction&)> function_creator() const { return function_creator_; } c10::intrusive_ptr<c10::ivalue::Future> runAsync( Stack& stack, TaskLauncher taskLauncher = at::launch) override; std::shared_ptr<Graph> graph() const { return graph_; } std::shared_ptr<Graph> optimized_graph() const; const c10::QualifiedName& qualname() const override { return name_; } // private/unstable api. sets the initial execution mode // will not affect executor if there is an existing executor // created for this function void _set_initial_executor_execution_mode(ExecutorExecutionMode mode) { executor_execution_mode_ = mode; } // private/unstable api. sets flag of whether or not to ignore amp. // will not affect executor if there is an existing executor // created for this function void _set_ignore_amp(bool ignore_amp) { force_no_amp_ = ignore_amp; } // if this isn't yet defined, run its method_creator function void ensure_defined() override; size_t num_inputs() const override { return graph()->inputs().size(); } Function& setSchema(FunctionSchema schema) override { schema_ = std::make_unique<FunctionSchema>(std::move(schema)); return *this; } const FunctionSchema& getSchema() const override; GraphExecutorState getDebugState() { return get_executor().getDebugState(); } bool is_optimized() const { TORCH_WARN( "GraphFunction::is_optimized() is deprecated and always returns true. " "Please use getGraphExecutorOptimize()"); return true; } void check_single_output() { TORCH_CHECK( graph()->outputs().size() == 1, "Method (but not graphs in general) require a single output. 
Use None/Tuple for 0 or 2+ outputs"); } GraphExecutor& get_executor() { ensure_defined(); std::lock_guard<std::recursive_mutex> lock(compile_mutex); auto& executor = executors_[currentSpecialization()]; if (executor) { return *executor; } check_single_output(); const std::string& name = name_.name(); std::shared_ptr<Graph> opt_graph = optimized_graph(); if (!executor_execution_mode_) { executor = GraphExecutor(opt_graph, name); } else { executor = GraphExecutor(opt_graph, name, *executor_execution_mode_); } return *executor; } using Function::call; bool call( Stack& stack, std::optional<size_t> bailOut, c10::function_ref<void(const Code&)> f) override { f(get_executor().getPlanFor(stack, bailOut).code); return true; } void clear_optimized_graphs() { optimized_graphs_.fill(nullptr); } private: enum SpecializationKey { AutocastOff, CpuAutocastOn, GpuAutocastOn, CpuGpuAutocastOn, // This provides the number of specializations // (Must be last entry) TotalCount }; SpecializationKey currentSpecialization() const; private: c10::QualifiedName name_; // The original, non-optimized graph std::shared_ptr<Graph> graph_; // for debugging and for inlining // allows users to specify Simple/Profiling Executor for function // TODO: add more executors mutable std::optional<ExecutorExecutionMode> executor_execution_mode_; // if invoked on a graph that has already traced through amp // don't invoke amp pass mutable bool force_no_amp_ = false; // Optimized graph, computed lazily. Used for inlining. mutable std::array<std::shared_ptr<Graph>, SpecializationKey::TotalCount> optimized_graphs_; // GraphFunctions are invokable from multiple threads, so this lock needs to // be held when we're initializing graph executor for the first time or // computing the optimized graph. We're using reentrant mutex so that we don't // need to worry about causing a deadlock by calling one method from another // (e.g. optimized_graph() from get_executor()). mutable std::recursive_mutex compile_mutex; // executor_[0] - autocast off // executor_[1] - autocast cpu on // executor_[2] - autocast gpu on // executor_[3] - autocast cpu & gpu on std::array<std::optional<GraphExecutor>, SpecializationKey::TotalCount> executors_; // an optional function that actually creates the method when // ensure_defined() is called. This is used by the compiler so // that it can construct methods out of order std::function<void(GraphFunction&)> function_creator_; // if absent, then we generate a default schema based on the graph // mutable because getSchema caches the default schema if one is requested // before a call to setSchema mutable std::unique_ptr<FunctionSchema> schema_; }; // Short hands for dynamic_cast<GraphFunction*>. TORCH_API GraphFunction* tryToGraphFunction(Function&) noexcept; TORCH_API GraphFunction& toGraphFunction(Function&); TORCH_API const GraphFunction& toGraphFunction(const Function&); } // namespace torch::jit ```
================================================================================================================================= SOURCE CODE FILE: method.h LINES: 1 SIZE: 2.40 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\api\method.h ENCODING: utf-8 ```h #pragma once #include <ATen/core/function.h> #include <ATen/core/ivalue.h> #include <ATen/core/stack.h> #include <torch/csrc/api/include/torch/imethod.h> #include <torch/csrc/jit/api/function_impl.h> namespace torch::jit { using ObjectPtr = c10::intrusive_ptr<c10::ivalue::Object>; // A method in a module, e.g. f in: // // class M(ScriptModule): // @script_method // def f(self, x): // ... // Note: because Method/Module are exposed to python these // classes use python method naming conventions struct TORCH_API Method : public torch::IMethod { Method(ObjectPtr owner, Function* function); // the module that contains this method. Module owner() const; // the raw objectptr that owns this method, for when the method is owned by a // torchbind object. ObjectPtr raw_owner() const; void run(Stack& stack); void run(Stack&& stack) { run(stack); } c10::IValue operator()( std::vector<c10::IValue> stack, const Kwargs& kwargs = Kwargs()) const override; // Run the method asynchronously. Invoking this function runs a JIT // interpreter that executes ops inline, one by one, on the caller's thread. A // model can utilize async ops, e.g. `fork`, to launch an asynchronous task, // which will be run on the provided `taskLauncher`. c10::intrusive_ptr<c10::ivalue::Future> run_async( std::vector<c10::IValue> stack, const Kwargs& kwargs = Kwargs(), TaskLauncher taskLauncher = at::launch); std::shared_ptr<Graph> graph() const { return toGraphFunction(*function_).graph(); } const std::string& name() const override { return function_->name(); } size_t num_inputs() const { return function_->num_inputs(); } GraphExecutor& get_executor() { return toGraphFunction(*function_).get_executor(); } Function& function() const { return *function_; } private: void setArgumentNames(std::vector<std::string>&) const override; // Methods are uniquely owned by a single module. This raw pointer allows // looking up the module. ObjectPtr owner_; // Underlying unbound function Function* function_; }; namespace script { // We once had a `script::` namespace that was deleted. This is for backcompat // of the public API; new code should not use this type alias. using Method = ::torch::jit::Method; } // namespace script } // namespace torch::jit ```
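A hedged sketch of invoking a `Method` both synchronously and asynchronously. `method` is assumed to be obtained from a `Module` (e.g. `module.get_method("forward")`), and the single tensor argument is illustrative.

```cpp
#include <torch/csrc/jit/api/method.h>

c10::IValue call_twice(torch::jit::Method& method, const at::Tensor& x) {
  c10::IValue eager = method({x});      // runs inline on the calling thread
  auto future = method.run_async({x});  // may fork work onto at::launch
  future->wait();                       // block until the async run completes
  (void)eager;
  return future->value();
}
```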
================================================================================================================================= SOURCE CODE FILE: module.h LINES: 1 SIZE: 23.60 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\api\module.h ENCODING: utf-8 ```h #pragma once #include <c10/util/Exception.h> #include <torch/csrc/autograd/variable.h> #include <torch/csrc/jit/api/object.h> #include <torch/csrc/jit/frontend/source_range.h> #include <torch/csrc/jit/ir/ir.h> #include <torch/csrc/jit/ir/named_value.h> #include <torch/csrc/jit/runtime/argument_spec.h> #include <torch/csrc/jit/runtime/graph_executor.h> #include <torch/csrc/Export.h> #include <torch/csrc/api/include/torch/ordered_dict.h> #include <torch/csrc/jit/api/compilation_unit.h> #include <ATen/core/function_schema.h> #include <ATen/core/qualified_name.h> #include <c10/util/ArrayRef.h> #include <c10/util/irange.h> #include <optional> #include <functional> #include <memory> #include <mutex> #include <ostream> #include <string> #include <unordered_map> #include <unordered_set> #include <utility> #include <vector> // This file contains classes which assist in desugaring Python style // modules and their methods into flattened graphs which don't have any // function calls. namespace torch::jit { using ::c10::Argument; using ::c10::FunctionSchema; using ::c10::QualifiedName; // Map which stores filename to content. using ExtraFilesMap = std::unordered_map<std::string, std::string>; using ModulePtr = c10::intrusive_ptr<c10::ivalue::Object>; struct Module; template <typename T> struct slot_list_impl; template <typename T> struct Named { std::string name; T value; }; using NameModule = Named<Module>; using NameValue = Named<IValue>; using NameTensor = Named<at::Tensor>; namespace detail { struct TORCH_API ModulePolicy; struct TORCH_API ParameterPolicy; struct TORCH_API AttributePolicy; struct TORCH_API BufferPolicy; template <typename P> struct NamedPolicy; } // namespace detail using module_list = slot_list_impl<detail::ModulePolicy>; using named_module_list = slot_list_impl<detail::NamedPolicy<detail::ModulePolicy>>; using parameter_list = slot_list_impl<detail::ParameterPolicy>; using named_parameter_list = slot_list_impl<detail::NamedPolicy<detail::ParameterPolicy>>; using attribute_list = slot_list_impl<detail::AttributePolicy>; using named_attribute_list = slot_list_impl<detail::NamedPolicy<detail::AttributePolicy>>; using buffer_list = slot_list_impl<detail::BufferPolicy>; using named_buffer_list = slot_list_impl<detail::NamedPolicy<detail::BufferPolicy>>; using ModuleLookup = std::function<Module(const std::vector<std::string>&)>; struct TORCH_API Module : public Object { explicit Module(c10::QualifiedName class_name); Module(std::shared_ptr<CompilationUnit> cu, const c10::ClassTypePtr& type); Module() = default; Module(const Module&) = default; Module& operator=(const Module&) = default; Module(Module&&) noexcept = default; Module& operator=(Module&&) noexcept = default; Module( c10::QualifiedName, std::shared_ptr<CompilationUnit> cu, bool shouldMangle = false); Module(ModulePtr module_value) : Object(std::move(module_value)) {} ~Module() = default; void set_optimized(bool o) { TORCH_WARN( "Module::set_optimized() is deprecated and has no effect. " "Please use setGraphExecutorOptimize()"); } bool is_optimized() const { TORCH_WARN( "Module::is_optimized() is deprecated and always returns true. 
" "Please use getGraphExecutorOptimize()"); return true; } IValue forward(std::vector<IValue> inputs, const Kwargs& kwargs = Kwargs()) { return get_method("forward")(std::move(inputs), kwargs); } // In script modules, buffers are Tensors attribute that are _not_ registered // as parameters. This is different than in nn.Module where there is a special // register_buffer method. With this simplification, we only need to track // whether a slot is a parameter to be able to classify it. void register_buffer(const std::string& name, at::Tensor v) { bool is_param = false; bool is_buffer = true; std::lock_guard<std::mutex> lock(*register_mutex_); type()->addOrCheckAttribute(name, TensorType::get(), is_param, is_buffer); _ivalue()->setAttr(name, std::move(v)); } void register_parameter( const std::string& name, at::Tensor v, bool is_buffer) { std::lock_guard<std::mutex> lock(*register_mutex_); type()->addOrCheckAttribute(name, TensorType::get(), !is_buffer, is_buffer); _ivalue()->setAttr(name, std::move(v)); } void register_attribute( const std::string& name, const TypePtr& t, IValue v, bool is_param = false, bool is_buffer = false) { type()->addOrCheckAttribute(name, t, is_param, is_buffer); _ivalue()->setAttr(name, std::move(v)); } void register_module(const std::string& name, const Module& module) { type()->addOrCheckAttribute(name, module.type()); _ivalue()->setAttr(name, module._ivalue()); } void apply(const std::function<void(Module&)>& fn); buffer_list buffers(bool recurse = true) const; named_buffer_list named_buffers(bool recurse = true) const; module_list children() const; // direct modules named_module_list named_children() const; module_list modules() const; // all modules, including this one, recursively named_module_list named_modules() const; // all tensors involved in gradient optimization parameter_list parameters(bool recurse = true) const; named_parameter_list named_parameters(bool recurse = true) const; // all members of the object, similar to iterating over dir(obj) in python attribute_list attributes(bool recurse = true) const; named_attribute_list named_attributes(bool recurse = true) const; void dump( bool print_method_bodies, bool print_attr_values, bool print_param_values) const; std::string dump_to_str( bool print_method_bodies, bool print_attr_values, bool print_param_values) const; /// Enables "training" mode. void train(bool on = true); /// Calls train(false) to enable "eval" mode. /// Do not override this method, override `train()` instead. void eval() { train(/*on=*/false); } /// True if the module is in training mode. bool is_training() const { return attr("training", true).toBool(); } /// Recursively casts all parameters to the given `dtype` and `device`. /// /// If `non_blocking` is true and the source is in pinned memory and /// destination is on the GPU or vice versa, the copy is performed /// asynchronously with respect to the host. Otherwise, the argument has no /// effect. void to(at::Device device, at::ScalarType dtype, bool non_blocking = false); /// Recursively casts all parameters to the given dtype. /// /// If `non_blocking` is true and the source is in pinned memory and /// destination is on the GPU or vice versa, the copy is performed /// asynchronously with respect to the host. Otherwise, the argument has no /// effect. void to(at::ScalarType dtype, bool non_blocking = false); /// Recursively moves all parameters to the given device. 
/// /// If `non_blocking` is true and the source is in pinned memory and /// destination is on the GPU or vice versa, the copy is performed /// asynchronously with respect to the host. Otherwise, the argument has no /// effect. void to(at::Device device, bool non_blocking = false); void save( std::ostream& out, const ExtraFilesMap& extra_files = ExtraFilesMap()) const; void save( const std::string& filename, const ExtraFilesMap& extra_files = ExtraFilesMap()) const; void _save_for_mobile( std::ostream& out, const ExtraFilesMap& extra_files = ExtraFilesMap(), bool save_mobile_debug_info = false, bool use_flatbuffer = false) const; void _save_for_mobile( const std::string& filename, const ExtraFilesMap& extra_files = ExtraFilesMap(), bool save_mobile_debug_info = false, bool use_flatbuffer = false) const; Module copy() const; Module deepcopy(std::optional<at::Device> device = std::nullopt) const; // Clones both the underlying `ClassType` and the module instance(data), this // function creates a new `ClassType` and returns a new instance that has the // same data as the current instance but with the new type, shared ClassType // will be preserved as well Module clone(bool inplace = false) const; // Clones both the underlying `ClassType` and the module instance(data), this // function creates a new `ClassType` and returns a new instance that has the // same data as the current instance but with the new type, shared ClassType // will be preserved as well. Also allows the caller to specify a set of // method and attribute names to not clone. Module clone( bool inplace, const std::unordered_set<std::string>& ignored_method, const std::unordered_set<std::string>& ignored_attributes) const; void clone_method(const Module& orig, const std::string& name); IValue operator()(std::vector<IValue> inputs); template <typename... Types> IValue create_class(const c10::QualifiedName& name, Types&&... args) const { return create_class(name, {IValue(std::forward<Types>(args))...}); } IValue create_class(const c10::QualifiedName& name, Stack stack) const; inline bool operator==(const Module& y) const noexcept { return _ivalue() == y._ivalue(); } void set_delete_memory(std::shared_ptr<char> delete_mem) { mem_to_delete_ = std::move(delete_mem); } // A set of functions to maintain input shapes through torch.jit.save and // torch.jit.load. It only works on tensors and lists/dicts of tensors // because tracing is only supported by these types. void store_traced_inputs( const std::string& func_name, std::vector<IValue> inputs) { if (inputs.empty()) { return; } auto c10_inputs = c10::impl::GenericList(AnyType::get()); for (IValue& value : inputs) { // Not checking whether this is traceable type as that is already checked // higher up in the stack and changing that would require a larger // restructuring. 
c10_inputs.emplace_back(std::move(value)); } traced_inputs_.insert_or_assign(func_name, c10_inputs); } c10::Dict<std::string, c10::impl::GenericList> retrieve_traced_inputs() const { return traced_inputs_; } private: Module clone_impl( std::unordered_map<TypePtr, TypePtr>& type_remap, bool inplace, IValue::HashIdentityIValueMap memo, const std::unordered_set<std::string>& ignored_methods, const std::unordered_set<std::string>& ignored_attributes) const; void clone_method( const Module& orig, const Function& method, const std::unordered_map<TypePtr, TypePtr>& type_remap); c10::QualifiedName getNameForMethod(std::string basename) const { return QualifiedName(*type()->name(), std::move(basename)); } void to_impl( const std::optional<at::Device>& device, const std::optional<at::ScalarType>& dtype, bool non_blocking); // Extra handle for the module to delete when itself is deleted std::shared_ptr<char> mem_to_delete_; // Map of function names to the traced inputs that they have been traced with c10::Dict<std::string, c10::impl::GenericList> traced_inputs_; // Mutex to keep registring buffer or parameter thread safe. std::shared_ptr<std::mutex> register_mutex_ = std::make_shared<std::mutex>(); }; // C++ equivalent api of `torch.jit.freeze`. See documentation there for // details. TORCH_API Module freeze( const Module& module, const std::optional<std::vector<std::string>>& preserved_attrs = std::nullopt, bool optimize_numerics = true); // C++ equivalent api of `torch.jit.optimize_for_inference`. See documentation // there for details. TORCH_API Module optimize_for_inference( Module& module, const std::vector<std::string>& other_methods = {}); enum class FusionBehavior { STATIC, DYNAMIC }; using FusionStrategy = std::vector<std::pair<FusionBehavior, size_t>>; // clang-format off /* Sets the type and number of specializations that can occur during fusion. Usage: provide a list of pairs (type, depth) where type is one of STATIC or DYNAMIC and depth is an integer. Behavior - static vs dynamic: In STATIC fusion, fused ops are compiled to have fixed input shapes. The shape is determined based on some initial profiling runs. In DYNAMIC fusion, fused ops are compiled to have variable input shapes, so that multiple shapes are possible. In both cases, we also recompile on new striding behavior, device, or dtype. Behavior - fallback functions & depth: When an input doesn't match the format required by the specialized compiled op, it will run a fallback function. Fallback functions are recursively be compiled and specialized based on the observed tensor shapes. Since compilation can be slow, the "depth" parameter is provided to limit the number of specializations that can be compiled, before giving up on recompiling and falling back to a completely un-fused, un-specialized implementation. The list of (type, depth) pairs controls the type of specializations and the number of specializations. For example: [(STATIC, 2), (DYNAMIC, 2)] indicates that the first two specializations will use static fusions, the following two specializations will use dynamic fusion, and any inputs that satisfy none of the 4 options will run an unfused implementation. NB: in the future, if more as more fusion backends are added there may be more granular apis for specific fusers. 
*/ // clang-format on TORCH_API FusionStrategy getFusionStrategy(); // returns previous strategy TORCH_API FusionStrategy setFusionStrategy(FusionStrategy& fusion_strategy); namespace detail { struct TORCH_API SlotCursor { Module module_; int64_t i_; // slot offset, -1 indicates the module itself }; } // namespace detail // This iterator allows the (optionally recursive) enumeration of // the members of a Module. It performs a depth-first pre-order // traversal of the module. The Policy template parameter determines // which slots of the object should be included. For instance, // when iterating parameters, we return the parameter tensors, // but skip modules, buffers, and other attributes. // See ModulePolicy for comments about Policy object's API. template <typename Policy> struct slot_iterator_impl { using SlotCursor = detail::SlotCursor; using value_type = typename Policy::value_type; slot_iterator_impl( Module root, bool recurse, // if true, do a depth-first search, otherwise, just look at // slots of root bool return_module) // if true include root itself as the first thing // visited (used in modules()) : cursors_({SlotCursor{std::move(root), return_module ? -1 : 0}}), recurse_(recurse) { // advance iterator to first valid element (or the end, if empty) while_not_valid_next(); } // empty cursors_, represents end of iteration slot_iterator_impl() : recurse_(false) {} value_type operator*() const { return Policy::create(cursors_, cur()); } value_type operator->() const { return **this; } slot_iterator_impl& operator++() { next_valid(); return *this; } slot_iterator_impl operator++(int) { // this is really expensive, should we delete it so people don't use it // instead of prefix? slot_iterator_impl old = *this; ++(*this); return old; } private: // return_module() is a corner case where instead of returning a submodule // of root, we are returning root itself, because we are iterating modules(), // which contains the root module itself. // It is represented with a single SlotCursor whose index is -1. bool return_module() const { return top().i_ == -1; } const SlotCursor& top() const { return cursors_.back(); } SlotCursor& top() { return cursors_.back(); } IValue cur() const { return return_module() ? top().module_._ivalue() : top().module_._ivalue()->getSlot(top().i_); } // advance to the next slot in a depth first pre-order traversal of the // modules slots. This function does not guarantee the next slot is a // valid element of the iteration. That is done by valid(). // invariant: !cursors_.empty() void next() { // we just returned the module itself, advance i_ to 0 so we are now // at the first slot of the module. if (return_module()) { ++top().i_; return; } // the last traversal action advanced beyond the number of slots in the // module so continue the iteration in the parent. if (top().i_ >= int64_t(top().module_._ivalue()->type()->numAttributes())) { cursors_.pop_back(); if (!cursors_.empty()) { ++top().i_; } return; } // if the current thing is a module, we have to scan it for recursive // traversals. We do this by adding a new SlotCursor to track the traversal. if (recurse_ && top().module_._ivalue()->type()->getAttribute(top().i_)->is_module()) { cursors_.emplace_back(SlotCursor{cur().toModule(), 0}); return; } // common case: advance to the next slot. ++top().i_; } // is the current position of the iterator a valid one? // otherwise, we have to continue advancing. 
bool valid() const { return top().i_ < int64_t(top().module_._ivalue()->type()->numAttributes()) && Policy::valid( top().module_._ivalue()->type(), top().i_, top().module_._ivalue()->getSlot(top().i_)); } void while_not_valid_next() { // advance iteration until we are either at the end (cursors_.empty()) // or in a valid state. return_module() is a special case, // and is always considered valid, regardless of Policy, because it is // it is only true when we are iterating modules. while (!cursors_.empty() && !return_module() && !valid()) { next(); } } void next_valid() { // avoid crashing if this is empty if (cursors_.empty()) { return; } // advance to next element, which is maybe not valid next(); while_not_valid_next(); } std::vector<SlotCursor> cursors_; bool recurse_; friend inline bool operator!=( const slot_iterator_impl<Policy>& a, const slot_iterator_impl<Policy>& b) { // we are finished iteration when we have no more iteration SlotCursors. // end is always an empty iterator with no cursors. return (a.cursors_.empty() != b.cursors_.empty()); } }; // This type represents lists of parameters, attributes, and // submodules contained in the module. It is abstract because // they are not stored directly in std::vectors but inside the // module's IValue object itself. template <typename Policy> struct slot_list_impl { using iterator = slot_iterator_impl<Policy>; using const_iterator = slot_iterator_impl<Policy>; using value_type = typename iterator::value_type; slot_iterator_impl<Policy> begin() const { return slot_iterator_impl<Policy>(module_, recurse_, return_module_); } slot_iterator_impl<Policy> end() const { return slot_iterator_impl<Policy>(); } size_t size() const { if (!size_) { size_ = size_t(0); for ([[maybe_unused]] const value_type& _ : *(this)) { ++*size_; } } return *size_; } slot_list_impl(Module module, bool recurse, bool return_module) : module_(std::move(module)), recurse_(recurse), return_module_(return_module), size_(std::nullopt) { if (!recurse && !return_module && Policy::all_slots) { size_ = module_.num_slots(); } } private: Module module_; bool recurse_; bool return_module_; // size of this list, cached on first request // when we need to filter the slot list mutable std::optional<size_t> size_; friend struct Module; }; namespace detail { // slot_iterator_impl always iterate over all the slots in a module, // the Policy template argument determines slots should be returned and their // types struct TORCH_API ModulePolicy { // the type of the value being returned using value_type = Module; // the logic for creating the type being returned, given the raw IValue // of that object. static value_type create( const std::vector<detail::SlotCursor>& cursors, IValue v) { return Module(std::move(v).toObject()); } // is slot i in typ something that this iterator should return, otherwise, // we skip it. static bool valid(const ClassTypePtr& typ, size_t i, const IValue& v) { return typ->getAttribute(i)->is_module(); } // are we going to return everything? If so, we can optimize the calculate // of the size of the list. 
static constexpr bool all_slots = false; }; struct TORCH_API ParameterPolicy { using value_type = at::Tensor; static value_type create( const std::vector<detail::SlotCursor>& cursors, IValue v) { return std::move(v).toTensor(); } static bool valid(const ClassTypePtr& typ, size_t i, const IValue& v) { return typ->is_parameter(i) && v.isTensor(); } static constexpr bool all_slots = false; }; struct TORCH_API BufferPolicy { using value_type = at::Tensor; static value_type create( const std::vector<detail::SlotCursor>& cursors, IValue v) { return std::move(v).toTensor(); } static bool valid(const ClassTypePtr& typ, size_t i, const IValue& v) { return typ->getAttribute(i)->isSubtypeOf(*TensorType::get()) && typ->is_buffer(i); } static constexpr bool all_slots = false; }; struct TORCH_API AttributePolicy { using value_type = IValue; static value_type create( const std::vector<detail::SlotCursor>& cursors, IValue v) { return v; } static bool valid(const ClassTypePtr& typ, size_t i, const IValue& v) { return true; } static constexpr bool all_slots = true; }; // take a Policy object, and make a version of it that returns the slot. // along with the fully qualified name of that slot. This is used for the named_ // variants like named_parameters(). template <typename Policy> struct NamedPolicy { using value_type = Named<typename Policy::value_type>; static value_type create( const std::vector<detail::SlotCursor>& cursors, IValue v) { std::string name; if (cursors.size() == 1) { name = (cursors.back().i_ == -1) ? "" : nameFragment(cursors.back()); } else { std::ostringstream ss; for (const auto i : c10::irange(cursors.size())) { if (i > 0) { ss << "."; } ss << nameFragment(cursors[i]); } name = ss.str(); } return value_type{std::move(name), Policy::create(cursors, std::move(v))}; } static bool valid(const ClassTypePtr& t, size_t i, const IValue& v) { return Policy::valid(t, i, v); } static constexpr bool all_slots = Policy::all_slots; private: static std::string nameFragment(const detail::SlotCursor& f) { return f.module_.type()->getAttributeName(f.i_); } }; } // namespace detail TORCH_API bool& getInlineEverythingMode(); namespace script { // We once had a `script::` namespace that was deleted. This is for backcompat // of the public API; new code should not use this type alias. using Module = ::torch::jit::Module; using ExtraFilesMap = ::torch::jit::ExtraFilesMap; } // namespace script } // namespace torch::jit ```
================================================================================================================================= SOURCE CODE FILE: object.h LINES: 1 SIZE: 6.14 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\api\object.h ENCODING: utf-8 ```h #pragma once #include <ATen/core/functional.h> #include <ATen/core/ivalue.h> #include <torch/csrc/jit/api/method.h> #include <optional> #include <utility> namespace torch::jit { struct Resolver; using ResolverPtr = std::shared_ptr<Resolver>; using ObjectPtr = c10::intrusive_ptr<c10::ivalue::Object>; // Throw this in C++ land if `attr` fails. This will be converted to a Python // AttributeError by the Python binding code class ObjectAttributeError : public std::runtime_error { public: ObjectAttributeError(const std::string& what) : std::runtime_error(what) {} }; struct TORCH_API Object { Object() = default; Object(const Object&) = default; Object& operator=(const Object&) = default; Object(Object&&) noexcept = default; Object& operator=(Object&&) noexcept = default; Object(ObjectPtr _ivalue) : _ivalue_(std::move(_ivalue)) {} Object(std::shared_ptr<CompilationUnit> cu, const c10::ClassTypePtr& type); Object( c10::QualifiedName, std::shared_ptr<CompilationUnit> cu, bool shouldMangle = false); ObjectPtr _ivalue() const { TORCH_INTERNAL_ASSERT(_ivalue_); return _ivalue_; } c10::ClassTypePtr type() const { return _ivalue()->type(); } struct Property { std::string name; Method getter_func; std::optional<Method> setter_func; }; void setattr(const std::string& name, c10::IValue v) { if (_ivalue()->type()->hasConstant(name)) { TORCH_CHECK( false, "Can't set constant '", name, "' which has value:", _ivalue()->type()->getConstant(name)); } else if (auto slot = _ivalue()->type()->findAttributeSlot(name)) { const c10::TypePtr& expected = _ivalue()->type()->getAttribute(*slot); TORCH_CHECK( v.type()->isSubtypeOf(*expected), "Expected a value of type '", expected->repr_str(), "' for field '", name, "', but found '", v.type()->repr_str(), "'"); _ivalue()->setSlot(*slot, std::move(v)); } else { TORCH_CHECK(false, "Module has no attribute '", name, "'"); } } c10::IValue attr(const std::string& name) const { if (auto r = _ivalue()->type()->findAttributeSlot(name)) { return _ivalue()->getSlot(*r); } if (auto r = _ivalue()->type()->findConstantSlot(name)) { return _ivalue()->type()->getConstant(*r); } std::stringstream err; err << _ivalue()->type()->repr_str() << " does not have a field with name '" << name.c_str() << "'"; throw ObjectAttributeError(err.str()); } c10::IValue attr(const std::string& name, c10::IValue or_else) const { if (auto r = _ivalue()->type()->findAttributeSlot(name)) { return _ivalue()->getSlot(*r); } if (auto r = _ivalue()->type()->findConstantSlot(name)) { return _ivalue()->type()->getConstant(*r); } return or_else; } bool hasattr(const std::string& name) const { return _ivalue()->type()->hasAttribute(name) || _ivalue()->type()->hasConstant(name); } // each object owns its methods. 
The reference returned here // is guaranteed to stay valid until this module has been destroyed Method get_method(const std::string& name) const { if (auto method = find_method(name)) { return *method; } TORCH_CHECK(false, "Method '", name, "' is not defined."); } const std::vector<Method> get_methods() const { return c10::fmap(type()->methods(), [&](Function* func) { return Method(_ivalue(), func); }); } bool has_property(const std::string& name) const { for (const auto& prop : type()->properties()) { if (prop.name == name) { return true; } } return false; } const Property get_property(const std::string& name) const { for (const auto& prop : type()->properties()) { if (prop.name == name) { std::optional<Method> setter = std::nullopt; if (prop.setter) { setter = Method(_ivalue(), prop.setter); } return Property{ prop.name, Method(_ivalue(), prop.getter), std::move(setter)}; } } TORCH_CHECK(false, "Property '", name, "' is not defined."); } const std::vector<Property> get_properties() const { return c10::fmap(type()->properties(), [&](ClassType::Property prop) { std::optional<Method> setter = std::nullopt; if (prop.setter) { setter = Method(_ivalue(), prop.setter); } return Property{ std::move(prop.name), Method(_ivalue(), prop.getter), std::move(setter)}; }); } std::optional<Method> find_method(const std::string& basename) const; /// Run a method from this module. /// /// For example: /// @code /// IValue output = module->run("relu_script", a, b); /// @endcode /// /// To get a compile a module from a source string, see torch::jit::compile /// /// @param method_name The name of the method to run /// @param args Arguments to be passed to the method /// @return An IValue containing the return value (or values if it is a tuple) /// from the method template <typename... Types> IValue run_method(const std::string& method_name, Types&&... args) { return get_method(method_name)({IValue(std::forward<Types>(args))...}); } // so that C++ users can easily add methods void define(const std::string& src, const ResolverPtr& resolver = nullptr); size_t num_slots() const { return _ivalue()->slots().size(); } // shallow copy the object Object copy() const; // Copies all the attributes of the object recursively without creating new // `ClassType`, including deepcopy of Tensors Object deepcopy() const; private: // mutable be we lazily initialize in module_object. mutable ObjectPtr _ivalue_; }; namespace script { // We once had a `script::` namespace that was deleted. This is for backcompat // of the public API; new code should not use this type alias. using Object = ::torch::jit::Object; } // namespace script } // namespace torch::jit ```
======================================================================================================================================= SOURCE CODE FILE: backend.h LINES: 1 SIZE: 4.05 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\backends\backend.h ENCODING: utf-8 ```h #pragma once #include <ATen/core/builtin_function.h> #include <ATen/core/stack.h> #include <torch/csrc/jit/backends/backend_interface.h> #include <torch/custom_class.h> namespace torch::jit { namespace { // NOLINTNEXTLINE(clang-diagnostic-unneeded-internal-declaration) inline c10::FunctionSchema getIsAvailableSchema() { c10::Argument self("self", c10::AnyType::get()); c10::Argument available("available", c10::BoolType::get()); c10::FunctionSchema preprocessor_schema( "is_available", /*overload_name=*/"", /*arguments=*/{self}, /*returns=*/{available}); return preprocessor_schema; } constexpr static auto kBackendsNamespace = "__backends__"; // NOLINTNEXTLINE(clang-diagnostic-unneeded-internal-declaration) inline c10::FunctionSchema getCompileSchema() { c10::Argument self("self", c10::AnyType::get()); c10::Argument mod("processed", c10::AnyType::get()); auto any_dict_ty = c10::DictType::create(c10::StringType::get(), c10::AnyType::get()); c10::Argument method_compile_spec("method_compile_spec", any_dict_ty); c10::Argument handles("handles", any_dict_ty); c10::FunctionSchema compile_schema( "compile", /*overload_name=*/"", /*arguments=*/{self, mod, method_compile_spec}, /*returns=*/{handles}); return compile_schema; } // NOLINTNEXTLINE(clang-diagnostic-unneeded-internal-declaration) inline c10::FunctionSchema getExecuteSchema() { auto any_list_ty = c10::ListType::create(c10::AnyType::get()); c10::Argument self("self", c10::AnyType::get()); c10::Argument handle("handle", c10::AnyType::get()); c10::Argument input("input", any_list_ty); c10::Argument output("output", any_list_ty); return c10::FunctionSchema( "execute", /*overload_name=*/"", /*arguments=*/{self, handle, input}, /*returns=*/{output}); } template <typename TBackendInterface> std::function<void(Stack&)> getIsAvailableFunc() { return [](Stack& stack) { auto self = pop(stack).toCustomClass<TBackendInterface>(); auto ret = self->is_available(); push(stack, ret); }; } template <typename TBackendInterface> std::function<void(Stack&)> getCompileFunc() { return [](Stack& stack) { auto method_compile_spec = pop(stack).toGenericDict(); auto processed = pop(stack); auto self = pop(stack).toCustomClass<TBackendInterface>(); auto ret = self->compile(processed, method_compile_spec); push(stack, ret); }; } template <typename TBackendInterface> std::function<void(Stack&)> getExecuteFunc() { return [](Stack& stack) { auto args = pop(stack); auto handle = pop(stack); auto self = pop(stack); auto backend = self.toCustomClass<TBackendInterface>(); auto res = backend->execute(handle, args.toList()); push(stack, res); }; } } // namespace // Static registration API for backends. template <class TBackendInterface> class backend { static_assert( std::is_base_of_v<PyTorchBackendInterface, TBackendInterface>, "torch::jit::backend<T> requires T to inherit from PyTorchBackendInterface"); std::string backend_name_; public: // Registers a new backend with /p name, and the given /p preprocess // function. 
backend(const std::string& name) : backend_name_(name) { static auto cls = torch::class_<TBackendInterface>(kBackendsNamespace, name) .def(torch::init<>()) ._def_unboxed( "is_available", getIsAvailableFunc<TBackendInterface>(), getIsAvailableSchema()) ._def_unboxed( "compile", getCompileFunc<TBackendInterface>(), getCompileSchema()) ._def_unboxed( "execute", getExecuteFunc<TBackendInterface>(), getExecuteSchema()); } }; } // namespace torch::jit ```
===================================================================================================================================================== SOURCE CODE FILE: backend_debug_handler.h LINES: 1 SIZE: 6.32 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\backends\backend_debug_handler.h ENCODING: utf-8 ```h #pragma once #include <ATen/core/ivalue.h> #include <torch/csrc/jit/backends/backend_detail.h> #include <torch/csrc/jit/ir/ir.h> #include <torch/csrc/jit/ir/scope.h> #include <atomic> namespace torch::jit { /* * BackendDebugHandleManager is responsible for issuing debug handles to * backends. Debug handles are associated with nodes of a graph. * BackendDebugHandleManager also maintains a map * [debug-handle, DebugInfoTuple = {source range, inlined callstack ptr}] that * will help generate a callstack for exceptions raised using debug handles. * Effectively, debug handles are something that is given to the backend, and later, * when an exception occurs in the backend, the backend can tell, using the debug * handle, that an exception occurred here. Then the runtime can generate a * callstack corresponding to the exception. * There are two parts to BackendDebugHandleManager: * 1. static std::atomic debug_handle * 2. Map of [debug-handle, DebugInfoTuple] * * About 1: * Why do they have to be unique? The reason is that by ensuring * uniqueness of debug handles, we remove the burden of another layer of * mapping where we need to say this set of debug handles were generated for * this lowered module or this bytecode function. This simplifies the API for * serialization since debug handles can uniquely identify DebugInfoTuple. * It also simplifies the runtime API for throwing exceptions: exception throwing * only needs to know the debug_handle and not which module or method threw it. * There are 2 issues to keep in mind, though, for the static std::atomic * debug_handle: A. Performance implications of using an atomic variable. However, * this is only used for compilation so we assume we can absorb some of that * penalty. Plus, if there is no contention then we should have less to worry * about. B. If repeated compilation is part of a long running process then we * may overflow int64_t. We may detect and fail on this. For now this is not * done. * * Now about 2: * There are two use cases for [debug-handle, DebugInfoTuple] * A. During bytecode generation, the DebugInfoTuples corresponding to the nodes * of the inlined graph being serialized are stored in this object and a * unique debug handle is returned. This unique debug handle is stored in * mobile_debug info for pytorch lite models. It will be used for raising * exceptions as well as profiling. B. During backend lowering, each backend's * preprocess/compile method can compile the method's graph and serialize those * methods. Once the method is lowered to the backend, the graph is essentially lost. * Without access to the graph it is hard to generate model level debug info. Thus * the debug handles provide a way to map nodes of the graph to the model level * debug info. * * During byte-code model serialization, [debug-handle, DebugInfoTuple] is * serialized. Now we know a. debug handles and b. how to map debug handles to * model source code. Thus we can either do eager symbolication by converting * debug handles to corresponding source code at runtime, or do lazy * symbolication offline. 
* * Note that it is not necessary to serialize [debug-handle, DebugInfoTuple] * corresponding to lowered backend if the lowering process, that is * preprocess/compile, and execution happens in the same session, then eager * symbolication can be employed. * * Now how does BackendDebugHandleManager capture all of the above? * By providing two API. * 1. getNextDebugHandle which given a Node* returns a unique debug handle, * that will uniquely identify DebugInfoTuple. * and * 2. getCallStackPtrMap which returns the map * [debug-handle, DebugInfoTuple] * * 1 provides debug handles to backends and 2 provides runtime a way to map * debug handles to source level debug info. * * So why does debug handle map to DebugInfoTuple = {source range and inlined * cs}? {debug_handle, source_range_tag, serialized_callstack} Take this * example: class L(nn.Module): def __init__(self) -> None: * ... * def forward(self, x): * return x * 5 * class M(nn.Module): * def __init__(self) -> None: * ... * def forward(self, x): * return x - 2 * class N(nn.Module): * def __init__(self) -> None: * self.m = M() * def forward(self, x): * return self.m(x) + 3 * m = torch.jit.script(N()) * Once you inline m's forward method, m.forward.graph will look something * like this * graph(%self...): * %x = aten::mul(..) * %x = aten::sub(x, ..) * %y = aten::add(x, ..) * .. * Inlined callstack ptr for these two nodes will look like: * aten::mul's inlined CS (callstack): [N.forward, source range] -> [M.forward, * source range] aten::sub's inlined CS (callstack): [N.forward, source range] * aten::add's inlined CS: null * mul node's inlined CS contains only information about the callsites' source * range The information about mul node's source range ('return x * 5') is not * available in its inlined CS. It is rather part of node's source range * instead of inlined CS. Thus to get full stack: [N.forward, source range] -> * [M.forward, source range] -> [aten::mul's source range] We need to track * mul's source range and inlined CS both. */ using BackendDebugInfoMapType = std::unordered_map<torch::jit::DebugHandleType, DebugInfoTuple>; /* * This class is used to generate debug info map. * backend's preprocess will call generate_debug_handles (see * backend_detail.cpp), which uses debug_handle_manager to generate debug * handles. When lowering process finishes, calling stopRecording will * return debug info map from debug_handle_manager */ class TORCH_API BackendDebugInfoRecorder { public: BackendDebugInfoRecorder() = default; int64_t getNextDebugHandle(const Node* node); // Reason this is not done as RAII is that work done in stopRecording // can throw, and throwing with dtor will call terminate and thus voids any // exception catching at a higher level. BackendDebugInfoMapType stopRecording(); NodeToDebugHandle generate_debug_handles(const std::shared_ptr<Graph>& graph); private: static std::atomic<DebugHandleType> unique_debug_handle_; BackendDebugInfoMapType handles_to_inlined_callstack_ptrs_; }; } // namespace torch::jit ```
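A rough sketch of how a lowering step might use the recorder described above. The method name "forward" and the overall flow are assumptions for illustration; in practice `codegen_backend_module` drives the recorder and hands backends a `BackendDebugHandleGenerator`.

```cpp
#include <torch/csrc/jit/api/module.h>
#include <torch/csrc/jit/backends/backend_debug_handler.h>

// Illustrative lowering step: hand out a debug handle for every node of a
// method graph, then collect the handle -> debug-info map that a backend
// would serialize next to its compiled payload.
torch::jit::BackendDebugInfoMapType lower_with_debug_info(
    const torch::jit::Module& mod) {
  using namespace torch::jit;
  BackendDebugInfoRecorder recorder;
  std::shared_ptr<Graph> graph = mod.get_method("forward").graph();

  // Assign a unique handle to each node of the graph.
  NodeToDebugHandle handles = recorder.generate_debug_handles(graph);
  for (const auto& node_and_handle : handles) {
    int64_t handle = node_and_handle.second;
    (void)handle; // a real backend would store `handle` with the lowered node
  }

  // Deliberately not RAII (see the class comment): stopRecording() can throw.
  return recorder.stopRecording();
}
```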
======================================================================================================================== SOURCE CODE FILE: backend_debug_info.h LINES: 1 SIZE: 2.32 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\backends\backend_debug_info.h ENCODING: utf-8
```h
#pragma once

#ifndef BUILD_LITE_INTERPRETER
#include <torch/csrc/jit/backends/backend_debug_handler.h>
#endif
#include <torch/custom_class.h>

namespace torch::jit {

constexpr static auto kBackendUtilsNamespace = "backendutils";
constexpr static auto kBackendDebugInfoClass = "BackendDebugInfo";

#ifndef BUILD_LITE_INTERPRETER
/*
 * Custom class for holding debug information in lowered modules, intended
 * purely for keeping this information around so it can later be serialized
 * outside of the lowered module itself.
 * Its usage pattern is:
 * 1. LoweredModule declares an instance of this class in __backend_debug_info
 * 2. During serialization, __backend_debug_info is used to obtain the debug
 *    information.
 * 3. The contents of LoweredModule.__backend_debug_info are not serialized
 *    within the LoweredModule itself.
 */
class TORCH_API PyTorchBackendDebugInfo : public torch::CustomClassHolder {
 public:
  PyTorchBackendDebugInfo() = default;

  std::optional<BackendDebugInfoMapType>& getDebugInfoMap() {
    return debug_info_map_;
  }

  void setDebugInfoMap(BackendDebugInfoMapType&& debug_info_map) {
    debug_info_map_ = std::move(debug_info_map);
  }

 private:
  std::optional<BackendDebugInfoMapType> debug_info_map_;
};

#else

/*
 * The dummy class exists for the following reason:
 * __backend_debug_info is of type BackendDebugInfo, a torchbind class backed
 * by the cpp class PyTorchBackendDebugInfo.
 * PyTorchBackendDebugInfo depends on ir.h, scope.h, source_range, etc., which
 * we do not include on the lite interpreter side. Thus, on the lite
 * interpreter side we cannot have a valid definition of
 * PyTorchBackendDebugInfo. We do not need a valid instance of
 * __backend_debug_info in the lite interpreter anyway, since we do not
 * serialize this info as part of LoweredModule, as mentioned earlier.
 * However, since LoweredModule has a registered attribute
 * __backend_debug_info, we still need to make sure that BackendDebugInfo is
 * registered with TorchScript. In this case it does not have to be backed by
 * PyTorchBackendDebugInfo, so we create a dummy PyTorchBackendDebugInfoDummy
 * just for this purpose.
 */
class PyTorchBackendDebugInfoDummy : public torch::CustomClassHolder {
 public:
  PyTorchBackendDebugInfoDummy() = default;
};
#endif
} // namespace torch::jit
```
============================================================================================================================================== SOURCE CODE FILE: backend_detail.h LINES: 1 SIZE: 1.09 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\backends\backend_detail.h ENCODING: utf-8 ```h #pragma once #include <torch/csrc/jit/api/module.h> #include <ATen/core/jit_type.h> #include <functional> namespace torch::jit { using DebugHandleType = int64_t; using NodeToDebugHandle = std::unordered_map<Node*, DebugHandleType>; using BackendDebugHandleGenerator = std::function<NodeToDebugHandle(const std::shared_ptr<Graph>&)>; namespace detail { using BackendPreprocessFunction = std::function<c10::IValue( const Module&, const c10::Dict<IValue, IValue>&, const BackendDebugHandleGenerator& generate_debug_handles)>; TORCH_API void registerBackendPreprocessFunction( const std::string& name, const BackendPreprocessFunction& preprocess); bool hasBackendPreprocessFunction(const std::string& name); BackendPreprocessFunction getBackendPreprocessFunction(const std::string& name); TORCH_API Module codegen_backend_module( const std::string& backend_name, const Module& orig_module, const c10::Dict<IValue, IValue>& method_compile_spec, const c10::DictTypePtr& any_dict_ty); } // namespace detail } // namespace torch::jit ```
======================================================================================================================== SOURCE CODE FILE: backend_exception.h LINES: 1 SIZE: 2.12 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\backends\backend_exception.h ENCODING: utf-8
```h
#pragma once

#include <c10/util/Exception.h>

#include <utility>

namespace c10 {
class TORCH_API BackendRuntimeException : public c10::Error {
 public:
  // Use debug_handle to throw exception
  BackendRuntimeException(
      SourceLocation loc,
      std::string msg,
      int64_t debug_handle)
      : c10::Error(loc, std::move(msg)) {
    debug_handles.push_back(debug_handle);
  }
  // If rethrowing, can push another debug_handle.
  // This is useful in a couple of scenarios.
  // 1. A submodule is lowered and the lite interpreter has a CallMethod to the
  //    lowered module's method. In this case the lowered module will throw
  //    with a handle, plus there will be another debug handle corresponding
  //    to the CallMethod node in the lite interpreter. Both together give the
  //    complete trace. This function allows the lite interpreter to rethrow
  //    with the debug handle it has for CallMethod.
  // 2. Another scenario is when the lite interpreter can make function calls,
  //    or the lowered backend also has function call ability. Thus we have
  //    multiple function frames. Now we need a stack of handles to
  //    symbolicate the entire stack trace.
  void pushDebugHandle(int64_t debug_handle) {
    debug_handles.push_back(debug_handle);
  }
  const std::vector<int64_t>& getDebugHandles() {
    return debug_handles;
  }

 private:
  // Stores stack of debug handles.
  std::vector<int64_t> debug_handles;
};

} // namespace c10

#define TORCH_DELEGATED_BACKEND_THROW(cond, msg, debug_handle) \
  if (C10_UNLIKELY_OR_CONST(!(cond))) {                        \
    throw ::c10::BackendRuntimeException(                      \
        {__func__, __FILE__, static_cast<uint32_t>(__LINE__)}, \
        msg,                                                   \
        debug_handle);                                         \
  }

#define TORCH_DELEGATED_BACKEND_RETHROW(e, debug_handle) \
  do {                                                   \
    e.pushDebugHandle(debug_handle);                     \
    throw;                                               \
  } while (false)

#define DEBUG_HANDLE_UNKNOWN -1
```
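A small sketch of how a delegated backend and its caller might use these macros. The function names and handle values are made up for illustration.

```cpp
#include <torch/csrc/jit/backends/backend_exception.h>

// Inside the backend: fail with the debug handle of the offending node.
void run_kernel(bool ok, int64_t node_debug_handle) {
  TORCH_DELEGATED_BACKEND_THROW(
      ok, "kernel execution failed", node_debug_handle);
}

// In the caller (e.g. around a CallMethod into the lowered module): push the
// call site's own handle so the whole delegated call stack can be
// symbolicated later, then rethrow the original exception.
void call_lowered_method(int64_t call_site_debug_handle) {
  try {
    run_kernel(/*ok=*/false, /*node_debug_handle=*/7);
  } catch (c10::BackendRuntimeException& e) {
    TORCH_DELEGATED_BACKEND_RETHROW(e, call_site_debug_handle);
  }
}
```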
======================================================================================================================== SOURCE CODE FILE: backend_init.h LINES: 1 SIZE: 0.25 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\backends\backend_init.h ENCODING: utf-8
```h
#pragma once

#include <torch/csrc/jit/python/pybind.h>
#include <torch/csrc/utils/pybind.h>

namespace torch::jit {
// Initialize Python bindings for JIT to_<backend> functions.
void initJitBackendBindings(PyObject* module);
} // namespace torch::jit
```
======================================================================================================================== SOURCE CODE FILE: backend_interface.h LINES: 1 SIZE: 1.16 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\backends\backend_interface.h ENCODING: utf-8
```h
#pragma once

#include <torch/custom_class.h>

namespace torch::jit {

// Interface for a JIT backend.
class TORCH_API PyTorchBackendInterface : public torch::CustomClassHolder {
 public:
  PyTorchBackendInterface() noexcept;
  ~PyTorchBackendInterface() override;

  // Returns true if the backend is available to process delegation calls.
  virtual bool is_available() = 0;

  // Compile the module contained in \p processed using the details provided in
  // \p method_compile_spec for each module method that should be compiled for
  // the backend. \p method_compile_spec should be of type Dict<string, Any>.
  // \returns a dictionary of type Dict<string, Any> that contains a backend
  // handle for each method that can run on the backend (i.e. each key in \p
  // method_compile_spec).
  virtual c10::impl::GenericDict compile(
      c10::IValue processed,
      c10::impl::GenericDict method_compile_spec) = 0;

  // Execute the method specified by \p handle using \p inputs. \returns the
  // outputs as a tuple.
  virtual c10::impl::GenericList execute(
      c10::IValue handle,
      c10::impl::GenericList inputs) = 0;
};
} // namespace torch::jit
```
======================================================================================================================== SOURCE CODE FILE: backend_preprocess.h LINES: 1 SIZE: 0.42 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\backends\backend_preprocess.h ENCODING: utf-8
```h
#pragma once

#include <torch/csrc/jit/backends/backend_detail.h>

namespace torch::jit {
class backend_preprocess_register {
  std::string backend_name_;

 public:
  backend_preprocess_register(
      const std::string& name,
      const detail::BackendPreprocessFunction& preprocess)
      : backend_name_(name) {
    detail::registerBackendPreprocessFunction(name, preprocess);
  }
};
} // namespace torch::jit
```
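A sketch of how this registration helper is typically paired with a backend. The "echo" name matches the hypothetical backend sketched earlier, the "forward" method name is an assumption, and a real preprocess step would usually return a serialized payload rather than the module object itself.

```cpp
#include <torch/csrc/jit/api/module.h>
#include <torch/csrc/jit/backends/backend_preprocess.h>

namespace {

// Pass-through preprocess step for the hypothetical "echo" backend: it only
// requests debug handles for the forward graph and returns the module object
// as the "processed" payload handed to the backend's compile().
c10::IValue preprocess(
    const torch::jit::Module& mod,
    const c10::Dict<c10::IValue, c10::IValue>& method_compile_spec,
    const torch::jit::BackendDebugHandleGenerator& generate_debug_handles) {
  auto graph = mod.get_method("forward").graph();
  torch::jit::NodeToDebugHandle handles = generate_debug_handles(graph);
  (void)handles; // a real backend would serialize these alongside its payload
  return mod._ivalue();
}

// Ties the preprocess function to the same name used at backend registration.
static const auto pre_reg =
    torch::jit::backend_preprocess_register("echo", preprocess);

} // namespace
```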
======================================================================================================================== SOURCE CODE FILE: backend_resolver.h LINES: 1 SIZE: 0.25 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\backends\backend_resolver.h ENCODING: utf-8
```h
#pragma once

#include <torch/csrc/jit/frontend/resolver.h>

namespace torch::jit {
// Create a Resolver for use in generating LoweredModules for specific
// backends.
TORCH_API std::shared_ptr<Resolver> loweredModuleResolver();
} // namespace torch::jit
```
============================================================================================================================================= SOURCE CODE FILE: interface.h LINES: 1 SIZE: 1.86 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\codegen\cuda\interface.h ENCODING: utf-8 ```h #pragma once #include <c10/macros/Export.h> #include <torch/csrc/jit/ir/ir.h> #include <torch/csrc/jit/passes/pass_manager.h> #include <torch/csrc/jit/runtime/profiling_record.h> /* * This file contains APIs for cuda fuser; * * We use an empty static struct to hold the function pointers, which are * registered separately. This is to support cpu-only compilation. * Registration is done in torch/csrc/jit/codegen/cuda/register_interface.cpp */ namespace torch::jit::fuser::cuda { TORCH_API std::atomic<bool>& getCudaFusionGuardMode(); TORCH_API bool getSingletonFusion(); TORCH_API bool setSingletonFusion(bool value); TORCH_API bool getHorizontalFusion(); TORCH_API bool setHorizontalFusion(bool value); // dummy struct to allow API registration struct CudaFuserInterface { void (*fn_compile_n)(Node*) = nullptr; void (*fn_run_n_s)(const Node*, Stack&) = nullptr; void (*fn_fuse_graph)(std::shared_ptr<Graph>&) = nullptr; bool (*fn_can_fuse_n)(const Node*) = nullptr; void (*fn_insert_profile_inodes)(ProfilingRecord* pr) = nullptr; bool (*fn_profile_n)(const Node*) = nullptr; bool (*fn_skip_n)(const std::string&, bool flip) = nullptr; }; // Get interface, this is used by registration and user facing API internally TORCH_API CudaFuserInterface* getFuserInterface(); TORCH_API void compileFusionGroup(Node* fusion_node); TORCH_API void runFusionGroup(const Node* fusion_node, Stack& stack); TORCH_API void fuseGraph(std::shared_ptr<Graph>&); TORCH_API bool canFuseNode(const Node* node); TORCH_API void InsertProfileNodesForCUDAFuser(ProfilingRecord* pr); TORCH_API bool profileNode(const Node* node); TORCH_API bool skipNode(const std::string& symbol_str, bool flip = true); TORCH_API bool isEnabled(); TORCH_API bool setEnabled(bool is_enabled); TORCH_API bool canBeEnabled(); } // namespace torch::jit::fuser::cuda ```
======================================================================================================================== SOURCE CODE FILE: builtin_functions.h LINES: 1 SIZE: 0.22 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\frontend\builtin_functions.h ENCODING: utf-8
```h
#pragma once

#include <torch/csrc/Export.h>
#include <torch/csrc/jit/api/module.h>

namespace torch::jit {
TORCH_API const std::vector<Function*>& getAllBuiltinFunctionsFor(Symbol name);
} // namespace torch::jit
```
======================================================================================================================== SOURCE CODE FILE: canonicalize_modified_loop.h LINES: 1 SIZE: 0.29 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\frontend\canonicalize_modified_loop.h ENCODING: utf-8
```h
#pragma once

#include <memory>

#include <torch/csrc/Export.h>

namespace torch::jit {
struct Graph;

// Transforms loops so that they can be represented as python
// for or while loops
TORCH_API void CanonicalizeModifiedLoops(std::shared_ptr<Graph>& graph);
} // namespace torch::jit
```
==================================================================================================================================================== SOURCE CODE FILE: concrete_module_type.h LINES: 1 SIZE: 9.06 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\frontend\concrete_module_type.h ENCODING: utf-8 ```h #pragma once #include <ATen/core/ivalue.h> #include <torch/csrc/jit/api/module.h> #include <torch/csrc/jit/python/pybind_utils.h> #include <memory> #include <string> #include <vector> namespace torch::jit { enum class IterableModuleKind { NONE, LIST, DICT, PARAMLIST, PARAMDICT }; class ConcreteModuleType; // You can think of an nn.Module as a template that corresponds to a family of // JIT types. The template "arguments" are things like the constant values. // e.g. // class M(nn.Module): // __constants__ = ["const"] // ... // // Is similar to writing the following in C++: // // template<TConst> // class M { // ... // } // // We need to consider each different member of the type family a different JIT // type because, e.g. different constant values lead to different versions of // the same method. // // ConcreteModuleType corresponds to a single member of the type family, with // all template arguments fully specified. Two Modules that share a // ConcreteModuleType can share a JIT type, and vice versa. // // Why not just use a JIT type to represent concrete types? Because constants, // function attributes, etc. are currently not representable in the type system, // so this acts a non-first-class way of tracking concrete types. // // ConcreteModuleType is also the source of truth for servicing all // ModuleValue::attr calls. This is so we can guarantee that if two Module's // share a JIT type (and thus a ConcreteModuleType), then they behave the same // way when you access attributes on them. // ConcreteModuleType has two phases. // 1. Creation: First we build it up, during the ScriptModule conversion // process. This is represented by ConcreteModuleTypeBuilder. // ...then the converter calls ConcreteModuleTypeBuilder::build(), producing // a // ConcreteModuleType ready for querying. // 2. Querying: We use ConcreteModuleType as a source of truth for // ModuleValue::attr calls during method compilation. // Represents a concrete type during in the process for construction. We use // this to decide whether we can share types between modules. 
class VISIBILITY_HIDDEN ConcreteModuleTypeBuilder { public: explicit ConcreteModuleTypeBuilder(py::object pyClass) { TORCH_INTERNAL_ASSERT(pyClass); pyClass_ = std::move(pyClass); } void addConstant(std::string name, py::object value); void addConstant(std::string name, IValue value); void addAttribute( std::string name, const TypePtr& type, bool isParameter, bool isBuffer); void addFunctionAttribute( std::string name, const TypePtr& type, py::object pyFunction); void addModule(std::string name, std::shared_ptr<ConcreteModuleType> meta); void addForwardHook(py::object hook); void addForwardPreHook(py::object pre_hook); void addOverload( std::string methodName, std::vector<std::string> overloadedMethodNames); void addBuiltinFunction(std::string name, const std::string& symbol_name); void addFailedAttribute(std::string name, std::string failureReason); void addIgnoredAttribute(std::string name); void setIterableModuleKind(IterableModuleKind kind); // If a ConcreteModuleType is poisoned, it will never compare equal to any // other concrete type void setPoisoned(); std::shared_ptr<ConcreteModuleType> build() const { return std::make_shared<ConcreteModuleType>(*this); } // This determines whether two modules can share a type. The container structs // used by ConcreteModuleType have been defined such that operator== // implements a meaningful comparison in that context. bool equals(const ConcreteModuleTypeBuilder& other) const; struct FunctionAttribute { FunctionTypePtr function_; py::object pyFunction_; friend bool operator==( const FunctionAttribute& lhs, const FunctionAttribute& rhs) { // Functions are not first class, so we can't do type comparison like a // regular attribute. So we do a pointer equality check on the actual // Python function object. return lhs.pyFunction_.is(rhs.pyFunction_); } }; struct Attribute { Attribute(TypePtr type, bool isParam, bool isBuffer) : type_(std::move(type)), isParam_(isParam), isBuffer_(isBuffer) {} friend bool operator==(const Attribute& lhs, const Attribute& rhs) { return *(lhs.type_) == *(rhs.type_) && lhs.isParam_ == rhs.isParam_; } TypePtr type_; bool isParam_; bool isBuffer_; }; struct ModuleInfo { ModuleInfo(std::string name, std::shared_ptr<ConcreteModuleType> meta) : name_(std::move(name)), meta_(std::move(meta)) {} friend bool operator==(const ModuleInfo& lhs, const ModuleInfo& rhs); std::string name_; std::shared_ptr<ConcreteModuleType> meta_; }; private: ConcreteModuleTypeBuilder() = default; ClassTypePtr createTypeFromThis() const; // If true, this type will never compare equally to anything else. This is // used if we want to ensure that this type is not shared (for example, if it // came from a traced module) bool isPoisoned_ = false; // The value of any constants defined by the module. std::unordered_map<std::string, IValue> constants_; // The types of any attributes OrderedDict<std::string, Attribute> attributes_; // Overloads, in the same format as `__overloads__` in Python std::unordered_map<std::string, std::vector<std::string>> overloads_; // Any attributes we failed to convert to TorchScript, along with a hint as to // why std::unordered_map<std::string, std::string> failedAttributes_; // Any attributes that were marked as ignored. They cannot be used in // TorchScript but can still be used in ignored function in Python. std::unordered_set<std::string> ignoredAttributes_; // Any function attributes. These are special right now because functions are // not first-class in the type system. 
std::unordered_map<std::string, FunctionAttribute> functionAttributes_; // Function attributes that are calls to builtin functions. These get // de-sugared directly into the corresponding aten:: call. The map is // attribute name -> aten symbol name std::unordered_map<std::string, c10::Symbol> builtinFunctions_; // The concrete types of any submodules std::vector<ModuleInfo> modules_; // Hooks to be called before/after forward when the module // is called directly. Used to ensure modules have different types // when they have different python hooks // Actual hooks are added to ClassType directly during compilation std::vector<py::object> forwardHooks_; std::vector<py::object> forwardPreHooks_; // If something is a ModuleDict/ModuleList, it means: // 1. The order of the submodules matters for comparing the type // 2. The compiler is allowed to treat it like a dict/tuple IterableModuleKind iterableModuleKind_ = IterableModuleKind::NONE; // The original `nn.Module` class that we derived this ScriptModule from. py::object pyClass_; // NOTE: If you ever add any more state to this struct, you need to make sure // operator== still makes sense! friend ConcreteModuleType; }; // Represents a finalized concrete type, used to service ModuleValue::attr calls // during method compilation. class VISIBILITY_HIDDEN ConcreteModuleType { public: explicit ConcreteModuleType(ConcreteModuleTypeBuilder data); static std::shared_ptr<ConcreteModuleType> fromJitType(TypePtr type); TypePtr getJitType() const; std::optional<py::object> getPyClass() const; IterableModuleKind getIterableModuleKind() const; std::optional<std::vector<std::string>> findOverloads( const std::string& name) const; std::optional<Function*> findFunctionAttribute(const std::string& name) const; std::optional<c10::Symbol> findBuiltinFunction(const std::string& name) const; std::shared_ptr<ConcreteModuleType> findSubmoduleConcreteType( const std::string& name) const; std::optional<std::string> findFailedAttribute(const std::string& name) const; bool isIgnoredAttribute(const std::string& name) const; // These getters are only here to return things as types that can be // automatically converted by pybind. std::unordered_map<std::string, py::object> getConstantsPy() const; std::unordered_map<std::string, std::pair<TypePtr, bool>> getAttributesPy() const; std::vector<std::pair<std::string, std::shared_ptr<ConcreteModuleType>>> getModulesPy() const; bool equals(const ConcreteModuleType& other) const { if (jitType_ == other.jitType_) { // If the computed types are the same, these modules can (obviously) share // a type. return true; } return data_.equals(other.data_); } bool equals(const ConcreteModuleTypeBuilder& other) const { return data_.equals(other); } void dump() const; private: ConcreteModuleType() = default; // The JIT type derived from this ConcreteModuleType. ConcreteModuleTypeBuilder data_; TypePtr jitType_; }; } // namespace torch::jit ```
======================================================================================================================== SOURCE CODE FILE: convert_to_ssa.h LINES: 1 SIZE: 0.31 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\frontend\convert_to_ssa.h ENCODING: utf-8
```h
#pragma once

#include <functional>
#include <memory>
#include <string>

#include <torch/csrc/Export.h>
#include <torch/csrc/jit/ir/ir.h>

namespace torch::jit {
// Convert a graph with Loads & Stores into SSA form
TORCH_API void ConvertToSSA(std::shared_ptr<Graph>& graph);
} // namespace torch::jit
```
======================================================================================================================== SOURCE CODE FILE: edit_distance.h LINES: 1 SIZE: 0.24 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\frontend\edit_distance.h ENCODING: utf-8
```h
#pragma once

#include <torch/csrc/Export.h>
#include <cstddef>

namespace torch::jit {
TORCH_API size_t ComputeEditDistance(
    const char* word1,
    const char* word2,
    size_t maxEditDistance);
} // namespace torch::jit
```
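A minimal sketch of calling this helper directly; the frontend uses it to suggest near-miss identifiers in error messages.

```cpp
#include <torch/csrc/jit/frontend/edit_distance.h>
#include <iostream>

int main() {
  // The third argument caps how far the edit-distance search is allowed to go.
  size_t d =
      torch::jit::ComputeEditDistance("conv2d", "conv2", /*maxEditDistance=*/2);
  std::cout << d << '\n'; // 1: a single deletion turns "conv2d" into "conv2"
  return 0;
}
```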
============================================================================================================================================ SOURCE CODE FILE: error_report.h LINES: 1 SIZE: 1.45 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\frontend\error_report.h ENCODING: utf-8 ```h #pragma once #include <torch/csrc/jit/frontend/tree.h> namespace torch::jit { struct Call { std::string fn_name; SourceRange caller_range; }; struct TORCH_API ErrorReport : public std::exception { ErrorReport(const ErrorReport& e); explicit ErrorReport(const SourceRange& r); explicit ErrorReport(const TreeRef& tree) : ErrorReport(tree->range()) {} explicit ErrorReport(const Token& tok) : ErrorReport(tok.range) {} const char* what() const noexcept override; struct TORCH_API CallStack { // These functions are used to report why a function was being compiled // (i.e. what was the call stack of user functions at compilation time that // led to this error) CallStack(const std::string& name, const SourceRange& range); ~CallStack(); // Change the range that is relevant for the current function (i.e. after // each successful expression compilation, change it to the next expression) static void update_pending_range(const SourceRange& range); }; static std::string current_call_stack(); private: template <typename T> friend const ErrorReport& operator<<(const ErrorReport& e, const T& t); mutable std::stringstream ss; OwnedSourceRange context; mutable std::string the_message; std::vector<Call> error_stack; }; template <typename T> const ErrorReport& operator<<(const ErrorReport& e, const T& t) { e.ss << t; return e; } } // namespace torch::jit ```
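A sketch of the streaming error idiom this header provides (the same `throw ErrorReport(range) << ...` pattern appears in parse_string_literal.h below); `report_undefined` and its arguments are invented for illustration.

```cpp
#include <torch/csrc/jit/frontend/error_report.h>
#include <iostream>

using namespace torch::jit;

// Build a message with operator<< against a source range and throw, the way
// the frontend reports compile errors; `range` would normally come from a
// Token or Tree.
void report_undefined(const SourceRange& range, const std::string& name) {
  try {
    throw ErrorReport(range) << "undefined value " << name
                             << "; did you forget to define it?";
  } catch (const ErrorReport& e) {
    std::cerr << e.what() << '\n'; // message plus highlighted source context
  }
}
```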
======================================================================================================================== SOURCE CODE FILE: exit_transforms.h LINES: 1 SIZE: 0.20 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\frontend\exit_transforms.h ENCODING: utf-8
```h
#pragma once

#include <torch/csrc/Export.h>
#include <torch/csrc/jit/ir/ir.h>

namespace torch::jit {
TORCH_API void TransformExits(std::shared_ptr<Graph>& graph);
} // namespace torch::jit
```
======================================================================================================================== SOURCE CODE FILE: function_schema_parser.h LINES: 1 SIZE: 0.81 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\frontend\function_schema_parser.h ENCODING: utf-8
```h
#pragma once

#include <ATen/core/function_schema.h>
#include <c10/macros/Macros.h>
#include <string>
#include <variant>

namespace torch::jit {

// allow_typevars: If true, we assume that lowercase types that we don't
// understand are type variables. This is only needed for TorchScript (and not
// needed for custom ops).
// If false, we disallow typevars, except in certain cases for BC reasons (i.e.
// your op is in the aten or prim namespace).
TORCH_API std::variant<c10::OperatorName, c10::FunctionSchema> parseSchemaOrName(
    const std::string& schemaOrName,
    bool allow_typevars = true);
TORCH_API c10::FunctionSchema parseSchema(
    const std::string& schema,
    bool allow_typevars = true);
TORCH_API c10::OperatorName parseName(const std::string& name);

} // namespace torch::jit
```
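A short usage sketch for the two entry points above; the schema string is a standard aten schema quoted here only as an example input.

```cpp
#include <torch/csrc/jit/frontend/function_schema_parser.h>
#include <iostream>
#include <variant>

int main() {
  // Parse a full JIT schema string into a FunctionSchema.
  c10::FunctionSchema schema = torch::jit::parseSchema(
      "aten::add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor");
  std::cout << schema.name() << " has " << schema.arguments().size()
            << " arguments\n";

  // parseSchemaOrName accepts either a bare operator name or a full schema.
  auto name_or_schema = torch::jit::parseSchemaOrName("aten::relu");
  std::cout << std::holds_alternative<c10::OperatorName>(name_or_schema)
            << '\n'; // 1: a bare name parses to an OperatorName
  return 0;
}
```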
======================================================================================================================== SOURCE CODE FILE: inline_loop_condition.h LINES: 1 SIZE: 0.33 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\frontend\inline_loop_condition.h ENCODING: utf-8
```h
#pragma once

#include <functional>
#include <memory>
#include <string>

#include <torch/csrc/Export.h>
#include <torch/csrc/jit/ir/ir.h>

namespace torch::jit {
TORCH_API void InlineLoopCondition(std::shared_ptr<Graph>& graph);
TORCH_API void InlineBlockBeforeNode(Node* before_node, Block* block);
} // namespace torch::jit
```
======================================================================================================================== SOURCE CODE FILE: ir_emitter.h LINES: 1 SIZE: 0.52 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\frontend\ir_emitter.h ENCODING: utf-8
```h
#pragma once

#include <functional>
#include <memory>
#include <string>

#include <torch/csrc/jit/api/module.h>
#include <torch/csrc/jit/frontend/error_report.h>
#include <torch/csrc/jit/frontend/resolver.h>
#include <torch/csrc/jit/frontend/sugared_value.h>
#include <torch/csrc/jit/frontend/tree_views.h>
#include <torch/csrc/jit/ir/ir.h>

namespace torch::jit {
TORCH_API void runCleanupPasses(std::shared_ptr<Graph>& to_clean);

TORCH_API bool meaningfulName(const std::string& name);
} // namespace torch::jit
```
===================================================================================================================================== SOURCE CODE FILE: lexer.h LINES: 8 SIZE: 19.04 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\frontend\lexer.h ENCODING: utf-8 ```h #pragma once #include <c10/macros/Macros.h> #include <c10/util/Exception.h> #include <torch/csrc/Export.h> #include <torch/csrc/jit/frontend/parser_constants.h> #include <torch/csrc/jit/frontend/source_range.h> #include <torch/csrc/jit/frontend/strtod.h> #include <algorithm> #include <clocale> #include <cstdlib> #include <memory> #include <sstream> #include <string> #include <vector> namespace torch::jit { // single character tokens are just the character itself '+' // multi-character tokens need an entry here // if the third entry is not the empty string, it is used // in the lexer to match this token. // These kinds are also used in Tree.h as the kind of the AST node. // Some kinds TK_APPLY, TK_LIST are only used in the AST and are not seen in the // lexer. #define TC_FORALL_TOKEN_KINDS(_) \ _(TK_EOF, "eof", "") \ _(TK_WHITESPACE, "whitespace", "") \ _(TK_WHITESPACE_EOF, "whitespace_eof", "") \ _(TK_NUMBER, "number", "") \ _(TK_NEWLINE, "newline", "") \ _(TK_INDENT, "indent", "") \ _(TK_DEDENT, "dedent", "") \ _(TK_DEF, "def", "def") \ _(TK_EQUIVALENT, "equivalent", "<=>") \ _(TK_IDENT, "ident", "") \ _(TK_STRING, "string", "") \ _(TK_STRINGLITERAL, "string_literal", "") \ _(TK_CONST, "const", "") \ _(TK_LIST, "list", "") \ _(TK_DICT, "dict", "") \ _(TK_OPTION, "option", "") \ _(TK_APPLY, "apply", "") \ _(TK_COMPREHENSION, "comprehension", "") \ _(TK_RANGE_CONSTRAINT, "range_constraint", "") \ _(TK_PARAM, "param", "") \ _(TK_INFERRED, "inferred", "") \ _(TK_ACCESS, "access", "") \ _(TK_ASSIGN, "assign", "") \ _(TK_AUG_ASSIGN, "aug_assign", "") \ _(TK_ATTRIBUTE, "attribute", "") \ _(TK_IF, "if", "if") \ _(TK_ELSE, "else", "else") \ _(TK_ELIF, "elif", "elif") \ _(TK_WHILE, "while", "while") \ _(TK_EXPR_STMT, "expression statement", "") \ _(TK_RETURN, "return", "return") \ _(TK_IS, "is", "is") \ _(TK_ISNOT, "is not", "is not") \ _(TK_NE, "ne", "!=") \ _(TK_EQ, "eq", "==") \ _(TK_LE, "le", "<=") \ _(TK_GE, "ge", ">=") \ _(TK_FLOOR_DIV, "floordiv", "//") \ _(TK_IF_EXPR, "if", "") \ _(TK_TRUE, "True", "True") \ _(TK_FALSE, "False", "False") \ _(TK_NONE, "None", "None") \ _(TK_AND, "and", "and") \ _(TK_OR, "or", "or") \ _(TK_NOT, "not", "not") \ _(TK_LSHIFT, "<<", "<<") \ _(TK_RSHIFT, ">>", ">>") \ _(TK_CAST, "cast", "") \ _(TK_PLUS_EQ, "+=", "+=") \ _(TK_MINUS_EQ, "-=", "-=") \ _(TK_TIMES_EQ, "*=", "*=") \ _(TK_DIV_EQ, "/=", "/=") \ _(TK_MOD_EQ, "%=", "%=") \ _(TK_BIT_OR_EQ, "|=", "|=") \ _(TK_BIT_AND_EQ, "&=", "&=") \ _(TK_BIT_XOR_EQ, "^=", "^=") \ _(TK_LSHIFT_EQ, "<<=", "<<=") \ _(TK_RSHIFT_EQ, ">>=", ">>=") \ _(TK_POW_EQ, "**=", "**=") \ _(TK_GLOBAL, "global", "global") \ _(TK_BUILT_IN, "built-in", "") \ _(TK_SUBSCRIPT, "subscript", "") \ _(TK_VAR, "variable", "") \ _(TK_NOTHING, "nothing", "") \ _(TK_DICT_LITERAL, "dict-literal", "") \ _(TK_LIST_LITERAL, "list-literal", "") \ _(TK_TUPLE_LITERAL, "tuple-literal", "") \ _(TK_FOR, "for", "for") \ _(TK_IN, "in", "in") \ _(TK_NOTIN, "not in", "not in") \ _(TK_STARRED, "starred", "") \ _(TK_UNARY_MINUS, "unary minus", "") \ _(TK_POW, "pow operator", "**") \ _(TK_ARROW, "arrow", "->") \ _(TK_DECL, "decl", "") \ _(TK_SLICE_EXPR, "slice expr", "") \ _(TK_TYPE_COMMENT, "type comment", "# type:") \ _(TK_RAISE, "raise", "raise") \ _(TK_ASSERT, "assert", 
"assert") \ _(TK_DOTS, "dots", "...") \ _(TK_LIST_COMP, "list comprehension", "") \ _(TK_DICT_COMP, "dict comprehension", "") \ _(TK_BREAK, "break", "break") \ _(TK_CONTINUE, "continue", "continue") \ _(TK_DELETE, "del", "del") \ _(TK_PASS, "pass", "pass") \ _(TK_CLASS_DEF, "class", "class") \ _(TK_IMPORT, "import", "import") \ _(TK_WITH, "with", "with") \ _(TK_WITH_ITEM, "withitem", "") \ _(TK_AS, "as", "as") \ _(TK_PROP, "property", "") \ _(TK_ELLIPSIS, "Ellipsis", "Ellipsis") \ _(TK_NONE_TYPE, "NoneType", "NoneType") enum TokenKind { // we use characters to represent themselves so skip all valid characters // before // assigning enum values to multi-char tokens. TK_DUMMY_START = 256, #define DEFINE_TOKEN(tok, _, _2) tok, TC_FORALL_TOKEN_KINDS(DEFINE_TOKEN) #undef DEFINE_TOKEN }; TORCH_API std::string kindToString(int kind); TORCH_API int stringToKind(const std::string& str); // nested hash tables that indicate char-by-char what is a valid token. struct TokenTrie; using TokenTrieRef = std::unique_ptr<TokenTrie>; struct TokenTrie { TokenTrie() = default; void insert(const char* str, int tok) { if (*str == '\0') { AT_ASSERT(kind == 0); kind = tok; return; } for (size_t i = 0, e = child_chars.size(); i < e; ++i) { if (child_chars[i] == *str) { child_tries[i]->insert(str + 1, tok); return; } } child_chars.emplace_back(*str); child_tries.emplace_back(std::make_unique<TokenTrie>()); child_tries.back()->insert(str + 1, tok); } int kind{0}; // 0 == invalid token std::vector<char> child_chars; std::vector<TokenTrieRef> child_tries; }; // stuff that is shared against all TC lexers/parsers and is initialized only // once. struct TORCH_API SharedParserData { SharedParserData() : head(new TokenTrie()) { for (const char* c = valid_single_char_tokens; *c; c++) { std::string str(1, *c); head->insert(str.c_str(), *c); } #define ADD_CASE(tok, _, tokstring) \ if (*(tokstring) != '\0') { \ head->insert((tokstring), (tok)); \ } TC_FORALL_TOKEN_KINDS(ADD_CASE) #undef ADD_CASE } bool match( StringCordView::Iterator pos, bool continuation, // are we inside a scope where newlines don't count // (e.g. inside parens) bool whitespace_token, // should we treat whitespace as a token int* kind, StringCordView::Iterator* start, StringCordView::Iterator* end) { *start = pos; // skip whitespace while (pos.has_next() && isblank(*pos)) { ++pos; } // special handling if (pos.has_next()) { if (*pos == '#' && !isTypeComment(pos)) { // skip comments while (pos.has_next() && *pos != '\n') ++pos; // tail call, handle whitespace and more comments return match(pos, continuation, whitespace_token, kind, start, end); } if (*pos == '\\') { auto newiter = pos; ++newiter; if (newiter.has_next() && *newiter == '\n' && !whitespace_token) { ++newiter; return match(newiter, continuation, false, kind, start, end); } } if (*pos == '\n') { return match(++pos, continuation, !continuation, kind, start, end); } } // we handle white space before EOF because in the case we have something // like the following where we need to generate the dedent token if foo: // ... // else: // pass if (whitespace_token) { *kind = !pos.has_next() ? 
TK_WHITESPACE_EOF : TK_WHITESPACE; *end = pos; return true; } if (!pos.has_next()) { *kind = TK_EOF; *start = pos; *end = *start; return true; } // invariant: the next token is not whitespace or newline *start = pos; // check for a valid number size_t len = 0; if (isNumber(pos.rest_line(), 0, &len)) { *end = *start; *end += len; *kind = TK_NUMBER; return true; } // check for string if (isString(pos.rest_line(), 0, &len)) { *kind = TK_STRINGLITERAL; *end = *start; *end += len; return true; } // check for either an ident or a token // ident tracks whether what we have scanned so far could be an identifier // matched indicates if we have found any match. bool matched = false; bool ident = true; TokenTrie* cur = head.get(); // for (size_t i = 0; pos + i < str.size() && (ident || cur != nullptr); // i++) for (size_t i = 0; pos.has_next() && (ident || cur != nullptr); ++pos, ++i) { ident = ident && validIdent(i, *pos); if (ident) { matched = true; *end = pos.next_iter(); *kind = TK_IDENT; } // check for token second, so that e.g. 'max' matches the token TK_MAX // rather the // identifier 'max' if (cur) { const auto begin_it = cur->child_chars.begin(); const auto end_it = cur->child_chars.end(); const auto ch_it = std::find(begin_it, end_it, *pos); cur = (ch_it == end_it) ? nullptr : cur->child_tries[ch_it - begin_it].get(); if (cur && cur->kind != 0) { matched = true; *end = pos.next_iter(); *kind = cur->kind; } } } return matched; } bool isUnary(int kind, int* prec); bool isBinary(int kind, int* prec); bool isRightAssociative(int kind) { switch (kind) { case '?': case TK_POW: case TK_IF: return true; default: return false; } } private: bool validIdent(size_t i, char n) { return isalpha(n) || n == '_' || (i > 0 && isdigit(n)); } // 1. skip whitespace // 2. handle comment or newline // bool isNumber(std::string_view str, size_t start, size_t* len) { char first = str[start]; // strtod allows numbers to start with + or - or nan or inf // http://en.cppreference.com/w/cpp/string/byte/strtof // but we want only the number part, otherwise 1+3 will turn into two // adjacent numbers in the lexer if (first == '-' || first == '+' || isalpha(first)) return false; const char* startptr = str.data() + start; char* endptr = nullptr; torch::jit::strtod_c(startptr, &endptr); *len = endptr - startptr; // check if the number is complex valued // access is safe because string is assumed to be null terminated if (endptr != nullptr && *endptr == 'j') { *len += 1; } return *len > 0; } bool isCharCount(char c, std::string_view str, size_t start, int len) { // count checks from [start, start + len) return start + len <= str.size() && std::count(str.begin() + start, str.begin() + start + len, c) == len; } // python concatenates all adjacent strings "a" "b" == "ab" // strings can be enclosed with 1 or 3 single or double quotes // if enclosed with 3 quotes newlines are valid // as elsewhere, backslash and new line should be ignored bool isString(std::string_view str, size_t start, size_t* len) { char quote = str[start]; if (quote != '\"' && quote != '\'') return false; int quote_len = isCharCount(quote, str, start, 3) ? 3 : 1; // end is now set past the opening quotation marks size_t end = start + quote_len; while (end < str.size() && !isCharCount(quote, str, end, quote_len)) { if (str[end] == '\n' && quote_len != 3) { return false; } // handle escaped characters. 
advances past escaped quotation marks, // escaped newlines and escaped backslashes // multi-char escapes like \x1A are handled fine here because the // remainder of the escape are valid string characters anyway if (str[end] == '\\') { end++; } end++; } // set length equal to the complete string including quotations *len = end - start + quote_len; // if end finished without going past the last character of the string than // there is a match return end < str.size(); } bool isblank(int n) { return isspace(n) && n != '\n'; } bool isTypeComment(StringCordView::Iterator str_iter) { std::string_view rest_line = str_iter.rest_line(); const std::string type_string = "# type:"; if (rest_line.size() < type_string.length()) { return false; } auto match_string = rest_line.substr(0, type_string.size()); return match_string == type_string; } // Make an exception ignoring comments for type annotation comments bool isTypeComment(const StringCordView& str, size_t pos) { const std::string type_string = "# type:"; if (str.size() < pos + type_string.length()) { return false; } auto match_string = str.substr(pos, type_string.size()); return match_string == type_string; } TokenTrieRef head; }; TORCH_API SharedParserData& sharedParserData(); struct Token { int kind; SourceRange range; Token(int kind, SourceRange range) : kind(kind), range(std::move(range)) {} std::string text() { return std::string(range.token_text()); } std::string kindString() const { return kindToString(kind); } }; struct Lexer { explicit Lexer(std::shared_ptr<Source> source) : source(std::move(source)), indent_stack(), next_tokens(), shared(sharedParserData()) { auto first_indent = lexRaw(true); indent_stack.push_back(first_indent.range.size()); lex(); } // Return the current token, and then move to the next one Token next() { if (next_tokens.empty()) reportError("Lexer invariant violated: empty token queue"); Token r = std::move(next_tokens.front()); next_tokens.erase(next_tokens.begin()); if (next_tokens.empty()) { lex(); } return r; } // Skip the current token if it matches the given kind bool nextIf(int kind) { if (cur().kind != kind) return false; next(); return true; } [[noreturn]] void reportError(const std::string& what) { reportError(what, cur()); } [[noreturn]] void reportError(const std::string& what, const Token& t) { std::stringstream ss; ss << what << ":\n"; t.range.highlight(ss); throw std::runtime_error(ss.str()); } [[noreturn]] void expected(const std::string& what, const Token& t) { std::stringstream ss; ss << "expected " << what << " but found '" << t.kindString() << "' here:\n"; t.range.highlight(ss); throw std::runtime_error(ss.str()); } [[noreturn]] void expected(const std::string& what) { expected(what, cur()); } // Check that the current token has a given kind, return the current token, // and advance to the next one. Token expect(int kind) { if (cur().kind != kind) { expected(kindToString(kind)); } return next(); } Token& lookahead() { if (next_tokens.size() < 2) { lex(); } return next_tokens[1]; } Token& cur() { return next_tokens.front(); } private: void lex() { auto r = lexRaw(); switch (r.kind) { case '(': case '[': case '{': nesting++; break; case ')': case ']': case '}': nesting--; break; case TK_WHITESPACE: case TK_WHITESPACE_EOF: { const auto depth = r.kind == TK_WHITESPACE_EOF ? 
indent_stack.front() : r.range.size(); // note: TK_WHITESPACE_EOF is whitespace right before the EOF token // just like we allow the code to be indented to a particular initial // indent level, we allow the final indent to be anything and set // it back to the initial indent level. This allows the code to be // put into string literals inside code without worrying about final // whitespace if (depth > indent_stack.back()) { indent_stack.push_back(depth); r.kind = TK_INDENT; } else if (depth == indent_stack.back()) { r.kind = TK_NEWLINE; } else { next_tokens.emplace_back(TK_NEWLINE, r.range); while (indent_stack.back() != depth) { indent_stack.pop_back(); next_tokens.emplace_back(TK_DEDENT, r.range); if (indent_stack.empty()) { reportError("invalid indent level " + std::to_string(depth), r); } } return; // We've already queued the tokens } } break; default: break; } next_tokens.push_back(std::move(r)); } Token lexRaw(bool whitespace_token = false) { AT_ASSERT(source); if (current == nullptr) { AT_ASSERT(pos == 0); current = std::make_unique<StringCordView::Iterator>( source->text_str().begin()); } StringCordView::Iterator start_iter = *current; StringCordView::Iterator end_iter = *current; int kind = 0; if (!shared.match( *current, nesting > 0, whitespace_token, &kind, &start_iter, &end_iter)) { expected( "a valid token", Token( **current, SourceRange(source, start_iter, start_iter.pos() + 1))); } auto t = Token(kind, SourceRange(source, start_iter, end_iter.pos())); pos = end_iter.pos(); *current = end_iter; return t; } std::shared_ptr<Source> source; std::unique_ptr<StringCordView::Iterator> current; size_t pos{0}; size_t nesting{0}; // depth of ( [ { nesting... std::vector<size_t> indent_stack; // stack of indentation level of blocks // Invariant: this should always contain at least a single element std::vector<Token> next_tokens; // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members) SharedParserData& shared; }; } // namespace torch::jit ```
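A small sketch of driving the Lexer directly over a TorchScript snippet; the input string is arbitrary and only meant to show the synthetic INDENT / NEWLINE / DEDENT tokens being emitted alongside ordinary tokens.

```cpp
#include <torch/csrc/jit/frontend/lexer.h>
#include <iostream>
#include <memory>

using namespace torch::jit;

int main() {
  // Tokenize a tiny snippet; next() returns the current token and advances.
  auto source = std::make_shared<Source>("def f(x):\n    return x + 1\n");
  Lexer lexer(source);
  while (lexer.cur().kind != TK_EOF) {
    Token tok = lexer.next();
    std::cout << kindToString(tok.kind) << " '" << tok.text() << "'\n";
  }
  return 0;
}
```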
================================================================================================================================================ SOURCE CODE FILE: mini_environment.h LINES: 1 SIZE: 1.40 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\frontend\mini_environment.h ENCODING: utf-8 ```h #pragma once #include <ATen/core/jit_type.h> #include <torch/csrc/jit/ir/ir.h> namespace torch::jit { // Simple data structure for containing a type T in nested control blocks // Should only be used after initial compilation where type checking and // loads and stores are emitted template <typename T> struct MiniEnvironment { MiniEnvironment(Block* b, std::shared_ptr<MiniEnvironment> next = nullptr) : next(std::move(next)) {} // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) std::shared_ptr<MiniEnvironment<T>> next; T findInThisFrame(const std::string& name) { auto it = table.find(name); if (it != table.end()) { return it->second; } return nullptr; } T findInAnyFrame(const std::string& name) { for (auto runner = this; runner; runner = runner->next.get()) { if (auto r = runner->findInThisFrame(name)) { return r; } } return nullptr; } void setVar(const std::string& name, T value) { table[name] = value; } std::vector<std::string> definedVariables() { std::vector<std::string> result; result.reserve(table.size()); for (auto& kv : table) { result.push_back(kv.first); } std::sort(result.begin(), result.end()); return result; } private: std::unordered_map<std::string, T> table; }; } // namespace torch::jit ```
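A minimal sketch of the nested-scope lookup behaviour described above, using `Value*` as the stored type; the `demo` function and variable name are invented for illustration.

```cpp
#include <torch/csrc/jit/frontend/mini_environment.h>

using namespace torch::jit;

// Two nested frames: lookups in the inner frame only fall back to the outer
// one through findInAnyFrame.
void demo(Graph& g) {
  auto outer = std::make_shared<MiniEnvironment<Value*>>(g.block());
  auto inner = std::make_shared<MiniEnvironment<Value*>>(g.block(), outer);

  Value* x = g.addInput("x");
  outer->setVar("x", x);

  TORCH_INTERNAL_ASSERT(inner->findInThisFrame("x") == nullptr);
  TORCH_INTERNAL_ASSERT(inner->findInAnyFrame("x") == x);
}
```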
======================================================================================================================== SOURCE CODE FILE: name_mangler.h LINES: 1 SIZE: 0.64 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\frontend\name_mangler.h ENCODING: utf-8
```h
#pragma once

#include <ATen/core/qualified_name.h>
#include <torch/csrc/Export.h>

namespace torch::jit {

/**
 * class NameMangler
 *
 * Utility to mangle qualified names in order to make them unique. We use this
 * in various places where we need to de-duplicate qualified names.
 */
class TORCH_API NameMangler {
 public:
  // Given a qualified name, return a mangled version that is guaranteed to be
  // unique with respect to previous/future calls of `mangle()` on this name
  // mangler instance.
  c10::QualifiedName mangle(const c10::QualifiedName& name);

 private:
  size_t mangleIndex_ = 0;
};

} // namespace torch::jit
```
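A short usage sketch; the qualified name is an arbitrary example.

```cpp
#include <torch/csrc/jit/frontend/name_mangler.h>
#include <iostream>

int main() {
  torch::jit::NameMangler mangler;
  c10::QualifiedName name("__torch__.MyModule");
  // Each call returns a distinct mangled variant of the same qualified name.
  std::cout << mangler.mangle(name).qualifiedName() << '\n';
  std::cout << mangler.mangle(name).qualifiedName() << '\n'; // differs
  return 0;
}
```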
==================================================================================================================================================== SOURCE CODE FILE: parse_string_literal.h LINES: 4 SIZE: 2.33 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\frontend\parse_string_literal.h ENCODING: utf-8 ```h #pragma once #include <torch/csrc/jit/frontend/error_report.h> #include <torch/csrc/jit/frontend/lexer.h> #include <optional> namespace torch::jit { inline bool isCharCount(char c, const std::string& str, size_t start, int len) { // count checks from [start, start + len) return start + len <= str.size() && std::count( str.begin() + static_cast<ptrdiff_t>(start), str.begin() + static_cast<ptrdiff_t>(start + len), c) == len; } inline std::optional<char> parseOctal(const std::string& str, size_t pos) { //\xxx where x are 0-7 if (pos + 3 >= str.size()) return std::nullopt; size_t c = 0; for (size_t i = 1, b = 64; i < 4; ++i, b /= 8) { auto d = str[pos + i]; if (d < '0' || d > '7') return std::nullopt; c += b * (d - '0'); } if (c >= 256) return std::nullopt; return c; } inline std::string parseStringLiteral( const SourceRange& range, const std::string& str) { size_t quote_len = isCharCount(str[0], str, 0, 3) ? 3 : 1; auto ret_str = str.substr(quote_len, str.size() - quote_len * 2); size_t pos = ret_str.find('\\'); while (pos != std::string::npos) { // invariant: pos has to escape a character because it is a valid string char c = ret_str[pos + 1]; size_t to_erase = 2; switch (ret_str[pos + 1]) { case '\\': case '\'': case '\"': case '\n': break; case 'a': c = '\a'; break; case 'b': c = '\b'; break; case 'f': c = '\f'; break; case 'n': c = '\n'; break; case 'v': c = '\v'; break; case 't': c = '\t'; break; case 'x': throw(ErrorReport(range) << "unsupported hex specifier"); case 'u': case 'U': throw(ErrorReport(range) << "unsupported unicode specifier"); default: // octal value in format \nnn, n is [0-7] if (auto v = parseOctal(ret_str, pos)) { to_erase = 4; c = *v; } else { throw(ErrorReport(range) << " ill formed octal specifier"); } } ret_str.replace(pos, to_erase, /* num copies */ 1, c); pos = ret_str.find('\\', pos + 1); } return ret_str; } } // namespace torch::jit ```
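A sketch of calling `parseStringLiteral` outside the parser. The SourceRange is only consulted for error reporting, so pointing it at the literal itself is enough for this example; the literal contents are arbitrary.

```cpp
#include <torch/csrc/jit/frontend/parse_string_literal.h>
#include <iostream>
#include <memory>

using namespace torch::jit;

int main() {
  // The C++ literal below holds the 14 characters 'hello\nworld' including
  // the surrounding single quotes and a backslash-n escape.
  std::string literal = "'hello\\nworld'";
  auto source = std::make_shared<Source>(literal);
  SourceRange range(source, 0, source->text_str().size());

  std::string parsed = parseStringLiteral(range, literal);
  std::cout << parsed << '\n'; // quotes stripped, \n turned into a newline
  return 0;
}
```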
======================================================================================================================== SOURCE CODE FILE: parser.h LINES: 1 SIZE: 0.66 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\frontend\parser.h ENCODING: utf-8
```h
#pragma once

#include <torch/csrc/Export.h>
#include <torch/csrc/jit/frontend/tree.h>
#include <torch/csrc/jit/frontend/tree_views.h>
#include <memory>

namespace torch::jit {

struct Decl;
struct ParserImpl;
struct Lexer;

TORCH_API Decl mergeTypesFromTypeComment(
    const Decl& decl,
    const Decl& type_annotation_decl,
    bool is_method);

struct TORCH_API Parser {
  explicit Parser(const std::shared_ptr<Source>& src);
  TreeRef parseFunction(bool is_method);
  TreeRef parseClass();
  Decl parseTypeComment();
  Expr parseExp();
  Lexer& lexer();
  ~Parser();

 private:
  std::unique_ptr<ParserImpl> pImpl;
};

} // namespace torch::jit
```
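A minimal sketch of parsing one function definition into the frontend AST; the input snippet is arbitrary.

```cpp
#include <torch/csrc/jit/frontend/parser.h>
#include <iostream>
#include <memory>

using namespace torch::jit;

int main() {
  auto source = std::make_shared<Source>("def f(x):\n    return x + 1\n");
  Parser parser(source);
  // Parse one `def` into a Tree; is_method=false for free functions.
  TreeRef fn = parser.parseFunction(/*is_method=*/false);
  std::cout << kindToString(fn->kind()) << '\n'; // "def"
  return 0;
}
```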
======================================================================================================================== SOURCE CODE FILE: parser_constants.h LINES: 1 SIZE: 0.14 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\frontend\parser_constants.h ENCODING: utf-8
```h
#pragma once

namespace torch::jit {
static const char* valid_single_char_tokens = "+-*/%@()[]:,={}><.?!&^|~";
} // namespace torch::jit
```
======================================================================================================================== SOURCE CODE FILE: resolver.h LINES: 1 SIZE: 1.98 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\frontend\resolver.h ENCODING: utf-8
```h
#pragma once

#include <ATen/core/jit_type.h>
#include <ATen/core/qualified_name.h>
#include <torch/csrc/jit/frontend/sugared_value.h>

namespace torch::jit {

struct Resolver;
using ResolverPtr = std::shared_ptr<Resolver>;

/**
 * class Resolver
 *
 * Represents an "outer environment" in which we can look up names and return
 * a corresponding SugaredValue. This is used during compilation to resolve
 * references to names which are not defined internal to the graph.
 *
 * Example: PythonResolver looks at the enclosing Python scope for `name`.
 *
 * NOTE: When adding methods, keep this an abstract class (i.e. all new methods
 * should be purely virtual). Resist the urge to provide a default
 * implementation; you should explicitly think about how each resolver would
 * handle the method.
 */
struct Resolver {
  virtual ~Resolver() = default;

  // Resolve a given name to a SugaredValue. This takes the method `m` that the
  // caller is currently constructing, since we may need to insert nodes into
  // the graph to create a value.
  virtual std::shared_ptr<SugaredValue> resolveValue(
      const std::string& name,
      GraphFunction& m,
      const SourceRange& loc) {
    return nullptr;
  }

  // Resolve `name` to a TypePtr.
  virtual TypePtr resolveType(const std::string& name, const SourceRange& loc) {
    return nullptr;
  }
};

// A resolver that only understands "torch.foo()" lookups.
struct NativeResolver : public Resolver {
  std::shared_ptr<SugaredValue> resolveValue(
      const std::string& name,
      GraphFunction& m,
      const SourceRange& loc) override {
    if (name == "torch") {
      return std::make_shared<BuiltinModule>("aten");
    }
    return nullptr;
  }

  TypePtr resolveType(const std::string& name, const SourceRange& loc) override {
    return nullptr;
  }
};

inline std::shared_ptr<NativeResolver> nativeResolver() {
  return std::make_shared<NativeResolver>();
}
} // namespace torch::jit
```
=============================================================================================================================================== SOURCE CODE FILE: schema_matching.h LINES: 1 SIZE: 2.12 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\frontend\schema_matching.h ENCODING: utf-8 ```h #pragma once #include <torch/csrc/Export.h> #include <torch/csrc/jit/ir/ir.h> #include <torch/csrc/jit/ir/named_value.h> #include <ATen/core/function_schema.h> namespace torch::jit { // Try to match a list of inputs and keyword 'attributes' to this // schema. Return the flat list of positional inputs to the call or // `std::nullopt` on failure (`failure_messages` contains a good error // report in this case) struct MatchedSchema { std::vector<Value*> inputs; std::vector<TypePtr> return_types; c10::OptNameList return_field_names; std::string schema_name; }; TORCH_API bool isBlockListedSchema(const FunctionSchema& schema); TORCH_API MatchedSchema matchSchema( const ::c10::FunctionSchema& schema, const SourceRange& loc, Graph& graph, at::ArrayRef<NamedValue> args, at::ArrayRef<NamedValue> kwargs, const std::optional<NamedValue>& self = std::nullopt); TORCH_API std::pair<size_t, MatchedSchema> matchSchemas( const std::vector<const ::c10::FunctionSchema*>& schemas, const SourceRange& loc, Graph& graph, at::ArrayRef<NamedValue> args, at::ArrayRef<NamedValue> kwargs, const std::optional<NamedValue>& self = std::nullopt, bool render_errors = false); TORCH_API bool convertibleToList( const TypePtr& type, const TypePtr& list_type_); TORCH_API std::string getFullSchemaName(const ::c10::FunctionSchema& schema); TORCH_API Value* emitBuiltinCall( const SourceRange& loc, Graph& graph, Symbol name, at::ArrayRef<NamedValue> args, at::ArrayRef<NamedValue> kwargs, const std::optional<NamedValue>& self = std::nullopt); TORCH_API std::optional<size_t> findInputWithName( const std::string& name, at::ArrayRef<NamedValue> kwargs, bool is_aten = false); // applies implicit conversion from value trying to turn it into type // concrete_type it succeeds if the return_value->isSubtypeOf(concrete_type) TORCH_API Value* tryConvertToType( const SourceRange& loc, Graph& graph, const TypePtr& concrete_type, Value* value, bool allow_conversions); } // namespace torch::jit ```
================================================================================================================================================== SOURCE CODE FILE: schema_type_parser.h LINES: 1 SIZE: 1.22 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\frontend\schema_type_parser.h ENCODING: utf-8 ```h #pragma once #include <ATen/core/alias_info.h> #include <ATen/core/jit_type.h> #include <c10/macros/Macros.h> #include <c10/util/FunctionRef.h> #include <torch/csrc/jit/frontend/lexer.h> namespace torch::jit { using TypePtr = c10::TypePtr; struct TORCH_API SchemaTypeParser { TypePtr parseBaseType(); std::optional<c10::AliasInfo> parseAliasAnnotation(); std::pair<TypePtr, std::optional<c10::AliasInfo>> parseType(); std::tuple</*fake*/ TypePtr, /*real*/ TypePtr, std::optional<c10::AliasInfo>> parseFakeAndRealType(); std::optional<at::ScalarType> parseTensorDType(const std::string& dtype); TypePtr parseRefinedTensor(); SchemaTypeParser( Lexer& L, bool parse_complete_tensor_types, bool allow_typevars) : complete_tensor_types(parse_complete_tensor_types), L(L), allow_typevars_(allow_typevars) {} private: std::optional<bool> tryToParseRequiresGrad(); std::optional<c10::Device> tryToParseDeviceType(); void parseList( int begin, int sep, int end, c10::function_ref<void()> callback); bool complete_tensor_types; Lexer& L; size_t next_id = 0; bool allow_typevars_; }; } // namespace torch::jit ```
================================================================================================================================================== SOURCE CODE FILE: script_type_parser.h LINES: 1 SIZE: 1.59 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\frontend\script_type_parser.h ENCODING: utf-8 ```h #pragma once #include <ATen/core/jit_type.h> #include <torch/csrc/Export.h> #include <torch/csrc/jit/frontend/resolver.h> #include <torch/csrc/jit/frontend/tree_views.h> namespace torch::jit { /** * class ScriptTypeParser * * Parses expressions in our typed AST format (TreeView) into types and * typenames. */ class TORCH_API ScriptTypeParser { public: explicit ScriptTypeParser() = default; explicit ScriptTypeParser(ResolverPtr resolver) : resolver_(std::move(resolver)) {} c10::TypePtr parseTypeFromExpr(const Expr& expr) const; std::optional<std::pair<c10::TypePtr, int32_t>> parseBroadcastList( const Expr& expr) const; c10::TypePtr parseType(const std::string& str); FunctionSchema parseSchemaFromDef(const Def& def, bool skip_self); c10::IValue parseClassConstant(const Assign& assign); private: c10::TypePtr parseTypeFromExprImpl(const Expr& expr) const; std::optional<std::string> parseBaseTypeName(const Expr& expr) const; at::TypePtr subscriptToType( const std::string& typeName, const Subscript& subscript) const; std::vector<IValue> evaluateDefaults( const SourceRange& r, const std::vector<Expr>& default_types, const std::vector<Expr>& default_exprs); std::vector<Argument> parseArgsFromDecl(const Decl& decl, bool skip_self); std::vector<Argument> parseReturnFromDecl(const Decl& decl); ResolverPtr resolver_ = nullptr; // Need to use `evaluateDefaults` in serialization friend struct ConstantTableValue; friend struct SourceImporterImpl; }; } // namespace torch::jit ```
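For comparison with the token-level parser above, `ScriptTypeParser` accepts Python-style annotations. A small sketch of assumed usage, not taken from the header: with no resolver only builtin type names resolve, which is enough for container annotations like the one below.

```cpp
#include <torch/csrc/jit/frontend/script_type_parser.h>

#include <iostream>

using namespace torch::jit;

int main() {
  // Without a Resolver only builtin names (int, str, Tensor, List, Dict,
  // Optional, ...) can be resolved; user-defined classes need a resolver.
  ScriptTypeParser parser;

  c10::TypePtr t = parser.parseType("Dict[str, List[Tensor]]");
  std::cout << t->annotation_str() << '\n';  // Dict[str, List[Tensor]]
}
```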
============================================================================================================================================ SOURCE CODE FILE: source_range.h LINES: 2 SIZE: 12.98 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\frontend\source_range.h ENCODING: utf-8 ```h #pragma once #include <c10/util/Exception.h> #include <optional> #include <algorithm> #include <iterator> #include <memory> #include <ostream> #include <sstream> #include <unordered_map> namespace torch::jit { class SourceRangeUnpickler; struct SourceRange; // A stringlike class backed by a vector of string_view // the string represented are logically the concatenation of the string_views // This has advantage of not needing continues memory. struct TORCH_API StringCordView { StringCordView(); StringCordView(const StringCordView&) = default; StringCordView(StringCordView&&) noexcept = default; StringCordView( std::vector<std::string_view> inputs, std::vector<std::shared_ptr<std::string>> ownerships); StringCordView& operator=(const StringCordView&) = default; StringCordView& operator=(StringCordView&&) noexcept = default; size_t size() const { return accumulated_sizes_.back(); } size_t find(const std::string& tok, size_t start) const; size_t find_regex(const std::string& tok, size_t start) const; StringCordView substr(size_t start, size_t size) const; char at(size_t index) const { return *iter_for_pos(index); } char operator[](size_t index) const { return at(index); } std::string str() const { std::stringstream ss; for (auto s : pieces_) { ss << std::string(s); } return ss.str(); } bool operator==(const std::string& rhs) const; bool operator==(const StringCordView& rhs) const; std::string_view piece(size_t index) const { return pieces_[index]; } struct Iterator { Iterator( const StringCordView* str, size_t start_line, size_t start_pos, size_t size) : line_(start_line), pos_(start_pos), str_(str), size_(size) {} explicit Iterator(const StringCordView* str) : Iterator(str, 0, 0, str->size()) {} Iterator() : Iterator(nullptr, 0, 0, 0) {} Iterator(const Iterator&) = default; Iterator(Iterator&&) = default; Iterator& operator=(const Iterator&) = default; Iterator& operator=(Iterator&&) = default; Iterator operator++() { if (size_ == 0) { return *this; } if ((pos_ + 1) < str_->pieces_[line_].size()) { pos_++; } else { line_++; pos_ = 0; } return *this; } Iterator operator++(int) { Iterator prev(*this); ++(*this); return prev; } Iterator next_iter() const { Iterator next(*this); ++next; return next; } Iterator& operator+=(size_t num) { if (!has_next()) { return *this; } size_t target_pos = pos_ + num; if (target_pos >= str_->accumulated_sizes_[line_] && (line_ + 1) < str_->accumulated_sizes_.size() && target_pos < str_->accumulated_sizes_[line_ + 1]) { pos_ = target_pos; return *this; } size_t target_abs_pos = pos() + num; *this = str_->iter_for_pos(target_abs_pos); return *this; } bool operator==(const Iterator& rhs) const { if (!has_next() && !rhs.has_next()) { return true; } return (str_ == rhs.str_) && (line_ == rhs.line_) && (pos_ == rhs.pos_); } bool operator!=(const Iterator& rhs) { return !((*this) == rhs); } bool has_next() const { return size_ > 0 && (line_ < str_->pieces_.size()); } char operator*() const { TORCH_INTERNAL_ASSERT(line_ < str_->pieces_.size()); TORCH_INTERNAL_ASSERT(pos_ < str_->pieces_[line_].size()); return str_->pieces_[line_].at(pos_); } // returns rest of the line of the current iterator std::string_view rest_line() const { if (line_ >= 
str_->pieces_.size()) { return ""; } std::string_view cur_line = str_->pieces_[line_]; return cur_line.substr(pos_, std::string::npos); } size_t pos() const { if (size_ == 0) { return 0; } return str_->accumulated_sizes_[line_] + pos_; } private: size_t line_; size_t pos_; const StringCordView* str_; size_t size_; friend struct StringCordView; }; Iterator begin() const { return Iterator(this, 0, 0, size()); } Iterator end() const { return Iterator(this, pieces_.size(), 0, 0); } Iterator iter_for_pos(size_t pos) const; private: std::vector<std::string_view> pieces_; std::vector<size_t> accumulated_sizes_; std::vector<std::shared_ptr<std::string>> owned_strings_; }; // Source represents a code segment. It keeps track of: // - text_view : the view into text of the code segment // - filename (optional) : if present, represents the name of the file from // which the code segment originated. // - starting_line_no : represents the line in the original file where the // code segment started. struct TORCH_API Source { // Whether or not Source should copy the string passed in the constructor. enum CopiesString { COPIES_STRING, DONT_COPY }; explicit Source( std::string_view text_view, std::optional<std::string> filename = std::nullopt, size_t starting_line_no = 0, std::shared_ptr<SourceRangeUnpickler> gen_ranges = nullptr, CopiesString copies_str = COPIES_STRING) : filename_(std::move(filename)), starting_line_no_(starting_line_no), gen_ranges_(std::move(gen_ranges)) { if (copies_str == COPIES_STRING) { std::shared_ptr<std::string> allocated_str = std::make_shared<std::string>(text_view.data(), text_view.size()); text_view_ = StringCordView({*allocated_str}, {allocated_str}); } else { text_view_ = StringCordView({text_view}, {}); } calc_line_start_offsets(); } explicit Source( StringCordView str, std::optional<std::string> filename = std::nullopt, size_t starting_line_no = 0, std::shared_ptr<SourceRangeUnpickler> gen_ranges = nullptr) : text_view_(std::move(str)), filename_(std::move(filename)), starting_line_no_(starting_line_no), gen_ranges_(std::move(gen_ranges)) { calc_line_start_offsets(); } // Given a line number (within source_), return the byte offset of the // beginning of that line. size_t offset_for_line(size_t line) const { return line_starting_offsets_.at(line); } // Returns number of lines present. size_t num_lines() const { return line_starting_offsets_.size(); } // Calculate the line (within the code segment) on which `offset` resides. size_t lineno_for_offset(size_t offset) const { auto iter = std::upper_bound( line_starting_offsets_.begin(), line_starting_offsets_.end(), offset); return iter - line_starting_offsets_.begin() - 1; } // Calculate the line (within the original source file, if present) on which // `lineno` resides. size_t lineno_to_source_lineno(size_t lineno) const { if (filename_) { return lineno + starting_line_no_; } else { return lineno; } } StringCordView get_line(size_t lineno) const { auto start = offset_for_line(lineno); auto size = (lineno + 1) < num_lines() ? 
offset_for_line(lineno + 1) - start : text_view_.size() - start; return text_view_.substr(start, size); } const StringCordView& text_str() const { return text_view_; } char char_at(size_t index) const { return text_view_.at(index); } size_t size() const { return text_view_.size(); } std::optional<std::string>& filename() { return filename_; } size_t starting_line_no() const { return starting_line_no_; } std::optional<SourceRange> findSourceRangeThatGenerated( const SourceRange& range); ~Source() = default; private: void calc_line_start_offsets() { line_starting_offsets_.clear(); line_starting_offsets_.push_back(0); size_t pos = 0; while ((pos = text_view_.find("\n", pos)) != std::string::npos) { line_starting_offsets_.push_back(++pos); } } StringCordView text_view_; std::optional<std::string> filename_; // If filename_ is not present, starting_line_no_ is don't care size_t starting_line_no_; // Starting offsets for lines into the source. e.g. line 0 starts at // line_starting_offsets_[0], etc. std::vector<size_t> line_starting_offsets_; std::shared_ptr<SourceRangeUnpickler> gen_ranges_; }; // A SourceRange is a reference to subset of a Source, specified by `start` and // `end` byte offsets into the source text. struct TORCH_API SourceRange { SourceRange(std::shared_ptr<Source> source_view, size_t start_, size_t end_) : source_view_(std::move(source_view)), start_(start_), end_(end_) { if (source_view_) { start_iter_ = source_view_->text_str().iter_for_pos(start_); } } SourceRange() : source_view_(nullptr), start_(0), end_(0) {} SourceRange( std::shared_ptr<Source> source_view_, StringCordView::Iterator start_iter, size_t end_) : source_view_(std::move(source_view_)), start_(start_iter.pos()), end_(end_), start_iter_(start_iter) {} const std::string_view token_text() const { size_t size = end() - start(); return start_iter_.rest_line().substr(0, size); } const StringCordView text() const { return source_view_->text_str().substr(start(), end() - start()); } size_t size() const { return end() - start(); } static const size_t CONTEXT = 3; void highlight(std::ostream& out) const; // Customizable version of 'highlight' method. void print_with_context( std::ostream& out, size_t context, bool highlight, const std::string& funcname) const; const std::shared_ptr<Source>& source() const { return source_view_; } size_t start() const { return start_; } size_t end() const { return end_; } std::string str() const { std::stringstream ss; highlight(ss); return ss.str(); } std::optional<std::tuple<std::string, size_t, size_t>> file_line_col() const { if (!source_view_ || !source()->filename()) { return std::nullopt; } auto lineno = source_view_->lineno_for_offset(start_); auto col_offset = (int)start_ - (int)source_view_->offset_for_line(lineno); // TODO: std::optional<>::value returns an rvalue ref so can't use it here?? 
return std::make_tuple<std::string, size_t, size_t>( source_view_->filename().value_or(""), source_view_->lineno_to_source_lineno(lineno), (size_t)col_offset); } bool operator==(const SourceRange& rhs) const { return start() == rhs.start() && end() == rhs.end() && source() == rhs.source(); } bool operator!=(const SourceRange& rhs) const { return !(*this == rhs); } std::optional<SourceRange> findSourceRangeThatGenerated() const { if (!source_view_) { return std::nullopt; } return source_view_->findSourceRangeThatGenerated(*this); } protected: std::shared_ptr<Source> source_view_; private: size_t start_; size_t end_; StringCordView::Iterator start_iter_; }; // OwnedSourceRange is just like a SourceRange except that it owns its own // deep copy of the `Source` rather than sharing one. Thus OwnedSourceRange // owns a copy of the source text. struct OwnedSourceRange : public SourceRange { explicit OwnedSourceRange(const SourceRange& source_range) : SourceRange(source_range) { const auto& source = source_range.source(); if (source) { source_view_ = std::make_shared<Source>( source->text_str().str(), source->filename(), source->starting_line_no()); } } }; struct TORCH_API SourceRangeHasher { public: size_t operator()(const torch::jit::SourceRange& key) const; }; struct StackEntry { std::string filename; SourceRange range; }; TORCH_API void format_stack_trace( std::ostream& out, const std::vector<StackEntry>& entries); inline std::ostream& operator<<(std::ostream& out, const SourceRange& range) { range.highlight(out); return out; } // A pair of (byte offset, SourceRange) describing a specific segment // of the output stream struct TaggedRange { TaggedRange(size_t bytes, SourceRange range) : bytes(bytes), range(std::move(range)) {} size_t bytes; SourceRange range; }; using SourceRangeRecords = std::vector<TaggedRange>; using SourceRangeTagMap = std::unordered_map<SourceRange, int64_t, SourceRangeHasher>; } // namespace torch::jit namespace std { template <> struct iterator_traits<torch::jit::StringCordView::Iterator> { using value_type = char; using difference_type = ptrdiff_t; using pointer = char*; using reference = char&; using iterator_category = std::forward_iterator_tag; }; } // namespace std ```
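A short sketch of how `Source` and `SourceRange` fit together, using only the constructors and accessors declared above; the snippet text, filename, and offsets are made up for illustration.

```cpp
#include <torch/csrc/jit/frontend/source_range.h>

#include <iostream>
#include <memory>
#include <string>

using namespace torch::jit;

int main() {
  std::string text = "def f(x):\n    return x + 1\n";

  // starting_line_no records where this snippet began in the original file,
  // so reported line numbers match the user's file rather than the snippet.
  auto source = std::make_shared<Source>(
      std::string_view(text), std::string("example.py"), /*starting_line_no=*/10);

  // Byte offsets of the subexpression "x + 1" within the snippet.
  size_t start = text.find("x + 1");
  SourceRange range(source, start, start + 5);

  std::cout << "file line: "
            << source->lineno_to_source_lineno(source->lineno_for_offset(start))
            << '\n';
  range.highlight(std::cout);  // prints the line with the range underlined
}
```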
========================================================================================================================================== SOURCE CODE FILE: source_ref.h LINES: 1 SIZE: 1.30 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\frontend\source_ref.h ENCODING: utf-8 ```h #pragma once #include <functional> #include <memory> #include <ATen/core/ivalue.h> #include <c10/macros/Export.h> #include <torch/csrc/jit/frontend/source_range.h> namespace torch::jit { /** * SourceRef does two things: * 1. Owns a Source object. * 2. Serves as lookup key to the owned Source in associative containers, for * runtime data aggregation. * We don't want to use std::shared_ptr<Source> directly because we want to * support heterogeneous lookup, and also because shared_ptr is an * implementation detail that should be encapsulated. */ class TORCH_API SourceRef : public CustomClassHolder { public: explicit SourceRef(std::shared_ptr<Source> source_view) : source_view_(std::move(source_view)) {} bool operator==(const SourceRef& other) const { return source_view_ == other.source_view_; } bool operator<(const Source& other) const { return source_view_.get() < &other; } friend bool operator<(const Source& other, const SourceRef& self) { return &other < self.source_view_.get(); } bool operator<(const SourceRef& other) const { return *this < *other.source_view_; } const Source* operator->() const { return source_view_.get(); } private: std::shared_ptr<Source> source_view_; }; } // namespace torch::jit ```
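One way the heterogeneous `operator<` overloads above can be exercised is transparent lookup in an ordered container: with `std::less<>`, a plain `Source` can be used as the probe key without materializing a temporary `SourceRef`. This is an illustrative sketch only; the map, the key choice, and the counter are hypothetical, not usage prescribed by the header.

```cpp
#include <torch/csrc/jit/frontend/source_ref.h>

#include <map>
#include <memory>

using namespace torch::jit;

int main() {
  auto src = std::make_shared<Source>(std::string_view("x = 1\n"));

  // std::less<> is a transparent comparator, so find() may take any type that
  // is comparable with the key type -- here, Source itself.
  std::map<SourceRef, int64_t, std::less<>> hit_counts;
  hit_counts.emplace(SourceRef(src), 0);

  const Source& probe = *src;
  auto it = hit_counts.find(probe);  // uses the Source/SourceRef operator< pair
  if (it != hit_counts.end()) {
    it->second += 1;
  }
}
```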
====================================================================================================================================== SOURCE CODE FILE: strtod.h LINES: 1 SIZE: 0.22 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\frontend\strtod.h ENCODING: utf-8 ```h #pragma once #include <c10/macros/Macros.h> namespace torch::jit { TORCH_API double strtod_c(const char* nptr, char** endptr); TORCH_API float strtof_c(const char* nptr, char** endptr); } // namespace torch::jit ```
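These helpers parse floating-point literals with "C"-locale semantics so that serialized numbers round-trip regardless of the process locale. A minimal usage sketch:

```cpp
#include <torch/csrc/jit/frontend/strtod.h>

#include <iostream>

int main() {
  const char* text = "3.14159 rest";
  char* end = nullptr;

  // Always uses '.' as the decimal separator, independent of setlocale().
  double value = torch::jit::strtod_c(text, &end);

  std::cout << value << '\n';  // 3.14159
  std::cout << end << '\n';    // " rest" -- parsing stopped at the space
}
```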
============================================================================================================================================= SOURCE CODE FILE: sugared_value.h LINES: 1 SIZE: 28.08 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\frontend\sugared_value.h ENCODING: utf-8 ```h #pragma once #include <memory> #include <optional> #include <string> #include <utility> #include <ATen/core/symbol.h> #include <caffe2/serialize/versions.h> #include <torch/csrc/jit/api/module.h> #include <torch/csrc/jit/frontend/error_report.h> #include <torch/csrc/jit/frontend/schema_matching.h> #include <torch/csrc/jit/frontend/versioned_symbols.h> #include <torch/csrc/jit/ir/ir.h> namespace torch::jit { using SugaredValuePtr = std::shared_ptr<SugaredValue>; // The AST can contain nodes like `self`, `self.b` or `python_fn` that // are not first-class values in the graph representation, but instead // will be desugared based on how they are used in the AST. // SugaredValue is used to temporarily represent these values in a way // that separates their behavior from the AST -> IR converter itself. // This allows us to keep dependencies on python minimal. struct TORCH_API SugaredValue : public std::enable_shared_from_this<SugaredValue> { // what is this node? for error reporting (e.g. Module, python function) virtual std::string kind() const = 0; // what can we do with this thing? // use it as a value e.g. `this + 4` virtual Value* asValue(const SourceRange& loc, GraphFunction& m) { throw(ErrorReport(loc) << kind() << " cannot be used as a value"); } // select an attribute on it, e.g. `this.field` virtual std::shared_ptr<SugaredValue> attr( const SourceRange& loc, GraphFunction& m, const std::string& field) { throw(ErrorReport(loc) << "attribute lookup is not defined on " << kind()); } virtual bool hasAttr( const SourceRange& loc, GraphFunction& m, const std::string& field) { throw(ErrorReport(loc) << "attribute lookup is not defined on " << kind()); } // assign an attribute on it, e.g. `this.field = newValue` virtual void setAttr( const SourceRange& loc, GraphFunction& m, const std::string& field, Value* newValue) { throw( ErrorReport(loc) << "attribute assignment is not defined on " << kind()); } // use it as a vector of values, e.g. a tuple of values as return value from // a method invocation virtual std::vector<std::shared_ptr<SugaredValue>> asTuple( const SourceRange& loc, GraphFunction& m, const std::optional<size_t>& size_hint = {}) { throw(ErrorReport(loc) << kind() << " cannot be used as a tuple"); } // TODO @wconstab refactor to use ModuleValue::asTuple instead of new API virtual SugaredValuePtr asTupleValue( const SourceRange& loc, GraphFunction& m) { throw(ErrorReport(loc) << kind() << " cannot be used as a tuplevalue"); } virtual std::vector<std::shared_ptr<SugaredValue>> asType( const SourceRange& loc, Method& m) { throw(ErrorReport(loc) << kind() << " cannot be used as a type"); } // call it like a function, e.g. `outputs = this(inputs)` virtual std::shared_ptr<SugaredValue> call( const SourceRange& loc, GraphFunction& m, // note: names for args will be 'argument 0', 'argument 1', etc.. 
at::ArrayRef<NamedValue> args, at::ArrayRef<NamedValue> kwargs, size_t n_binders) { // n_binders is always set to the number of variables an expression is // syntactically bound to: // a = foo() # 1 binder (note in this case the single binder might be a // tuple) a, * b = foo() # 1 binder a, b = foo() # 2 binders foo() # 0 // binders // // In subexpressions, like bar() in foo(bar()), n_binders is always set to // 1. n_binders is used as a hint to subexpressions to determine how many // values they should return when that number is ambiguous statically. In // particular it is currently used to decide how many tensors a call to a // python function will return. It is only a hint, functions do not have to // check that n_binders match the number of things they are returning, the // assignment logic will do that anyway. throw(ErrorReport(loc) << "cannot call a " << kind()); } // This function is called when to convert a SugaredValue to its iterator. // For example, when iterating through a Dict we iterate over its keys virtual std::shared_ptr<SugaredValue> iter( const SourceRange& loc, GraphFunction& m) { throw(ErrorReport(loc) << kind() << " cannot be used as an iterable"); } // If we are iterating over a Sugared Value and it returns a value from this // function, then we emit an unrolled loop over the variable. This allows us // to support containers of Heterogenous types, like Module Containers & // Tuples virtual std::optional<int64_t> staticLen() { return std::nullopt; } // When iterating over this SugaredValue, should we emit the for loop as an // unrolled loop. bool shouldEmitUnrolled() { return staticLen() != std::nullopt; } // return length of this thing, if not then it can't be iterated. // If it does not have a statically-determinable length, then it cannot // be iterated over with a modulelist. If it does it must return a constant // Value * virtual Value* len(const SourceRange& loc, GraphFunction& m) { throw( ErrorReport(loc) << "'" << kind() << "'" << " object is not iterable"); } // expression for ith elemement for iterable value virtual std::shared_ptr<SugaredValue> getitem( const SourceRange& loc, GraphFunction& m, Value* idx, TypePtr type_hint = nullptr) { throw( ErrorReport(loc) << "'" << kind() << "'" << " object is not subscriptable"); } virtual ~SugaredValue() = default; }; // most things in the environment are just simple value types // and not special python syntax sugar types struct TORCH_API SimpleValue : public SugaredValue { SimpleValue(Value* value) : value_(value) {} std::string kind() const override { std::stringstream ss; // NOLINTNEXTLINE(clang-analyzer-core.CallAndMessage) ss << "value of type '" << value_->type()->annotation_str() << "'"; return ss.str(); } Value* asValue(const SourceRange& range, GraphFunction& m) override { return value_; } std::vector<std::shared_ptr<SugaredValue>> asTuple( const SourceRange& loc, GraphFunction& m, const std::optional<size_t>& size_hint = {}) override; std::shared_ptr<SugaredValue> attr( const SourceRange& loc, GraphFunction& m, const std::string& field) override; bool hasAttr( const SourceRange& loc, GraphFunction& m, const std::string& field) override; void setAttr( const SourceRange& loc, GraphFunction& m, const std::string& field, Value* newValue) override; std::shared_ptr<SugaredValue> call( const SourceRange& loc, GraphFunction& m, // note: names for args will be 'argument 0', 'argument 1', etc.. 
at::ArrayRef<NamedValue> args, at::ArrayRef<NamedValue> kwargs, size_t n_binders) override; std::shared_ptr<SugaredValue> iter(const SourceRange& loc, GraphFunction& m) override; Value* getValue() const { return value_; } Value* len(const SourceRange& loc, GraphFunction& m) override; SugaredValuePtr getitem( const SourceRange& loc, GraphFunction& m, Value* idx, TypePtr type_hint = nullptr) override; private: Value* value_; }; struct TORCH_API BuiltinFunction : public SugaredValue { BuiltinFunction(Symbol symbol, std::optional<NamedValue> self) : symbol(symbol), self(std::move(self)) {} // The symbol of the function (e.g. `aten::relu`). Symbol symbol; // if this is method, then this is the self argument. std::optional<NamedValue> self; std::string kind() const override { return "builtin"; } std::shared_ptr<SugaredValue> call( const SourceRange& loc, GraphFunction& m, at::ArrayRef<NamedValue> args, at::ArrayRef<NamedValue> kwargs, size_t n_binders) override; // try to create this builtin but if it doesn't exist or the self argument // cannot possibly match, then return nullptr. Use in situations where it is // not clear if it is a valid builtin static std::shared_ptr<BuiltinFunction> tryCreate( Symbol symbol, std::optional<NamedValue> self); }; struct TORCH_API SugaredTupleValue : public SugaredValue { explicit SugaredTupleValue(std::vector<std::shared_ptr<SugaredValue>> tup) : tup_(std::move(tup)) {} std::vector<std::shared_ptr<SugaredValue>> asTuple( const SourceRange& loc, GraphFunction& m, const std::optional<size_t>& size_hint = {}) override { return tup_; } Value* asValue(const SourceRange& loc, GraphFunction& m) override { std::vector<Value*> vec; vec.reserve(tup_.size()); for (const auto& sv : tup_) { vec.push_back(sv->asValue(loc, m)); } Graph& g = *m.graph(); return g.insertNode(g.createTuple(vec))->output(); } std::string kind() const override { return "Tuple"; } SugaredValuePtr getitem( const SourceRange& loc, GraphFunction& m, Value* idx, TypePtr type_hint = nullptr) override { if (!(idx->type()->cast<IntType>() && toIValue(idx))) { throw( ErrorReport(loc) << "Expected integer literal for index but got a variable or non-integer. " << "ModuleList/Sequential indexing is only supported with integer literals. " << "For example, 'i = 4; self.layers[i](x)' will fail because i is not a literal. " << "Enumeration is supported, e.g. 'for index, v in enumerate(self): out = v(inp)'"); } auto index = toIValue(idx)->toInt(); int64_t adj_index = (index < 0) ? index + static_cast<int64_t>(tup_.size()) : index; if (!(adj_index >= 0 && adj_index < static_cast<int64_t>(tup_.size()))) { throw( ErrorReport(loc) << "Index " << index << " out of range of length " << tup_.size()); } return tup_.at(adj_index); } // This function is called when a SugaredValue is used to convert a // SugaredValue to its iterator. For example, when iterating through a Dict we // iterate over its keys std::shared_ptr<SugaredValue> iter(const SourceRange& loc, GraphFunction& m) override { return shared_from_this(); } // Because this is used to contain SugaredValues of Heterogenous types, // we define staticLen() so that when this is iterated over it is emitted // as an unrolled loop. 
std::optional<int64_t> staticLen() override { return static_cast<int64_t>(tup_.size()); } std::vector<std::shared_ptr<SugaredValue>> tup_; }; struct TORCH_API BuiltinModule : public SugaredValue { BuiltinModule(std::string name, std::optional<int64_t> version = std::nullopt) : name(std::move(name)), version(version) {} std::string kind() const override { return "builtin module"; } std::shared_ptr<SugaredValue> attr( const SourceRange& loc, GraphFunction& m, const std::string& field) override { if (field == "autograd") { // When refering torch.autograd, it is also considered to be a // BuiltinModule and we will dispatch to the aten operators for the // methods under its module. return std::make_shared<BuiltinModule>("aten", version); } auto sym = Symbol::fromQualString(name + "::" + field); return std::make_shared<BuiltinFunction>(sym, std::nullopt); } private: std::string name; // when we add operator versioning, emit this op as it exising at 'version' // if not set, use the latest version std::optional<int64_t> version; }; // Represents a class, analagous to `int` or `dict`. Instances of classes, // like `1` or `{"foo": 5}`, are represented as SimpleValues struct TORCH_API ClassValue : public SugaredValue { explicit ClassValue(ClassTypePtr type) : type_(std::move(type)) {} // Call the type's constructor, as in: // n = Foo(constructor_arg) std::shared_ptr<SugaredValue> call( const SourceRange& loc, GraphFunction& m, at::ArrayRef<NamedValue> args, at::ArrayRef<NamedValue> kwargs, size_t n_binders) override; std::shared_ptr<SugaredValue> attr( const SourceRange& loc, GraphFunction& m, const std::string& field) override; std::string kind() const override { return type_->str(); } ClassTypePtr type_; }; struct TORCH_API NamedTupleConstructor : public SugaredValue { explicit NamedTupleConstructor(TupleTypePtr type) : type_(std::move(type)) {} std::shared_ptr<SugaredValue> call( const SourceRange& loc, GraphFunction& m, at::ArrayRef<NamedValue> args, at::ArrayRef<NamedValue> kwargs, size_t n_binders) override; std::string kind() const override { return type_->str(); } TupleTypePtr type_; }; struct FunctionValue : public SugaredValue { FunctionValue(Function* callee) : callees_({callee}) {} FunctionValue(const StrongFunctionPtr& p) : callees_({p.function_}), cu_(p.cu_) {} FunctionValue(const std::vector<StrongFunctionPtr>& callees) { for (const StrongFunctionPtr& callee : callees) { cu_ = cu_ ? cu_ : callee.cu_; TORCH_INTERNAL_ASSERT(callee.cu_ == cu_); callees_.push_back(callee.function_); } } std::string kind() const override { return "function"; } std::shared_ptr<SugaredValue> call( const SourceRange& loc, GraphFunction& f, at::ArrayRef<NamedValue> args, at::ArrayRef<NamedValue> kwargs, size_t n_binders) override { std::vector<const FunctionSchema*> schemas; for (Function* callee : callees_) { try { callee->ensure_defined(); } catch (const RecursiveMethodCallError&) { throw( ErrorReport(loc) << " function '" << callee->name() << "' is called recursively. 
" << "Recursive calls are not supported"); } schemas.push_back(&callee->getSchema()); } auto match = matchSchemas(schemas, loc, *f.graph(), args, kwargs); Value* output = f.graph()->insertFunctionCall(callees_[match.first], match.second); output->node()->setSourceRange(loc); return std::make_shared<SimpleValue>(output); } const std::vector<Function*>& callees() { return callees_; } private: std::vector<Function*> callees_; // TODO holding this thing is creepy std::shared_ptr<CompilationUnit> cu_; }; struct TORCH_API ClosureValue : public SugaredValue { ClosureValue(Value* value) : value_(value) { TORCH_INTERNAL_ASSERT(value_->node()->kind() == prim::Closure); } std::string kind() const override { return "closure"; } Value* asValue(const SourceRange& range, GraphFunction& m) override { return value_; } Value* value_; }; // defines how a method obtained from a module/class/interface behaves in script struct MethodValue : public SugaredValue { MethodValue(Value* self, std::vector<std::string> method_names) : self_(self), method_names_(std::move(method_names)) {} MethodValue(Value* self, std::string method_name) : MethodValue(self, std::vector<std::string>({std::move(method_name)})) {} std::string kind() const override { return "method"; } std::shared_ptr<SugaredValue> call( const SourceRange& loc, GraphFunction& f, at::ArrayRef<NamedValue> args, at::ArrayRef<NamedValue> kwargs, size_t n_binders) override { std::vector<NamedValue> argsWithSelf = {self_}; argsWithSelf.insert(argsWithSelf.end(), args.begin(), args.end()); std::vector<const FunctionSchema*> schemas; for (const std::string& method_name : method_names_) { if (auto class_type = self_->type()->cast<ClassType>()) { Function& method = class_type->getMethod(method_name); try { method.ensure_defined(); } catch (const RecursiveMethodCallError&) { throw( ErrorReport(loc) << " method '" << method.name() << "' is called recursively. 
" << "Recursive calls are not supported"); } schemas.push_back(&method.getSchema()); } else if (auto interface_type = self_->type()->cast<InterfaceType>()) { schemas.push_back(interface_type->getMethod(method_name)); } else { TORCH_INTERNAL_ASSERT( false, "method constructed that is not a class or interface"); } } auto match = matchSchemas(schemas, loc, *f.graph(), argsWithSelf, kwargs); Value* output = f.graph()->insertMethodCall(method_names_[match.first], match.second); output->node()->setSourceRange(loc); return std::make_shared<SimpleValue>(output); } private: Value* self_; std::vector<std::string> method_names_; }; struct TORCH_API PrintValue : public SugaredValue { std::string kind() const override { return "print"; } std::shared_ptr<SugaredValue> call( const SourceRange& loc, GraphFunction& m, at::ArrayRef<NamedValue> args, at::ArrayRef<NamedValue> kwargs, size_t n_binders) override; }; // expressions like int(x) // these are the same as call prim::Int or equivalent except it // is a noop when the input is a subtype of 'type' struct TORCH_API CastValue : public BuiltinFunction { CastValue(TypePtr type, c10::Symbol method) : BuiltinFunction(method, std::nullopt), type_(std::move(type)) {} std::shared_ptr<SugaredValue> call( const SourceRange& loc, GraphFunction& m, at::ArrayRef<NamedValue> args, at::ArrayRef<NamedValue> kwargs, size_t n_binders) override { if (args.size() == 1 && kwargs.empty()) { auto len_op = std::make_shared<BuiltinFunction>(aten::len, std::nullopt); auto gt_op = std::make_shared<BuiltinFunction>(aten::gt, std::nullopt); auto zero = m.graph()->insertConstant(0); auto v = args[0].value(*m.graph()); if (v->type()->isSubtypeOf(*type_)) { return std::make_shared<SimpleValue>(v); } else if ( *type_ == *BoolType::get() && (v->type()->isSubtypeOf(*AnyListType::get()) || v->type()->isSubtypeOf(*StringType::get()) || v->type()->cast<DictType>())) { auto len = len_op->call(loc, m, {v}, {}, 1); return gt_op->call(loc, m, {len->asValue(loc, m), zero}, {}, 1); } } return BuiltinFunction::call(loc, m, args, kwargs, n_binders); } private: TypePtr type_; }; struct TORCH_API TensorCastValue : public SugaredValue { TensorCastValue(at::ScalarType type, NamedValue self) : dtype_(type), self_(std::move(self)) {} std::string kind() const override { return "Cast"; } std::shared_ptr<SugaredValue> call( const SourceRange& loc, GraphFunction& m, at::ArrayRef<NamedValue> args, at::ArrayRef<NamedValue> kwargs, size_t n_binders) override { TORCH_INTERNAL_ASSERT(args.empty() && kwargs.empty()); Value* dtype_const = m.graph()->insertConstant(dtype_, loc); std::vector<NamedValue> kwargs_{ self_, NamedValue(loc, "dtype", dtype_const)}; Value* casted_val = m.graph()->insert( /*opname=*/Symbol::fromQualString("aten::to"), /*args=*/args, /*kwargs=*/kwargs_, /*range=*/loc); return std::make_shared<SimpleValue>(casted_val); } at::ScalarType dtype_; NamedValue self_; }; // builtins operators and functions that call a method if it exists // on a class type, like 'len(x)' and 'x + y' struct TORCH_API MagicMethod : public SugaredValue { MagicMethod(std::string desugared_name, SugaredValuePtr base) : base_value_(std::move(base)), desugared_name_(std::move(desugared_name)) {} std::string kind() const override { return desugared_name_; } std::shared_ptr<SugaredValue> call( const SourceRange& loc, GraphFunction& m, at::ArrayRef<NamedValue> args, at::ArrayRef<NamedValue> kwargs, size_t n_binders) override; private: SugaredValuePtr base_value_; std::string desugared_name_; }; // things that look like function 
applications, but // perform non-standard evaluation are represented // with SpecialFormValues, e.g. // isinstance(x, int) // fork(fn) // annotate(int, 3) // The implementation of each value is handled by a case inside emitApplyExpr struct TORCH_API SpecialFormValue : public SugaredValue { SpecialFormValue(Symbol form) : form_(form) {} std::string kind() const override { return form_.toUnqualString(); } Symbol form() const { return form_; } static std::shared_ptr<SpecialFormValue> create(Symbol form) { return std::make_shared<SpecialFormValue>(form); } private: Symbol form_; }; struct TORCH_API LegacyTensorConstructor : public SpecialFormValue { LegacyTensorConstructor(Symbol form, at::ScalarType dtype, at::Device device) : SpecialFormValue(form), device_(device), dtype_(dtype) {} static std::shared_ptr<LegacyTensorConstructor> create( Symbol form, at::ScalarType dtype, at::Device device) { return std::make_shared<LegacyTensorConstructor>(form, dtype, device); } at::ScalarType dtype() const { return dtype_; } private: at::Device device_; at::ScalarType dtype_; }; // matched against for special handling of range expressions struct TORCH_API RangeValue : SugaredValue { RangeValue( const SourceRange& loc, GraphFunction& m, std::vector<Value*> input, std::optional<int64_t> static_len = std::nullopt); std::string kind() const override { return "range"; } Value* len(const SourceRange& loc, GraphFunction& m) override; SugaredValuePtr getitem( const SourceRange& loc, GraphFunction& m, Value* idx, TypePtr type_hint = nullptr) override; std::shared_ptr<SugaredValue> iter(const SourceRange& loc, GraphFunction& m) override; // When Range is instantiated via enumerate(iterable_with_static_len), // then it takes the static length of the iterable std::optional<int64_t> staticLen() override { return static_len_; } private: Value* start_{}; Value* end_{}; Value* step_{}; // a flag to determine if it's a simple range() call with only end_ from // arguments If true, we will not insert length calculation and index // derivation nodes to simplify the graph and enable more possible // optimizations bool has_only_end_{}; std::optional<int64_t> static_len_; }; // Specialized Tree structure to matched against for special handling // of builtin functions iterables expressions like zip(), enumerate(), etc. // zip and enumerate can be modeled as a tree of SimpleValue/RangeValue: // zip(x, y) -> (x, y) with tuple assignment to each loop target // enumerate(x) -> (range(0, math.inf, 1), x) // So a complicated expression like zip(a, enumerate(b), range(0, 100)) will be: // (a, (range(0, math.inf, 1), b), range(0, 100)) // We use those base iterables to fill in the loop information like // max_trip_count and set the value table for loop targets // Iterables can contain lists of SugaredValues like ModuleLists. If it // does, then we emit it unrolled and require that all values it contains // have a statically-determinable length. 
struct TORCH_API IterableTree : SugaredValue { IterableTree() = default; IterableTree( const SourceRange& range, GraphFunction& m, at::ArrayRef<SugaredValuePtr> children) { for (const auto& child : children) { addChild(range, m, child); } } std::string kind() const override { return "iterabletree"; } std::shared_ptr<SugaredValue> iter(const SourceRange& loc, GraphFunction& m) override { return shared_from_this(); } void addChild( const SourceRange& range, GraphFunction& m, const SugaredValuePtr& iter_value); std::vector<SugaredValuePtr> get_children() { return children_; } // If this iterable contains a ModuleList or Tuple, then it will have a // static length, and we will emit it as an unrolled for loop. std::optional<int64_t> staticLen() override { return unroll_length_; } // given a IterableTree node, get all the base iterables/leaves under the // IterableTree node. This enables // us to get all the basic SugaredValues that contains valid loop information // with len() and getitem() std::vector<SugaredValuePtr> get_base_iterables(); Value* len(const SourceRange& loc, GraphFunction& m) override; SugaredValuePtr getitem( const SourceRange& loc, GraphFunction& m, Value* idx, TypePtr type_hint = nullptr) override; private: std::optional<int64_t> unroll_length_ = std::nullopt; std::vector<SugaredValuePtr> children_; }; static inline std::vector<Value*> toValues( Graph& g, at::ArrayRef<NamedValue> nvs) { return fmap(nvs, [&](const NamedValue& v) { return v.value(g); }); } struct SimpleSelf : public Self { explicit SimpleSelf(ClassTypePtr classType) : Self(), classType_(std::move(classType)) {} std::shared_ptr<SugaredValue> makeSugared(Value* v) const override { v->setType(classType_); return std::make_shared<SimpleValue>(v); } ClassTypePtr getClassType() const override { return classType_; } private: ClassTypePtr classType_; }; // This is not a SimpleValue so it can not pass through the code paths that // expect a SimpleValue as a sugared value. 
struct TORCH_API ExceptionMessageValue : public SugaredValue { explicit ExceptionMessageValue( Value* value, Value* qualified_class_name = nullptr) : value_(value), qualified_class_name_(qualified_class_name) {} std::string kind() const override { return "exception message"; } Value* getValue() { return value_; } // qualified python class name Value* getQualifiedClassName() { return qualified_class_name_; } private: Value* value_; Value* qualified_class_name_; }; struct TORCH_API ExceptionValue : public SugaredValue { explicit ExceptionValue(std::string message) : message_(std::move(message)) {} std::string kind() const override { return "exception"; } std::shared_ptr<SugaredValue> call( const SourceRange& loc, GraphFunction& m, at::ArrayRef<NamedValue> args, at::ArrayRef<NamedValue> /*attributes*/, size_t /*n_binders*/) override { auto exception_message = insertConstant(*m.graph(), message_ + ": ", loc); for (auto& input : args) { auto input_str = input.value(*m.graph()); if (!input_str->type()->isSubtypeOf(*StringType::get())) { input_str = emitBuiltinCall(loc, *m.graph(), aten::str, {input_str}, {}); } exception_message = emitBuiltinCall( loc, *m.graph(), aten::add, {exception_message, input_str}, {}); } return std::make_shared<ExceptionMessageValue>(exception_message); } std::string message_; }; struct TORCH_API SugaredEnumClass : public SugaredValue { explicit SugaredEnumClass(EnumTypePtr enum_type) : enum_type_(std::move(enum_type)) {} std::string kind() const override { return "EnumClass"; } SugaredValuePtr attr( const SourceRange& loc, GraphFunction& m, const std::string& field) override; SugaredValuePtr iter(const SourceRange& loc, GraphFunction& m) override; private: EnumTypePtr enum_type_; }; struct TORCH_API SliceValue : public SugaredValue { explicit SliceValue(Value* start, Value* stop, Value* step) : start_(start), stop_(stop), step_(step) {} std::string kind() const override { return "Python slice value"; } Value* start() { return start_; } Value* stop() { return stop_; } Value* step() { return step_; } private: Value* start_; Value* stop_; Value* step_; }; } // namespace torch::jit ```
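Most extensions of this header subclass `SugaredValue` and override only the hooks they support, leaving everything else to the base-class error paths. Below is a hedged, self-contained sketch of that pattern: a hypothetical `ConstantNamespaceValue` whose attribute access desugars straight into graph constants. It is an example shape, not an abstraction that exists in TorchScript.

```cpp
#include <torch/csrc/jit/frontend/sugared_value.h>

#include <memory>
#include <string>
#include <unordered_map>

namespace torch::jit {

// A toy sugared value representing a namespace of named integer constants.
// It never becomes a first-class IR value itself; `ns.field` desugars into an
// inserted constant, and every other operation falls back to the base-class
// errors ("cannot be used as a value", "cannot call a ...", etc.).
struct ConstantNamespaceValue : public SugaredValue {
  explicit ConstantNamespaceValue(
      std::unordered_map<std::string, int64_t> table)
      : table_(std::move(table)) {}

  std::string kind() const override {
    return "constant namespace";
  }

  std::shared_ptr<SugaredValue> attr(
      const SourceRange& loc,
      GraphFunction& m,
      const std::string& field) override {
    auto it = table_.find(field);
    if (it == table_.end()) {
      throw(ErrorReport(loc) << "unknown constant '" << field << "'");
    }
    // Desugar the attribute access into a constant in the enclosing graph.
    return std::make_shared<SimpleValue>(m.graph()->insertConstant(it->second));
  }

 private:
  std::unordered_map<std::string, int64_t> table_;
};

} // namespace torch::jit
```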
====================================================================================================================================== SOURCE CODE FILE: tracer.h LINES: 1 SIZE: 12.89 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\frontend\tracer.h ENCODING: utf-8 ```h #pragma once #include <ATen/core/Dimname.h> #include <ATen/core/class_type.h> #include <ATen/core/jit_type.h> #include <ATen/core/stack.h> #include <ATen/core/symbol.h> #include <c10/util/Exception.h> #include <torch/csrc/Export.h> #include <torch/csrc/jit/frontend/source_range.h> #include <torch/csrc/utils/variadic.h> #include <cstdint> #include <memory> #include <unordered_map> #include <vector> namespace torch::jit { struct Node; struct Value; struct Graph; struct Module; namespace tracer { using ::c10::ivalue::Shared; using ::c10::IValue; using ::c10::ivalue::Future; using ::c10::ArrayRef; using ::c10::TupleType; using ::c10::TupleTypePtr; using ::c10::ivalue::ConstantString; using torch::autograd::Variable; using variable_list = std::vector<Variable>; TORCH_API std::atomic<bool>& getTracerStateWarnMode(); struct TORCH_API TracingState : public std::enable_shared_from_this<TracingState> { TracingState(); ~TracingState(); // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) std::shared_ptr<Graph> graph; // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) bool warn = getTracerStateWarnMode(); // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) bool strict = true; // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) bool force_outplace = false; // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) std::function<std::string(const Variable& var)> lookup_var_name_fn = [](const Variable& var) { return ""; }; void enterFrame() { env_stack.emplace_back(); } void leaveFrame() { env_stack.pop_back(); } void setValue(const IValue& v, Value* value); void delValue(const IValue& var); Value* getValue(const IValue& var); Value* getOutput(const IValue& var, size_t i); bool hasValue(const IValue& var) const; Node* createNode(c10::Symbol op_name, size_t num_outputs); void insertNode(Node* node); private: using WeakIValue = at::WeakIValue; struct WeakIValueHasher { size_t operator()(const WeakIValue& t) const { return t.hash(); } }; struct WeakIValueEq { bool operator()(const WeakIValue& t1, const WeakIValue& t2) const { return t1.isSameIdentity(t2); } }; using Frame = std::unordered_map<WeakIValue, Value*, WeakIValueHasher, WeakIValueEq>; std::vector<Frame> env_stack; }; // This is meant to be used as a thread local place, where we can store extra // info that gets lost when we call into ATen from Python bindings. One example // for when this happens is when we get an IntArrayRef argument with e.g. sizes // for view. When tracing, those might be tensors, which let us encode extra // data dependencies, but once they get to the ATen call where we actually have // the tracing logic, they get converted into a raw IntArrayRef, and we loose // all information. To prevent this, we temporarily stash it in here. 
struct ArgumentStash { struct IntArrayRefTrace : std::vector<Value*> { IntArrayRefTrace(size_t size) : std::vector<Value*>(size, nullptr) {} }; static bool empty() { return stash.intlists.empty(); } TORCH_API static void stashIntArrayRefElem( const std::string& arg_name, size_t size, size_t idx, const Variable& var); static bool hasIntArrayRef(const std::string& arg_name) { return stash.intlists.count(arg_name) > 0; } static IntArrayRefTrace popIntArrayRef(const std::string& arg_name) { auto info = std::move(stash.intlists.at(arg_name)); stash.intlists.erase(arg_name); return info; } // Value stashing: Use these methods to stash arguments which correspond // to regular Value*'s in the graph. i.e. they don't require special // handling like in the case of IntArrayRefs TORCH_API static void stashValue( const std::string& arg_name, size_t idx, const Variable& var, const c10::TypePtr& type = nullptr); static bool hasValue(const std::string& arg_name) { return stash.values.count(arg_name) > 0; } static Value* popValue(const std::string& arg_name) { auto info = stash.values.at(arg_name); stash.values.erase(arg_name); return info; } private: static thread_local ArgumentStash stash; std::unordered_map<std::string, IntArrayRefTrace> intlists; std::unordered_map<std::string, Value*> values; }; // Retrieve or set the current tracing state. Returns a nullptr if tracing is // disabled. TORCH_API const std::shared_ptr<TracingState>& getTracingState(); TORCH_API void setTracingState(std::shared_ptr<TracingState> state); inline bool isTracing() { return static_cast<bool>(getTracingState()); } using warn_fn_type = void (*)(const std::string& msg); TORCH_API extern const char* WARN_PYTHON_DATAFLOW; TORCH_API extern const char* WARN_CONSTRUCTOR; TORCH_API extern const char* WARN_RESIZE; TORCH_API extern const char* STRICT_TRACER_MSG; TORCH_API void _do_warn(const char* _reason, const char* _kind); inline void warn(const char* _reason, const char* _kind = nullptr) { if (const auto& state = getTracingState()) { if (!state->warn) return; _do_warn(_reason, _kind); } } TORCH_API void setWarn(warn_fn_type fn); struct TORCH_API NoWarn { NoWarn() : state(getTracingState()) { if (state) { prev = state->warn; state->warn = false; } } ~NoWarn() { if (state) { state->warn = prev; } } std::shared_ptr<TracingState> state; bool prev{false}; }; struct WithNestedTracingFrame { WithNestedTracingFrame() { getTracingState()->enterFrame(); } ~WithNestedTracingFrame() { getTracingState()->leaveFrame(); } }; TORCH_API void recordSourceLocation(Node* n); TORCH_API void setRecordSourceLocation(void (*v)(Node*)); TORCH_API std::vector<StackEntry> pythonCallstack(); TORCH_API void setPythonCallstack(std::vector<StackEntry> (*v)()); // Having finished adding a new 'node' to the graph IR 'setValueTrace' // associates this node with an output variable, so that further operations // involving this variable know which node in the IR to reference. 
TORCH_API void setValueTrace(const IValue& v, Value* value); TORCH_API void delValueTrace(const IValue& var); TORCH_API std::function<void()> pauseTracing(); TORCH_API Value* getValueTrace(const IValue& var); TORCH_API std::pair<std::shared_ptr<TracingState>, Stack> trace( Stack inputs, const std::function<Stack(Stack)>& traced_fn, std::function<std::string(const Variable&)> var_name_lookup_fn, bool strict = true, bool force_outplace = false, Module* self = nullptr, const std::vector<std::string>& argument_names = {}); TORCH_API void abandon(); // NB: those serve both as an intermediate steps in addInputs below, // as well as the overloads that terminate template recursion TORCH_API void addInputs(Node* n, const char* name, int64_t value); TORCH_API void addInputs(Node* n, const char* name, const c10::SymInt& value); TORCH_API void addInputs( Node* n, const char* name, std::optional<int64_t> value); TORCH_API void addInputs(Node* n, const char* name, bool value); TORCH_API void addInputs( Node* n, const char* name, const std::optional<bool>& value); TORCH_API void addInputs(Node* n, const char* name, double value); TORCH_API void addInputs( Node* n, const char* name, const std::optional<double>& value); TORCH_API void addInputs(Node* n, const char* name, const at::Scalar& value); TORCH_API void addInputs( Node* n, const char* name, const std::optional<at::Scalar>& value); TORCH_API void addInputs(Node* n, const char* name, const at::Tensor& value); TORCH_API void addInputs( Node* n, const char* name, const std::optional<at::Tensor>& value); TORCH_API void addInputs(Node* n, const char* name, ArrayRef<int64_t> value); TORCH_API void addInputs(Node* n, const char* name, c10::SymIntArrayRef value); TORCH_API void addInputs( Node* n, const char* name, std::optional<c10::SymInt> value); TORCH_API void addInputs( Node* n, const char* name, const std::optional<ArrayRef<int64_t>>& value); TORCH_API void addInputs( Node* n, const char* name, const at::OptionalIntArrayRef& opt_value); TORCH_API void addInputs( Node* n, const char* name, const at::OptionalSymIntArrayRef& opt_value); TORCH_API void addInputs( Node* n, const char* name, ArrayRef<at::Tensor> value, bool allow_undefined = false); TORCH_API void addInputs( Node* n, const char* name, const std::vector<at::Tensor>& value, bool allow_undefined = false); TORCH_API void addInputs( Node* n, const char* name, at::ITensorListRef value, bool allow_undefined = false); TORCH_API void addInputs( Node* n, const char* name, const List<std::optional<at::Tensor>>& value); TORCH_API void addInputs( Node* n, const char* name, ArrayRef<c10::intrusive_ptr<c10::ivalue::Object>> value, const c10::ClassTypePtr& class_type); TORCH_API void addInputs(Node* n, const char* name, ArrayRef<double> value); TORCH_API void addInputs( Node* n, const char* name, const std::optional<ArrayRef<double>>& value); TORCH_API void addInputs( Node* n, const char* name, const std::string_view value); TORCH_API void addInputs( Node* n, const char* name, const std::optional<std::string_view>& value); TORCH_API void addInputs(Node* n, const char* name, at::Device value); TORCH_API void addInputs(Node* n, const char* name, c10::Stream stream); TORCH_API void addInputs(Node* n, const char* name, at::Layout value); TORCH_API void addInputs(Node* n, const char* name, at::ScalarType value); TORCH_API void addInputs( Node* n, const char* name, const std::optional<at::ScalarType>& value); TORCH_API void addInputs( Node* n, const char* name, const std::optional<at::Device>& value); TORCH_API 
void addInputs( Node* n, const char* name, const std::optional<at::Layout>& value); TORCH_API void addInputs(Node* n, const char* name, at::MemoryFormat value); TORCH_API void addInputs( Node* n, const char* name, std::optional<at::DimnameList> value); TORCH_API void addInputs( Node* n, const char* name, const std::optional<at::MemoryFormat>& value); TORCH_API void addInputs( Node* n, const char* name, const std::optional<at::Generator>& value); inline void addInputs( Node* n, const char* name, const std::vector<bool>& value) { TORCH_CHECK(false, "Tracing a list of bool type is currently not supported!"); } template <typename T> void addInputs(Node* n, const char* name, ArrayRef<T> value) { TORCH_CHECK( false, "Tracing a list of arbitrary type is currently not supported!"); } template <typename K, typename V> void addInputs( Node* n, const char* name, const std::unordered_map<K, V>& value) { TORCH_CHECK( false, "Tracing a dict of arbitrary types is currently not supported!"); } template <size_t N> void addInputs(Node* n, const char* name, std::array<bool, N> value) { throw std::runtime_error( "Found an unsupported argument type in the JIT tracer. File a bug report."); } TORCH_API void addInputs( Node* n, const char* name, const c10::intrusive_ptr<c10::ivalue::Object>& obj); TORCH_API void ensureUniqueIfOutOfPlaced( const char* name, const at::Tensor& tensor); TORCH_API void ensureUniqueIfOutOfPlaced( const char* name, const std::optional<at::Tensor>& tensor); template < typename T, typename = std::enable_if_t< (!std::is_convertible_v<std::decay_t<T>, at::TensorList> && !std::is_convertible_v<std::decay_t<T>, c10::List<at::Tensor>> && !std::is_convertible_v<std::decay_t<T>, at::Tensor> && !std::is_convertible_v< std::decay_t<T>, c10::intrusive_ptr<c10::ivalue::Object>>)>> void addOutput(Node* node, T&&) { TORCH_CHECK( false, "Found an unsupported argument type ", c10::demangle_type<T>(), " in the JIT tracer. File a bug report."); } TORCH_API void addOutput(Node* node, const at::Tensor& tensor); TORCH_API void setOutput(Value* value, const at::Tensor& output); TORCH_API void addOutput(Node* node, const std::vector<at::Tensor>& list); TORCH_API void addOutput(Node* node, const c10::List<at::Tensor>& list); TORCH_API void addOutput( Node* node, const c10::intrusive_ptr<c10::ivalue::Object>& output); TORCH_API autograd::Variable getSizeOf( const autograd::Variable& var, int64_t dim); TORCH_API autograd::Variable getNumelOf(const autograd::Variable& var); } // namespace tracer } // namespace torch::jit ```
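Code that is aware of the tracer usually does two things with this API: warn when its behaviour may not be captured faithfully, and briefly detach the tracing state around bookkeeping that should not appear in the trace. The helper below is hypothetical and only a sketch of that pattern.

```cpp
#include <torch/csrc/jit/frontend/tracer.h>

#include <ATen/ATen.h>

// Hypothetical helper inside an op implementation.
void my_op_helper(const at::Tensor& t) {
  namespace tracer = torch::jit::tracer;

  if (!tracer::isTracing()) {
    return;  // nothing to do when no trace is being recorded
  }

  // Warnings are only emitted while tracing (and can be silenced via NoWarn).
  tracer::warn("my_op_helper uses data-dependent control flow");

  // pauseTracing() detaches the current TracingState and returns a callable
  // that re-attaches it; work done in between is not recorded in the graph.
  auto resume = tracer::pauseTracing();
  at::Tensor scratch = t.clone();  // bookkeeping only, kept out of the trace
  (void)scratch;
  resume();
}
```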
==================================================================================================================================== SOURCE CODE FILE: tree.h LINES: 5 SIZE: 6.66 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\frontend\tree.h ENCODING: utf-8 ```h #pragma once #include <functional> #include <memory> #include <unordered_map> #include <vector> #include <c10/util/SmallVector.h> #include <c10/util/intrusive_ptr.h> #include <torch/csrc/jit/frontend/lexer.h> namespace torch::jit { // Trees are used to represent all forms of TC IR, pre- and post-typechecking. // Rather than have a full class hierarchy for all TC statements, trees are a // slight variation of Lisp s-expressions. For instance, the expression a*b+1 // is represented as: // (+ (* (ident a) (ident b)) (const 1)) // Atoms like 'a', 'b', and '1' are represented by subclasses of Tree which // define stringValue(). Everything else is a Compound object, which has a // 'kind' that is a token from lexer.h's TokenKind enum. Single-character // operators like '+' are represented using the character itself (so, add.kind() // would be '+'). Each Compound object also contains a list of subtrees and is // associated with a SourceRange for error reporting. // Memory management of trees is done using intrusive_ptr. struct Tree; using TreeRef = c10::intrusive_ptr<Tree>; using TreeList = at::SmallVector<TreeRef, 4>; struct Tree : c10::intrusive_ptr_target { Tree(int kind_) : kind_(kind_) {} int kind() const { return kind_; } virtual bool isAtom() const { return true; } virtual const SourceRange& range() const { throw std::runtime_error("is an Atom"); } virtual const std::string& stringValue() const { throw std::runtime_error("stringValue can only be called on TK_STRING"); } virtual const TreeList& trees() const { static const TreeList empty_trees = {}; return empty_trees; } const TreeRef& tree(size_t i) const { return trees().at(i); } virtual TreeRef map(const std::function<TreeRef(TreeRef)>& fn) { (void)fn; c10::raw::intrusive_ptr::incref(this); // we are creating a new pointer // from a raw `this` pointer // so we need to bump the refcount // to account for this ownership return TreeRef::reclaim(this); } template <typename... Args> void match(int k, Args&... args) const { matchD(k, "unknown", 0, args...); } template <typename... Args> void matchD(int k, const char* filename, int lineno, Args&... 
args) const { std::initializer_list<TreeRef*> vars = {args...}; matchNumSubtreesD(k, filename, lineno, vars.size(), true); size_t i = 0; for (TreeRef* v : vars) { *v = trees()[i++]; } } void matchNumSubtrees(int k, size_t expected_subtrees) { return matchNumSubtreesD(k, "unknown", 0, expected_subtrees, false); } void matchNumSubtreesD( int k, const char* filename, int lineno, size_t expected_subtrees, bool allow_more) const { if (kind() != k) { std::stringstream ss; ss << filename << ":" << lineno << ": expecting kind '" << kindToString(k) << "' but found '" << kindToString(kind()) << "'\n"; range().highlight(ss); throw std::runtime_error(ss.str()); } if (trees().size() < expected_subtrees || (!allow_more && trees().size() != expected_subtrees)) { std::stringstream ss; ss << filename << ":" << lineno << ": expected at least " << expected_subtrees << " subtrees, but found only " << trees().size() << "\n"; range().highlight(ss); throw std::runtime_error(ss.str()); } } ~Tree() override = default; private: int kind_; }; struct String : public Tree { String(std::string value) : Tree(TK_STRING), value_(std::move(value)) {} const std::string& stringValue() const override { return value_; } template <typename... Args> static TreeRef create(Args&&... args) { return c10::make_intrusive<String>(std::forward<Args>(args)...); } private: std::string value_; }; static SourceRange mergeRanges(SourceRange c, const TreeList& others) { for (const auto& t : others) { if (t->isAtom()) continue; size_t s = std::min(c.start(), t->range().start()); size_t e = std::max(c.end(), t->range().end()); c = SourceRange(c.source(), s, e); } return c; } struct Compound : public Tree { Compound(int kind, SourceRange range) : Tree(kind), range_(std::move(range)) {} Compound(int kind, const SourceRange& range_, TreeList&& trees_) : Tree(kind), range_(mergeRanges(range_, trees_)), trees_(std::move(trees_)) {} const TreeList& trees() const override { return trees_; } static TreeRef create( int kind, const SourceRange& range_, TreeList&& trees_) { return c10::make_intrusive<Compound>(kind, range_, std::move(trees_)); } bool isAtom() const override { return false; } TreeRef map(const std::function<TreeRef(TreeRef)>& fn) override { TreeList ret; for (auto& t : trees()) { ret.push_back(fn(t)); } return Compound::create(kind(), range(), std::move(ret)); } const SourceRange& range() const override { return range_; } private: SourceRange range_; TreeList trees_; }; // tree pretty printer struct pretty_tree { pretty_tree(const TreeRef& tree, size_t col = 40) : tree(tree), col(col) {} const TreeRef& tree; size_t col; std::unordered_map<TreeRef, std::string> flat_strings; const std::string& get_flat(const TreeRef& t) { auto it = flat_strings.find(t); if (it != flat_strings.end()) return it->second; std::stringstream out; switch (t->kind()) { case TK_STRING: out << t->stringValue(); break; default: out << "(" << kindToString(t->kind()); for (const auto& e : t->trees()) { out << " " << get_flat(e); } out << ")"; break; } auto it_ = flat_strings.emplace(t, out.str()); return it_.first->second; } void print(std::ostream& out, const TreeRef& t, int indent) { const std::string& s = get_flat(t); if (indent + s.size() < col || t->isAtom()) { out << s; return; } std::string k = kindToString(t->kind()); out << "(" << k; for (const auto& e : t->trees()) { out << "\n" << std::string(indent + 2, ' '); print(out, e, indent + 2); } out << ")"; } }; static inline std::ostream& operator<<(std::ostream& out, pretty_tree t_) { t_.print(out, t_.tree, 0); 
return out << '\n'; } static inline std::ostream& operator<<(std::ostream& out, const TreeRef& t) { return out << pretty_tree(t); } } // namespace torch::jit ```
========================================================================================================================================== SOURCE CODE FILE: tree_views.h LINES: 1 SIZE: 37.58 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\frontend\tree_views.h ENCODING: utf-8 ```h #pragma once #include <torch/csrc/jit/frontend/error_report.h> #include <torch/csrc/jit/frontend/strtod.h> #include <torch/csrc/jit/frontend/tree.h> #include <c10/util/complex.h> #include <functional> #include <iostream> #include <string> #include <utility> namespace torch::jit { // clang-format off // TreeView provides a statically-typed way to traverse the tree, which should // be formed according to the grammar below. // // A few notes on types and their aliases: // - List<T> is really a Tree with kind TK_LIST and elements as subtrees // - Maybe<T> is really a Tree with kind TK_OPTION that has 0 or 1 subtree of type T // - Builtin types are: Ident (TK_IDENT), String (TK_STRING) // // Param = Param(Maybe<Expr> type, Ident name) TK_PARAM // // Decl = Decl(List<Param> params, Maybe<Expr> return_type) TK_DECL // Def = Def(Ident name, Decl decl, List<Stmt> body) TK_DEF // ClassDef = ClassDef(Ident name, TK_CLASS_DEF // Maybe<Expr> superclass, // List<Stmt> body) // // Stmt = If(Expr cond, List<Stmt> true_body, List<Stmt> false_body) TK_IF // | For(List<Expr> targets, List<Expr> iters, List<Stmt> body) TK_FOR // | While(Expr cond, List<Stmt> body) TK_WHILE // | Global(List<Ident> idents) TK_GLOBAL // -- NB: the only type of Expr's allowed on lhs are Var // Or a tuple containing Var with an optional terminating Starred // | Assign(Expr lhs, Maybe<Expr> rhs, Maybe<Expr> type) TK_ASSIGN // | AugAssign(Expr lhs, AugAssignKind aug_op, Expr rhs) TK_AUG_ASSIGN // | Return(List<Expr> values) TK_RETURN // | ExprStmt(List<Expr> expr) TK_EXPR_STMT // | Raise(Expr expr) TK_RAISE // | Def TK_DEF // | With(List<WithItem> targets, List<Stmt> body) TK_WITH // // Expr = TernaryIf(Expr cond, Expr true_expr, Expr false_expr) TK_IF_EXPR // | BinOp(Expr lhs, Expr rhs) // | And TK_AND // | Or TK_OR // | Lt '<' // | Gt '>' // | Eq TK_EQ // | Le TK_LE // | Ge TK_GE // | Ne TK_NE // | Is TK_IS // | IsNot TK_ISNOT // | Add '+' // | Sub '-' // | Mul '*' // | Div '/' // | Mod '%' // | MatMult '@' // | Pow TK_POW // | UnaryOp(Expr expr) // | Not TK_NOT // | USub '-' // | Const(String value) TK_CONST // -- NB: x.name(y) is desugared into name(x, y) // | Apply(Ident name, List<Expr> args, List<Attribute> kwargs) TK_APPLY // | Select(Expr value, Ident selector) '.' // | Subscript(Expr value, List<Expr> subscript_exprs) TK_SUBSCRIPT // | SliceExpr(Maybe<Expr> start, Maybe<Expr> end) TK_SLICE_EXPR // | Var(Ident name) TK_VAR // | ListLiteral(List<Expr> inputs) TK_LIST_LITERAL // | TupleLiteral(List<Expr> inputs) TK_TUPLE_LITERAL // | Starred(Expr expr) TK_STARRED // | WithItem(Expr target, Maybe<Var> var) TK_WITH_ITEM // -- NB: only allowed expressions are Const or List(Const) // (List as a value, not type constructor) // Attribute = Attribute(Ident name, Expr value) TK_ATTRIBUTE // // AugAssignKind = // | Add() TK_PLUS_EQ // | Sub() TK_MINUS_EQ // | Mul() TK_TIMES_EQ // | Div() TK_DIV_EQ // | Mod() TK_MOD_EQ // // Each subclass of TreeView should provide: // 1. Constructor that takes a TreeRef, and checks that it's of the right type. // 2. Accessors that get underlying information out of the object. If they // return subtrees, they should wrap them in appropriate views too. // 3. 
Static method 'create' that creates the underlying TreeRef object // for every TreeRef kind that has a TreeView, the parser always uses // (e.g.) Ident::create rather than Compound::Create, this means that // changes to the structure of Ident are always made right here rather // than both in the parser and in this code. // XXX: these structs should have no fields to prevent slicing when passing by value // clang-format on struct TreeView { explicit TreeView(TreeRef tree) : tree_(std::move(tree)) {} TreeRef tree() const { return tree_; } const SourceRange& range() const { return tree_->range(); } operator TreeRef() const { return tree_; } const TreeRef& get() const { return tree_; } int kind() const { return tree_->kind(); } void dump() const { std::cout << tree_; } protected: const TreeRef& subtree(size_t i) const { return tree_->trees().at(i); } // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) TreeRef tree_; }; template <typename T> struct ListIterator { ListIterator(TreeList::const_iterator it) : it(it) {} bool operator!=(const ListIterator& rhs) const { return it != rhs.it; } bool operator==(const ListIterator& rhs) const { return it == rhs.it; } T operator*() const { return T(*it); } ListIterator& operator+=(std::ptrdiff_t n) { it += n; return *this; } ListIterator& operator++() { ++it; return *this; } ListIterator& operator--() { --it; return *this; } private: TreeList::const_iterator it; }; template <typename T> struct List : public TreeView { using iterator = ListIterator<T>; using const_iterator = ListIterator<T>; List(const TreeRef& tree) : TreeView(tree) { tree->match(TK_LIST); // Iterate over list to temporarily instantiate Ts that will check the type for (const T& elem : *this) { (void)elem; // silence unused warning } } iterator begin() const { return iterator(tree_->trees().begin()); } iterator end() const { return iterator(tree_->trees().end()); } bool empty() const { return tree_->trees().begin() == tree_->trees().end(); } T operator[](size_t i) const { return T(subtree(i)); } TreeRef map(const std::function<TreeRef(const T&)>& fn) { return tree_->map([&](TreeRef v) { return fn(T(v)); }); } static List create(const SourceRange& range, const std::vector<T>& subtrees) { TreeList type_erased_sub{subtrees.begin(), subtrees.end()}; return List(Compound::create(TK_LIST, range, std::move(type_erased_sub))); } static List unsafeCreate(const SourceRange& range, TreeList&& subtrees) { return List(Compound::create(TK_LIST, range, std::move(subtrees))); } size_t size() const { return tree_->trees().size(); } }; template <typename T> struct Maybe : public TreeView { explicit Maybe(const TreeRef& tree) : TreeView(tree) { tree_->match(TK_OPTION); if (tree_->trees().size() > 1) throw(ErrorReport(tree) << "Maybe trees can have at most one subtree"); } /* implicit */ Maybe(const T& tree) : TreeView(tree) {} bool present() const { return tree_->trees().size() > 0; } T get() const { return T(tree_->trees().at(0)); } TreeRef map(const std::function<TreeRef(const T&)>& fn) { return tree_->map([&](TreeRef v) { return fn(T(v)); }); } static Maybe<T> create(const SourceRange& range) { return Maybe<T>(Compound::create(TK_OPTION, range, {})); } static Maybe<T> create(const SourceRange& range, const T& value) { return Maybe<T>(Compound::create(TK_OPTION, range, {value})); } }; struct Ident : public TreeView { explicit Ident(const TreeRef& tree) : TreeView(tree) { tree_->match(TK_IDENT); } const std::string& name() const { return subtree(0)->stringValue(); } static Ident 
create(const SourceRange& range, std::string name) { return Ident( Compound::create(TK_IDENT, range, {String::create(std::move(name))})); } }; //////////////////////////////////////////////////////////////////////////////// // Base types (production LHS) //////////////////////////////////////////////////////////////////////////////// struct Stmt : public TreeView { explicit Stmt(const TreeRef& tree) : TreeView(tree) { switch (tree->kind()) { case TK_IF: case TK_FOR: case TK_WHILE: case TK_GLOBAL: case TK_ASSIGN: case TK_AUG_ASSIGN: case TK_RETURN: case TK_EXPR_STMT: case TK_RAISE: case TK_ASSERT: case TK_PASS: case TK_BREAK: case TK_DELETE: case TK_CONTINUE: case TK_DEF: case TK_WITH: return; default: throw( ErrorReport(tree) << kindToString(tree->kind()) << " is not a valid Stmt"); } } }; struct Expr : public TreeView { explicit Expr(const TreeRef& tree) : TreeView(tree) { switch (tree->kind()) { case TK_IF_EXPR: case TK_AND: case TK_OR: case '<': case '>': case TK_IS: case TK_ISNOT: case TK_EQ: case TK_LE: case TK_GE: case TK_NE: case '+': case '-': case TK_UNARY_MINUS: case '~': case '*': case TK_STARRED: case '/': case '%': case TK_NOT: case TK_CONST: case TK_STRINGLITERAL: case TK_TRUE: case TK_FALSE: case TK_NONE: case TK_NONE_TYPE: case TK_CAST: case TK_APPLY: case '.': case TK_SUBSCRIPT: case TK_SLICE_EXPR: case TK_VAR: case TK_LIST_LITERAL: case TK_TUPLE_LITERAL: case TK_DICT_LITERAL: case '@': case TK_POW: case TK_LSHIFT: case TK_RSHIFT: case TK_FLOOR_DIV: case '&': case '^': case '|': case TK_LIST_COMP: case TK_DICT_COMP: case TK_DOTS: case TK_IN: case TK_WITH_ITEM: return; default: throw( ErrorReport(tree) << kindToString(tree->kind()) << " is not a valid Expr"); } } }; //////////////////////////////////////////////////////////////////////////////// // Helper nodes (mostly for function arguments) //////////////////////////////////////////////////////////////////////////////// struct Attribute : public TreeView { explicit Attribute(const TreeRef& tree) : TreeView(tree) { tree_->match(TK_ATTRIBUTE); } Ident name() const { return Ident(subtree(0)); } Expr value() const { return Expr(subtree(1)); } static Attribute create( const SourceRange& range, const Ident& name, const TreeRef& value) { return Attribute(Compound::create(TK_ATTRIBUTE, range, {name, value})); } }; struct Param : public TreeView { explicit Param(const TreeRef& tree) : TreeView(tree) { tree_->match(TK_PARAM); } static Param create( const SourceRange& range, const Ident& ident, const Maybe<Expr>& type, const Maybe<Expr>& def, bool kwarg_only) { TreeRef kwarg_only_tree = Compound::create(kwarg_only ? 
TK_TRUE : TK_FALSE, range, {}); return Param(Compound::create( TK_PARAM, range, {ident, type, def, std::move(kwarg_only_tree)})); } Ident ident() const { return Ident(subtree(0)); } Maybe<Expr> type() const { return Maybe<Expr>(subtree(1)); } Maybe<Expr> defaultValue() const { return Maybe<Expr>(subtree(2)); } bool kwarg_only() const { return TK_TRUE == subtree(3)->kind(); } Param withType(const Maybe<Expr>& typ) const { return Param::create(range(), ident(), typ, defaultValue(), kwarg_only()); } }; //////////////////////////////////////////////////////////////////////////////// // Top level definitions //////////////////////////////////////////////////////////////////////////////// struct Decl : public TreeView { explicit Decl(const TreeRef& tree) : TreeView(tree) { tree->match(TK_DECL); } List<Param> params() const { return List<Param>(subtree(0)); } Maybe<Expr> return_type() const { return Maybe<Expr>(subtree(1)); } static Decl create( const SourceRange& range, const List<Param>& params, const Maybe<Expr>& return_type) { return Decl(Compound::create(TK_DECL, range, {params, return_type})); } }; struct Def : public TreeView { explicit Def(const TreeRef& tree) : TreeView(tree) { tree->match(TK_DEF); } Def withName(std::string new_name) const { auto new_ident = Ident::create(name().range(), std::move(new_name)); return create(range(), new_ident, decl(), statements()); } Def withDecl(const Decl& decl) const { return create(range(), name(), decl, statements()); } Ident name() const { return Ident(subtree(0)); } Decl decl() const { return Decl(subtree(1)); } List<Stmt> statements() const { return List<Stmt>(subtree(2)); } static Def create( const SourceRange& range, const Ident& name, const Decl& decl, const List<Stmt>& stmts) { return Def(Compound::create(TK_DEF, range, {name, decl, stmts})); } }; // Property represents a named attribute combined with a getter and setter // method to access and mutate that attribute. 
struct Property : public TreeView { explicit Property(const TreeRef& tree) : TreeView(tree) { tree->match(TK_PROP); } Ident name() const { return Ident(subtree(0)); } Def getter() const { return Def(subtree(1)); } Maybe<Def> setter() const { return Maybe<Def>(subtree(2)); } static Property create( const SourceRange& range, const Ident& name, const Def& getter, const Maybe<Def>& setter) { return Property(Compound::create(TK_PROP, range, {name, getter, setter})); } }; struct Assign; struct ClassDef : public TreeView { explicit ClassDef(const TreeRef& tree) : TreeView(tree) { tree->match(TK_CLASS_DEF); } explicit ClassDef(TreeRef&& tree) : TreeView(std::move(tree)) { tree_->match(TK_CLASS_DEF); } ClassDef withName(std::string new_name) const { auto new_ident = Ident::create(name().range(), std::move(new_name)); return create(range(), new_ident, superclass(), body()); } Ident name() const { return Ident(subtree(0)); } Maybe<Expr> superclass() const { return Maybe<Expr>(subtree(1)); } List<Stmt> body() const { return List<Stmt>(subtree(2)); } Maybe<List<Property>> properties() const { return Maybe<List<Property>>(subtree(3)); } Maybe<List<Assign>> assigns() const { return Maybe<List<Assign>>(subtree(4)); } static ClassDef create( const SourceRange& range, const Ident& name, const Maybe<Expr>& superclass, const List<Stmt>& body) { return ClassDef(Compound::create( TK_CLASS_DEF, range, {name, superclass, body, Maybe<List<Property>>::create(range), Maybe<List<Assign>>::create(range)})); } static ClassDef create( const SourceRange& range, const Ident& name, const Maybe<Expr>& superclass, const List<Stmt>& body, const List<Property>& properties, const List<Assign>& assigns); }; TORCH_API std::vector<std::string> getUnresolvedClassAttributes( const ClassDef& def); //////////////////////////////////////////////////////////////////////////////// // Statements //////////////////////////////////////////////////////////////////////////////// struct If : public Stmt { explicit If(const TreeRef& tree) : Stmt(tree) { tree_->match(TK_IF); } Expr cond() const { return Expr(subtree(0)); } List<Stmt> trueBranch() const { return List<Stmt>(subtree(1)); } List<Stmt> falseBranch() const { return List<Stmt>(subtree(2)); } If withNewBranches( const List<Stmt>& true_branch, const List<Stmt>& false_branch) const { return create(range(), cond(), true_branch, false_branch); } static If create( const SourceRange& range, const Expr& cond, const List<Stmt>& true_branch, const List<Stmt>& false_branch) { return If( Compound::create(TK_IF, range, {cond, true_branch, false_branch})); } }; struct While : public Stmt { explicit While(const TreeRef& tree) : Stmt(tree) { tree_->match(TK_WHILE); } Expr cond() const { return Expr(subtree(0)); } List<Stmt> body() const { return List<Stmt>(subtree(1)); } static While create( const SourceRange& range, const Expr& cond, const List<Stmt>& body) { return While(Compound::create(TK_WHILE, range, {cond, body})); } }; struct For : public Stmt { explicit For(const TreeRef& tree) : Stmt(tree) { tree->match(TK_FOR); } List<Expr> targets() const { return List<Expr>(subtree(0)); } List<Expr> itrs() const { return List<Expr>(subtree(1)); } List<Stmt> body() const { return List<Stmt>(subtree(2)); } static For create( const SourceRange& range, const List<Expr>& targets, const List<Expr>& itrs, const List<Stmt>& body) { return For(Compound::create(TK_FOR, range, {targets, itrs, body})); } }; // TODO: supports only single comprehension for now struct ListComp : public Expr { explicit ListComp(const 
TreeRef& tree) : Expr(tree) { tree->match(TK_LIST_COMP); } Expr elt() const { return Expr(subtree(0)); } Expr target() const { return Expr(subtree(1)); } Expr iter() const { return Expr(subtree(2)); } // TODO: no ifs for now static ListComp create( const SourceRange& range, const Expr& elt, const Expr& target, const Expr& iter) { return ListComp(Compound::create(TK_LIST_COMP, range, {elt, target, iter})); } }; // TODO: supports only single comprehension for now struct DictComp : public Expr { explicit DictComp(const TreeRef& tree) : Expr(tree) { tree->match(TK_DICT_COMP); } Expr key() const { return Expr(subtree(0)); } Expr value() const { return Expr(subtree(1)); } Expr target() const { return Expr(subtree(2)); } Expr iter() const { return Expr(subtree(3)); } // TODO: no ifs for now static DictComp create( const SourceRange& range, const Expr& key, const Expr& value, const Expr& target, const Expr& iter) { return DictComp( Compound::create(TK_DICT_COMP, range, {key, value, target, iter})); } }; struct Global : public Stmt { explicit Global(const TreeRef& tree) : Stmt(tree) { tree_->match(TK_GLOBAL); } List<Ident> names() { return List<Ident>(subtree(0)); } static Global create(const SourceRange& range, const List<Ident>& names) { return Global(Compound::create(TK_GLOBAL, range, {names})); } }; struct AugAssignKind : public TreeView { explicit AugAssignKind(const TreeRef& tree) : TreeView(tree) { switch (tree->kind()) { case '+': case '-': case '*': case '/': case '%': case '|': case '&': case '^': case TK_POW: case TK_LSHIFT: case TK_RSHIFT: return; default: throw(ErrorReport(tree) << "is not a valid AugAssignKind"); } } }; // Augmented assignment, like "foo += bar" struct AugAssign : public Stmt { explicit AugAssign(const TreeRef& tree) : Stmt(tree) { tree_->match(TK_AUG_ASSIGN); } static AugAssign create( const SourceRange& range, const Expr& lhs, const AugAssignKind& aug_op, const Expr& rhs) { return AugAssign( Compound::create(TK_AUG_ASSIGN, range, {lhs, aug_op, rhs})); } Expr lhs() const { return Expr(subtree(0)); } int aug_op() const { return subtree(1)->kind(); } Expr rhs() const { return Expr(subtree(2)); } }; struct Assign : public Stmt { explicit Assign(const TreeRef& tree) : Stmt(tree) { tree_->match(TK_ASSIGN); } static Assign create( const SourceRange& range, const List<Expr>& lhs, const Maybe<Expr>& rhs, const Maybe<Expr>& type) { return Assign(Compound::create(TK_ASSIGN, range, {lhs, rhs, type})); } List<Expr> lhs_list() const { return List<Expr>(subtree(0)); } Expr lhs() const { const auto& li = lhs_list(); TORCH_INTERNAL_ASSERT(li.size() == 1); return *li.begin(); } Maybe<Expr> rhs() const { return Maybe<Expr>(subtree(1)); } Maybe<Expr> type() const { return Maybe<Expr>(subtree(2)); } }; struct Return : public Stmt { explicit Return(const TreeRef& tree) : Stmt(tree) { tree_->match(TK_RETURN); } Expr expr() const { return Expr(subtree(0)); } static Return create(const SourceRange& range, const Expr& value) { return Return(Compound::create(TK_RETURN, range, {value})); } }; struct Raise : public Stmt { explicit Raise(const TreeRef& tree) : Stmt(tree) { tree_->match(TK_RAISE); } Expr expr() const { return Expr(subtree(0)); } static Raise create(const SourceRange& range, const Expr& expr) { return Raise(Compound::create(TK_RAISE, range, {expr})); } }; struct Assert : public Stmt { explicit Assert(const TreeRef& tree) : Stmt(tree) { tree_->match(TK_ASSERT); } Expr test() const { return Expr(subtree(0)); } Maybe<Expr> msg() const { return Maybe<Expr>(subtree(1)); } static Assert 
create( const SourceRange& range, const Expr& test, const Maybe<Expr>& msg) { return Assert(Compound::create(TK_ASSERT, range, {test, msg})); } }; struct Pass : public Stmt { explicit Pass(const TreeRef& tree) : Stmt(tree) { tree_->match(TK_PASS); } static Pass create(const SourceRange& range) { return Pass(Compound::create(TK_PASS, range, {})); } }; struct Dots : public Expr { explicit Dots(const TreeRef& tree) : Expr(tree) { tree_->match(TK_DOTS); } static Dots create(const SourceRange& range) { return Dots(Compound::create(TK_DOTS, range, {})); } }; struct Break : public Stmt { explicit Break(const TreeRef& tree) : Stmt(tree) { tree_->match(TK_BREAK); } static Break create(const SourceRange& range) { return Break(Compound::create(TK_BREAK, range, {})); } }; struct Continue : public Stmt { explicit Continue(const TreeRef& tree) : Stmt(tree) { tree_->match(TK_CONTINUE); } static Continue create(const SourceRange& range) { return Continue(Compound::create(TK_CONTINUE, range, {})); } }; struct ExprStmt : public Stmt { explicit ExprStmt(const TreeRef& tree) : Stmt(tree) { tree_->match(TK_EXPR_STMT); } Expr expr() { return Expr(subtree(0)); } static ExprStmt create(const SourceRange& range, const Expr& list) { return ExprStmt(Compound::create(TK_EXPR_STMT, range, {list})); } }; //////////////////////////////////////////////////////////////////////////////// // Expressions //////////////////////////////////////////////////////////////////////////////// struct BinOp : public Expr { explicit BinOp(const TreeRef& tree) : Expr(tree) { switch (tree->kind()) { case TK_AND: case TK_OR: case '<': case '>': case TK_IS: case TK_ISNOT: case TK_EQ: case TK_LE: case TK_GE: case TK_NE: case '+': case '*': case '/': case '-': case '@': case TK_POW: case TK_LSHIFT: case TK_RSHIFT: case '%': case '&': case '^': case '|': case TK_FLOOR_DIV: case TK_IN: if (tree->trees().size() != 2) throw( ErrorReport(tree) << "BinOp expected 2 subtrees, found " << tree->trees().size()); return; default: throw( ErrorReport(tree) << kindToString(tree->kind()) << " is not a valid BinOp"); } } Expr lhs() const { return Expr(subtree(0)); } Expr rhs() const { return Expr(subtree(1)); } static BinOp create( const SourceRange& range, int kind, const Expr& lhs, const Expr& rhs) { return BinOp(Compound::create(kind, range, {lhs, rhs})); } }; struct UnaryOp : public Expr { explicit UnaryOp(const TreeRef& tree) : Expr(tree) { switch (tree->kind()) { case TK_UNARY_MINUS: case '~': case TK_NOT: if (tree->trees().size() != 1) throw( ErrorReport(tree) << "UnaryOp expected 1 subtree, found " << tree->trees().size()); return; default: throw( ErrorReport(tree) << kindToString(tree->kind()) << " is not a valid UnaryOp"); } } static UnaryOp create(const SourceRange& range, int kind, const Expr& expr) { return UnaryOp(Compound::create(kind, range, {expr})); } }; struct Const : public Expr { explicit Const(const TreeRef& tree) : Expr(tree) { tree_->matchNumSubtrees(TK_CONST, 1); } bool isFloatingPoint() const { if (isComplex()) return false; bool is_inf = subtree(0)->stringValue() == "inf"; return is_inf || subtree(0)->stringValue().find_first_of(".eE") != std::string::npos; } bool isIntegral() const { return !isFloatingPoint() && !isComplex(); } bool isComplex() const { return subtree(0)->stringValue().find_first_of('j') != std::string::npos; } int64_t asIntegral() const { try { return std::stoll(subtree(0)->stringValue(), nullptr, 0); } catch (const std::out_of_range&) { throw( ErrorReport(range()) << "Integral constant out of range " "(must fit 
in a signed 64 bit integer)"); } } double asFloatingPoint() const { // We can't pass in nullptr as the dummy pointer gets dereferenced for // Android version of strtod_c(). char* dummy = nullptr; return torch::jit::strtod_c(subtree(0)->stringValue().c_str(), &dummy); } c10::complex<double> asComplex() const { char* dummy = nullptr; auto str = subtree(0)->stringValue(); // Complex numbers (a+bj, where a is non-zero) are parsed as an addition // between float/int a and a complex number "bj". When a is 0, a complex // number bj is created as above. So, while parsing the string, we don't // have to worry about the real component of the complex number. auto imag = torch::jit::strtod_c(str.substr(0, str.size() - 1).c_str(), &dummy); return c10::complex<double>(0, imag); } const std::string& text() const { return subtree(0)->stringValue(); } static Const create(const SourceRange& range, const std::string& value) { return Const(Compound::create(TK_CONST, range, {String::create(value)})); } }; struct StringLiteral : public Expr { explicit StringLiteral(const TreeRef& tree) : Expr(tree) { tree_->matchNumSubtrees(TK_STRINGLITERAL, 1); } const std::string& text() const { return subtree(0)->stringValue(); } static StringLiteral create( const SourceRange& range, const std::string& value) { return StringLiteral( Compound::create(TK_STRINGLITERAL, range, {String::create(value)})); } }; struct Apply : public Expr { explicit Apply(const TreeRef& tree) : Expr(tree) { tree_->match(TK_APPLY); } Expr callee() const { return Expr(subtree(0)); } List<Expr> inputs() const { return List<Expr>(subtree(1)); } List<Attribute> attributes() const { return List<Attribute>(subtree(2)); } static Apply create( const SourceRange& range, const Expr& callee, const List<Expr>& inputs, const List<Attribute>& attributes) { return Apply( Compound::create(TK_APPLY, range, {callee, inputs, attributes})); } }; struct Select : public Expr { explicit Select(const TreeRef& tree) : Expr(tree) { tree_->match('.'); } Expr value() const { return Expr(subtree(0)); } Ident selector() const { return Ident(subtree(1)); } static Select create( const SourceRange& range, const Expr& value, const Ident& selector) { return Select(Compound::create('.', range, {value, selector})); } }; struct SliceExpr : public Expr { explicit SliceExpr(const TreeRef& tree) : Expr(tree) { tree_->match(TK_SLICE_EXPR); } Maybe<Expr> start() const { return Maybe<Expr>(subtree(0)); } Maybe<Expr> end() const { return Maybe<Expr>(subtree(1)); } Maybe<Expr> step() const { return Maybe<Expr>(subtree(2)); } Expr startOr(int64_t alternative) const { const auto startOption = start(); return startOption.present() ? startOption.get() : createInt(alternative); } Expr endOr(int64_t alternative) const { const auto endOption = end(); return endOption.present() ? endOption.get() : createInt(alternative); } Expr stepOr(int64_t alternative) const { const auto stepOption = step(); return stepOption.present() ? 
stepOption.get() : createInt(alternative); } static SliceExpr create( const SourceRange& range, const Maybe<Expr>& start, const Maybe<Expr>& end, const Maybe<Expr>& step) { return SliceExpr( Compound::create(TK_SLICE_EXPR, range, {start, end, step})); } private: Expr createInt(int64_t value) const { return Expr(Const::create(range(), std::to_string(value))); } }; struct Subscript : public Expr { explicit Subscript(const TreeRef& tree) : Expr(tree) { tree_->match(TK_SUBSCRIPT); } Expr value() const { return Expr(subtree(0)); } List<Expr> subscript_exprs() const { return List<Expr>(subtree(1)); } static Subscript create( const SourceRange& range, const Expr& value, const List<Expr>& subscript_exprs) { auto whole_range = SourceRange( range.source(), range.start(), subscript_exprs.range().end() + 1); return Subscript( Compound::create(TK_SUBSCRIPT, whole_range, {value, subscript_exprs})); } }; struct Var : public Expr { explicit Var(const TreeRef& tree) : Expr(tree) { tree_->match(TK_VAR); } Ident name() const { return Ident(subtree(0)); } static Var create(const SourceRange& range, const Ident& name) { return Var(Compound::create(TK_VAR, range, {name})); } }; // WithItem represents an item using with a WithStmt. struct WithItem : public Expr { explicit WithItem(const TreeRef& tree) : Expr(tree) { tree_->match(TK_WITH_ITEM); } Expr target() const { return Expr(subtree(0)); } Maybe<Var> var() const { return Maybe<Var>(subtree(1)); } static WithItem create( const SourceRange& range, const Expr& target, const Maybe<Var>& var) { return WithItem(Compound::create(TK_WITH_ITEM, range, {target, var})); } }; // With represents a with statement consisting of a list of with items and a // body of statements. struct With : public Stmt { explicit With(const TreeRef& tree) : Stmt(tree) { tree_->match(TK_WITH); } List<WithItem> targets() const { return List<WithItem>(subtree(0)); } List<Stmt> body() const { return List<Stmt>(subtree(1)); } static With create( const SourceRange& range, const List<WithItem>& targets, const List<Stmt>& body) { return With(Compound::create(TK_WITH, range, {targets, body})); } }; struct TernaryIf : public Expr { explicit TernaryIf(const TreeRef& tree) : Expr(tree) { tree_->matchNumSubtrees(TK_IF_EXPR, 3); } Expr cond() const { return Expr(subtree(0)); } Expr true_expr() const { return Expr(subtree(1)); } Expr false_expr() const { return Expr(subtree(2)); } static TernaryIf create( const SourceRange& range, const Expr& cond, const Expr& true_expr, const Expr& false_expr) { return TernaryIf( Compound::create(TK_IF_EXPR, range, {cond, true_expr, false_expr})); } }; struct ListLiteral : public Expr { explicit ListLiteral(const TreeRef& tree) : Expr(tree) { tree_->match(TK_LIST_LITERAL); } List<Expr> inputs() const { return subtree(0); } static ListLiteral create( const SourceRange& range, const List<Expr>& inputs) { return ListLiteral(Compound::create(TK_LIST_LITERAL, range, {inputs})); } }; struct TupleLiteral : public Expr { explicit TupleLiteral(const TreeRef& tree) : Expr(tree) { tree_->match(TK_TUPLE_LITERAL); } List<Expr> inputs() const { return subtree(0); } static TupleLiteral create( const SourceRange& range, const List<Expr>& inputs) { return TupleLiteral(Compound::create(TK_TUPLE_LITERAL, range, {inputs})); } }; struct DictLiteral : public Expr { explicit DictLiteral(const TreeRef& tree) : Expr(tree) { tree_->match(TK_DICT_LITERAL); } List<Expr> key_inputs() const { return subtree(0); } List<Expr> value_inputs() const { return subtree(1); } static DictLiteral create( const 
SourceRange& range, const List<Expr>& keys, const List<Expr>& values) { return DictLiteral( Compound::create(TK_DICT_LITERAL, range, {keys, values})); } }; struct Starred : public Expr { explicit Starred(const TreeRef& tree) : Expr(tree) { tree_->match(TK_STARRED); } Expr expr() const { return Expr(subtree(0)); } static Starred create(const SourceRange& range, const Expr& expr) { return Starred(Compound::create(TK_STARRED, range, {expr})); } }; struct Delete : public Stmt { explicit Delete(const TreeRef& tree) : Stmt(tree) { tree_->match(TK_DELETE); } List<Expr> targets() const { return subtree(0); } static Delete create(const SourceRange& range, const List<Expr>& targets) { return Delete(Compound::create(TK_DELETE, range, {targets})); } }; /* * NOTE: transforming PEP 604 union into equivalent union type * * NOTE: Union[int, float] parses into: * <EXPR> expr:(subscript * (variable (ident Union)) * (list * (variable (ident int)) * (variable (ident float)))) * <KIND> subscript * * NOTE: (int | float) parses into: * <EXPR> expr:(| * (variable (ident int)) * (variable (ident float))) * <KIND> | */ inline void _flatten_pep604_union( const torch::jit::Expr& node, std::vector<torch::jit::Expr>* result) { // flatten possibly nested union expressions like (int | (float | str)) // into a flat list of expressions like [int, float, str] if (node.kind() == '|') { auto as_binop = torch::jit::BinOp(node); _flatten_pep604_union(as_binop.lhs(), result); _flatten_pep604_union(as_binop.rhs(), result); } else { result->push_back(node); } } inline std::vector<Expr> get_pep604_union_members(const Expr& node) { std::vector<Expr> result; _flatten_pep604_union(node, &result); return result; } // Flattens a PEP 604 union into a classical union. // For example, ((x | y) | z) is transformed into Union[x, y, z]. inline Expr pep604union_to_union(const Expr& expr) { // noop if not a pep604 union if (expr.kind() != '|') return expr; // In order to support unions with more than 2 operands ((x|y)|z), we need to // recursively flatten the tree of | expressions. auto members = get_pep604_union_members(expr); auto synthesised_union = Subscript::create( expr.range(), Var::create(expr.range(), Ident::create(expr.range(), "Union")), List<Expr>::create(expr.range(), members)); #if defined(__clang__) return std::move(synthesised_union); #else return synthesised_union; #endif } } // namespace torch::jit namespace std { template <typename T> struct iterator_traits<torch::jit::ListIterator<T>> : std::iterator_traits<torch::jit::TreeList::const_iterator> {}; } // namespace std ```
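The typed views above are thin wrappers over the same `TreeRef`s, so a tree built through the `create` factories can be re-wrapped and inspected later through its accessors. A small sketch follows; the function name `buildXPlusOne` is purely illustrative, and the `SourceRange` is assumed to come from the caller.

```cpp
#include <torch/csrc/jit/frontend/tree_views.h>

using namespace torch::jit;

// Builds the typed AST for `x + 1` and reads it back through the views.
Expr buildXPlusOne(const SourceRange& r) {
  auto x = Var::create(r, Ident::create(r, "x"));
  auto one = Const::create(r, "1");
  Expr sum = BinOp::create(r, '+', x, one);

  // The same underlying TreeRef can be re-wrapped in its typed view later.
  auto as_binop = BinOp(sum);
  TORCH_INTERNAL_ASSERT(as_binop.lhs().kind() == TK_VAR);
  TORCH_INTERNAL_ASSERT(Const(as_binop.rhs()).isIntegral());
  return sum;
}
```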
================================================================================================================================================= SOURCE CODE FILE: versioned_symbols.h LINES: 1 SIZE: 0.60 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\frontend\versioned_symbols.h ENCODING: utf-8
```h
#pragma once

#include <caffe2/serialize/versions.h>
#include <torch/csrc/Export.h>
#include <torch/csrc/jit/api/module.h>

#include <cstdint>

namespace torch::jit {
// Maps the given symbol into an implementation of its behavior at the
// given version.
// See note [Versioned Symbols]
TORCH_API Symbol get_symbol_for_version(const Symbol name, const uint64_t version);

// Maps the given kind to the minimum version that supports it.
// See note [Dynamic Versions and torch.jit.save vs. torch.save]
TORCH_API uint64_t get_min_version_for_kind(const NodeKind& kind);
} // namespace torch::jit
```
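A hedged sketch of how these two lookups might be used together when loading an older archive; the operator name `aten::div` and the file-format version `3` are arbitrary examples, not values taken from the notes referenced above.

```cpp
#include <torch/csrc/jit/frontend/versioned_symbols.h>

using namespace torch::jit;

void demo() {
  Symbol div = Symbol::fromQualString("aten::div");
  // Which implementation should stand in for aten::div at format version 3?
  Symbol versioned = get_symbol_for_version(div, /*version=*/3);
  // Minimum serialization version that supports the resolved kind.
  uint64_t min_version = get_min_version_for_kind(versioned);
  (void)min_version;
}
```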
======================================================================================================================================== SOURCE CODE FILE: alias_analysis.h LINES: 1 SIZE: 12.84 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\ir\alias_analysis.h ENCODING: utf-8 ```h #pragma once #include <ATen/core/alias_info.h> #include <c10/util/flat_hash_map.h> #include <torch/csrc/jit/ir/ir.h> #include <torch/csrc/jit/ir/type_hashing.h> #include <torch/csrc/jit/passes/create_functional_graphs.h> #include <torch/csrc/jit/passes/utils/memory_dag.h> namespace torch::jit { /** * Alias analysis pass. * * This pass produces an AliasDb that contains aliasing and mutation * information about the graph. Users can use this information to determine * whether mutations to the graph are safe, i.e. they don't reorder/change * nodes in a way that affects output. * * Every value with a mutable type (Tensors, Lists, Tuples, etc.) will be * associated with one or more "alias sets". If two values share an alias set, * that means they may alias, implying that a mutation to one value cannot be * reordered past a use of the other. Only reordering two reads of an alias set * is considered safe. * * There is a special alias set called the "wildcard set", which indicates that * we're not sure what this value may alias. To be conservative, we consider the * wildcard alias set as potentially aliasing any other wildcard value within * the same type class. Whenever a value becomes contained by another value, * such as when a Tensor is appended to a List[Tensor], the contained element * becomes part of the wildcard set. * * Values that contain other mutable types, such as List[Tensor], are * initialized as containing the Wildcard set for all contained mutable types. * * The AliasDb API references the idea of "mutable" vs "immutable" * types. "Mutable" means that the object's value can change, while * "immutable" means that the value is fixed. (For example, `List` is * mutable, so you can add and delete elements from it. On the other * hand, you can't modify a Tuple once you create it, making `Tuple` an * immutable container.) * * `isFrozen` - if the Module is frozen then consider attributes as freshly * created objects. Freezing API invokes alias analysis to check if they are * mutated internally. * * `descendFunctionCalls` - recursively analyze function and method calls * instead of conservative analysis. Generally analysis should be done after * inlining so the implmentation for recursive analysis is unoptimized. */ class AliasDb { public: TORCH_API explicit AliasDb( std::shared_ptr<Graph> graphi, bool isFrozen = false, bool descendFunctionCalls = false); TORCH_API ~AliasDb(); // There are limitations to what effects the alias analysis can track. Two // kinds of nodes may have untracked effects: // 1. Nodes that write to a value that may alias the graph inputs (since // the inputs can be used outside the graph). // 2. Nodes that write to something in the wildcard set. // // These nodes are considered not safe to eliminate or mutate under any // circumstances. bool writesToWildcard(Node* n) const; // Does `n` write to an alias of one of the values in `vs`? 
// if `recurseBlocks` is true, consider writes on the nodes in `n`s sub-blocks TORCH_API bool writesToAlias(Node* n, const ValueSet& vs) const; // Does `a` and `b` potentially share a memory location or do either // hold in memory any element that exists in the other TORCH_API bool mayContainAlias(Value* a, Value* b) const; TORCH_API bool mayContainAlias(Value* a, const at::ArrayRef<Value*> b) const; // Do any values in group `a` share a memory location or hold in memory // any element that exists in group `b` TORCH_API bool mayContainAlias( const at::ArrayRef<Value*> a, const at::ArrayRef<Value*> b) const; // Do `a` and `b` potentially share a memory location? TORCH_API bool mayAlias(const Value* a, const Value* b) const; // Do any values in group `a` potentially share a memory location with any // value in group `b`? i.e. may they overlap? TORCH_API bool mayAlias(const ValueSet& a, const ValueSet& b) const; // Do any nodes write to an alias set input to `n`? TORCH_API bool hasInputWriters(const Node* n) const; // Do any nodes write to an alias set output by `n`? TORCH_API bool hasOutputWriters(const Node* n) const; // Do any nodes write to an alias set inputed/outputed by `n`? TORCH_API bool hasWriters(const Node* n) const; // Do any nodes write to `v`s memory location? TORCH_API bool hasWriters(const Value* v) const; // Is the operation in-place? i.e. doesn't write anywhere but locations it // reads from. TORCH_API bool isMutable(Node* n) const; TORCH_API bool escapesScope(const at::ArrayRef<Value*>& vs) const; // Is it safe to change whether `a` and `b` alias each other ? TORCH_API bool safeToChangeAliasingRelationship( const at::ArrayRef<Value*>& a, const at::ArrayRef<Value*>& b) const; // Move `n` (already in the graph) after `movePoint` in the topological order. // // Tries to preserve value dependencies, so other nodes might be moved. We // make two guarantees about the postcondition of the node list: // - `n` is directly after `movePoint`. // - only nodes between `n` and `movePoint` have been moved. // // Returns `false` if it's impossible to move `n` after `MovePoint` without // violating dependencies, otherwise executes the move and returns `true` TORCH_API bool moveAfterTopologicallyValid(Node* n, Node* movePoint); TORCH_API bool moveBeforeTopologicallyValid(Node* n, Node* movePoint); bool couldMoveAfterTopologically(Node* n, Node* movePoint); bool couldMoveBeforeTopologically(Node* n, Node* movePoint); // For debugging: print alias db state to stdout TORCH_API void dump() const; TORCH_API std::string toString() const; // Generates a DOT (www.graphviz.org) graph representation // // Returns `true` if the output file was successfully generated // // WARNING: The output dot file path can't include shell specific notations, // for example you can't use "~/temp/aliasdb.dot" // (instead, use "/home/user/temp/aliasdb.dot") // TORCH_API bool dumpToGraphvizFile(const char* filename) const; TORCH_API std::string toGraphviz() const; // Returns `true` if the given element is mutable or if it is a // container type with an internal mutable element (e.g. // `Tuple[int, Tensor]` has an internal mutable type `Tensor`, so // it would be considered a "mutable type" in AliasDb) static bool isMutableType(const Value* v); static bool isMutableType(const TypePtr& type); /** * Mutation API * * These methods allow you to update AliasDb in-place if you are performing * graph mutation. * * WARNING: These methods should be considered INTERNAL. 
They do not perform * very many correctness checks, the user is responsible for making sure they * are updating AliasDb correctly. `Lint()`ing the AliasDb can help with * this. */ // Copy `existing`s aliasing info to `new_value`, and remove `existing`. TORCH_API void replaceWithNewValue(Value* existing, Value* new_value); // Copy `from`s aliasing info to `to`. TORCH_API void copyValue(Value* from, Value* to); // Create a new `value` that does not alias anything else. TORCH_API void createValue(const Value* value); // Enable more precise treatment of prim::TupleConstruct. void enablePreciseTupleContainerAnalysis(); friend struct MutationRemover; private: // Helper for topologically-safe node moves. class WorkingSet; enum class MoveSide { BEFORE, AFTER }; bool tryMove(Node* toMove, Node* movePoint, MoveSide moveSide, bool dryRun); void move(Node* toMove, Node* movePoint, MoveSide moveSide); bool isBeforeOrAfter(const Node* n, MoveSide moveSide) const; bool isMutableTypeInternal(const Value* v) const; bool isMutableTypeInternal(const TypePtr& type) const; /** * Write and read internal API */ // Get all the values that `n` writes to. // NOTE: this only returns values directly written to, not aliases thereof // // if `recurseBlocks` is true, gather writes on the nodes in `n`s sub-blocks MemoryLocations getWrites(Node* n) const; void getWritesImpl(Node* n, MemoryLocations& ret) const; // Register the fact that `n` writes to `v`. void registerWrite(const Value* v, Node* n, bool writeToContained = false); // Get all the values that `n` reads from. // if `recurseBlocks` is true, gather reads on the nodes in `n`s sub-blocks MemoryLocations getReads(Node* n) const; void getReadsImpl(Node* n, MemoryLocations& ret) const; /** * Wildcard methods */ // Register `v` as a wildcard value. std::optional<Element*> setWildcard(const Value* v); // Is this a value which will not alias? 
bool nonAliasingValue(const Value* elem) const; /** * Special analysis methods */ void analyze(const std::shared_ptr<Graph>& graph); void analyze(Block* block); void analyze(Node* node); void analyzeImpl(Node* node); void analyzeIf(Node* node); void analyzeLoop(Node* node); void analyzeSubgraph(Node* node, const std::shared_ptr<Graph>& subgraph); void analyzeSubgraph(Node* node); void analyzeCreator(Node* node); void analyzeExtractor(Node* node); void analyzeChunk(Node* node); void analyzeBroadcastingChunk(Node* node); void analyzeFork(Node* node); void analyzeWait(Node* node); void analyzeAwaitable(Node* node); void analyzeAwaitableWait(Node* node); void analyzeRpcAsync(Node* node); void analyzeBatchNorm(Node* node); void analyzeInstanceNorm(Node* node); void analyzeGradOf(Node* node); void analyzeSetAttr(Node* node); void analyzeConservative(Node* node); void analyzeContainerConstruct(Node* node); bool tryRegisteredAnalysis(Node* node); /** * Alias manipulation methods */ void makeAllAlias(const std::vector<Value*>& values); void makePointerTo(const Value* value, const Value* to); TORCH_API void addToContainedElements( const Value* element, const Value* container); void mapAliases(at::ArrayRef<Value*> to, at::ArrayRef<Value*> from); void giveFreshAlias( const Value* value, bool add_wildcard_to_contained_elems = true); Element* getOrCreateElement(const Value* value); const AliasTypeSet* mapTypeToAliasTypeSetPtr(const TypePtr& type) const; bool functionalNonEscapingListUse(const Use& use) const; bool functionalNonEscapingTupleUse(const Use& use) const; std::shared_ptr<Graph> graph_; // If the Module is frozen then consider attributes as freshly created // objects. Freezing API invokes alias analysis to check if they are mutated // internally. bool isFrozen_; bool descend_function_calls_; std::unordered_map<Graph*, std::vector<std::shared_ptr<Graph>>> function_call_copies_; // The points-to graph that stores aliasing relationships std::unique_ptr<MemoryDAGBuilder> memoryDAGBuilder_; std::unique_ptr<MemoryDAG> memoryDAG_; // Mapping of values to MemoryDAG elements ska::flat_hash_map<const Value*, Element*> elementMap_; // All wildcard Elements (one for each unique mutable type) ska::flat_hash_map<TypePtr, Element*, HashType, EqualType> wildcardIndex_; Element* getWildcard(const TypePtr& type) const; std::optional<Element*> tryGetOrCreateWildcard(const TypePtr& type); void addContainedTypesToFreshElement( Element* container_elem, const AliasTypeSet& mut_types); void pointUnionTypeElementToAllContainedTypes( Element* container_elem, const AliasTypeSet& mut_types); std::vector<Element*> getElements(at::ArrayRef<Value*> vs) const; bool mayAliasWildcard(const Value* v) const; bool mayAliasWildcard(const at::ArrayRef<Value*> vs) const; bool hasWriters(const at::ArrayRef<Value*>& values) const; // Cached mapping of type ptrs to their mutable types mutable ska::flat_hash_map<TypePtr, AliasTypeSet> mapped_mutable_types_; /** * State for tracking write info. */ // Write registry where the analysis can record the writes as it sees them. // This information is later denormalized into various caches to improve query // efficiency. struct WriteRegistry; std::unique_ptr<WriteRegistry> writeRegistry_; // Map of nodes to the memory locations that they write to using TWriteIndex = ska::flat_hash_map<Node*, MemoryLocations>; std::optional<TWriteIndex> writeIndex_; // Collection of all memory locations that are written to. 
std::optional<MemoryLocations> writtenToLocationsIndex_; void buildWrittenToLocationsIndex(); std::unordered_set<const Value*> wildcards_; std::string getElementName(const Element* e) const; friend void Lint(const AliasDb* db); }; // Helper check that invariants over AliasDb are maintained. // Useful if you are using the AliasDb mutation API and want to check you did // the right thing. TORCH_API void Lint(const AliasDb* db); } // namespace torch::jit ```
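A typical pattern for the movement API above, sketched as a hypothetical helper `trySinkNode`: build the db, bail out conservatively if the node has writers, then let `moveAfterTopologicallyValid` decide whether the move is legal.

```cpp
#include <torch/csrc/jit/ir/alias_analysis.h>

using namespace torch::jit;

// Returns true if `n` was sunk directly after `movePoint`.
bool trySinkNode(const std::shared_ptr<Graph>& graph, Node* n, Node* movePoint) {
  AliasDb db(graph); // a real pass would build this once and reuse it
  // Conservatively refuse to touch nodes whose inputs or outputs are
  // written to elsewhere in the graph.
  if (db.hasWriters(n)) {
    return false;
  }
  // Performs the move only if it can be done without violating data
  // dependencies; other nodes may be moved along with `n`.
  return db.moveAfterTopologicallyValid(n, movePoint);
}
```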
==================================================================================================================================== SOURCE CODE FILE: attributes.h LINES: 1 SIZE: 4.97 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\ir\attributes.h ENCODING: utf-8 ```h #pragma once #include <ATen/core/Tensor.h> #include <string> #include <vector> #include <ATen/core/jit_type_base.h> #include <ATen/core/symbol.h> #include <torch/csrc/Export.h> namespace torch::jit { using ::c10::Symbol; constexpr int max_tensor_display_size = 10; enum class AttributeKind { f, fs, c, cs, i, is, s, ss, t, ts, g, gs, ty, tys, ival }; static inline const char* toString(AttributeKind kind) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays) static const char* names[] = { "f", "c", "cs", "fs", "i", "is", "s", "ss", "t", "ts", "g", "gs", "ty", "tys", "ival"}; AT_ASSERT(size_t(kind) < sizeof(names) / sizeof(*names)); return names[int(kind)]; } struct AttributeValue { AttributeValue(Symbol name) : name(name) {} using Ptr = std::unique_ptr<AttributeValue>; Symbol name; virtual AttributeKind kind() const = 0; virtual Ptr clone() const = 0; virtual ~AttributeValue() = default; }; template <typename T, AttributeKind Kind> struct ScalarAttributeValue : public AttributeValue { using ConstructorType = T; using ValueType = T; ScalarAttributeValue(Symbol name, ConstructorType value_) : AttributeValue(name), value_(std::move(value_)) {} ValueType& value() { return value_; } Ptr clone() const override { return Ptr(new ScalarAttributeValue(name, value_)); } AttributeKind kind() const override { return Kind; } private: ValueType value_; }; template <typename T, AttributeKind Kind> struct VectorAttributeValue : public AttributeValue { using ConstructorType = std::vector<T>; using ValueType = std::vector<T>; // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) VectorAttributeValue(Symbol name, ConstructorType value_) : AttributeValue(name), value_(std::move(value_)) {} ValueType& value() { return value_; } AttributeKind kind() const override { return Kind; } std::unique_ptr<AttributeValue> clone() const override { auto copy = value_; return Ptr(new VectorAttributeValue(name, std::move(copy))); } private: ValueType value_; }; using ComplexAttr = ScalarAttributeValue<c10::complex<double>, AttributeKind::c>; using ComplexValsAttr = VectorAttributeValue<c10::complex<double>, AttributeKind::cs>; using FloatAttr = ScalarAttributeValue<double, AttributeKind::f>; using FloatsAttr = VectorAttributeValue<double, AttributeKind::fs>; using IntAttr = ScalarAttributeValue<int64_t, AttributeKind::i>; using IntsAttr = VectorAttributeValue<int64_t, AttributeKind::is>; using StringAttr = ScalarAttributeValue<std::string, AttributeKind::s>; using StringsAttr = VectorAttributeValue<std::string, AttributeKind::ss>; using TensorAttr = ScalarAttributeValue<at::Tensor, AttributeKind::t>; using TensorsAttr = VectorAttributeValue<at::Tensor, AttributeKind::ts>; using TypeAttr = ScalarAttributeValue<c10::TypePtr, AttributeKind::ty>; using TypesAttr = VectorAttributeValue<c10::TypePtr, AttributeKind::tys>; using IValueAttr = ScalarAttributeValue<at::IValue, AttributeKind::ival>; struct Graph; // We special case Graph attributes like this because we want to ensure that // Graph::copy() is called when we clone() these attributes. 
struct TORCH_API GraphAttr : public AttributeValue { using ConstructorType = std::shared_ptr<Graph>; using ValueType = std::shared_ptr<Graph>; GraphAttr(Symbol name, ConstructorType value_) : AttributeValue(name), value_(std::move(value_)) {} ValueType& value() { return value_; } Ptr clone() const override; AttributeKind kind() const override { return AttributeKind::g; } private: std::shared_ptr<Graph> value_; }; struct TORCH_API GraphsAttr : public AttributeValue { using ConstructorType = std::vector<std::shared_ptr<Graph>>; using ValueType = std::vector<std::shared_ptr<Graph>>; // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) GraphsAttr(Symbol name, ConstructorType value_) : AttributeValue(name), value_(std::move(value_)) {} ValueType& value() { return value_; } AttributeKind kind() const override { return AttributeKind::gs; } std::unique_ptr<AttributeValue> clone() const override; private: ValueType value_; }; struct IRAttributeError : public std::exception { IRAttributeError(Symbol name, bool defined) { std::stringstream ss; // NOLINTNEXTLINE(bugprone-branch-clone) if (!defined) { ss << "required keyword attribute '" << name.toUnqualString() << "' is undefined"; } else { ss << "required keyword attribute '" << name.toUnqualString() << "' has the wrong type"; } msg = ss.str(); } const char* what() const noexcept override { return msg.c_str(); } private: std::string msg; }; } // namespace torch::jit ```
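These attribute objects are normally created and owned by `Node`'s attribute API rather than by hand, but a direct sketch shows what a `ScalarAttributeValue` stores; the attribute name `"dim"` is an arbitrary example.

```cpp
#include <torch/csrc/jit/ir/attributes.h>

using namespace torch::jit;

void demo() {
  // An integer attribute named "dim" (AttributeKind::i).
  IntAttr dim(Symbol::attr("dim"), /*value=*/2);
  int64_t v = dim.value();                // 2
  AttributeValue::Ptr copy = dim.clone(); // deep copy; kind() and name preserved
  (void)v;
  (void)copy;
}
```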
=================================================================================================================================== SOURCE CODE FILE: constants.h LINES: 1 SIZE: 2.02 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\ir\constants.h ENCODING: utf-8
```h
#pragma once

#include <ATen/core/ivalue.h>
#include <ATen/core/jit_type.h>
#include <torch/csrc/Export.h>
#include <torch/csrc/jit/frontend/source_range.h>
#include <torch/csrc/jit/ir/scope.h>

// helpers for handling constants in the IR
// - create constant nodes from ints, floats, complex, intlist, Tensors, and
//   other types
// - implement primitive constant ops.
namespace torch::jit {

using ::c10::IValue;

struct Graph;
struct Value;

// thrown when insertConstant cannot encode the IValue into a graph
struct TORCH_API constant_not_supported_error : public std::runtime_error {
  using runtime_error::runtime_error;
};

TORCH_API Value* insertConstant(
    Graph& g,
    const IValue& val,
    std::optional<SourceRange> loc = std::nullopt,
    std::optional<ScopePtr> scope = std::nullopt);

// note: prefer g.insertConstant(val, loc) which does exactly the same thing
// this function is only declared/defined here because its implementation is
// closely related to the implementation of prim::Constant that is also in
// constants.cpp.
//
// returns std::nullopt if the IValue kind cannot be inserted as a constant
TORCH_API std::optional<Value*> tryInsertConstant(
    Graph& g,
    const IValue& val,
    std::optional<SourceRange> loc = std::nullopt,
    std::optional<ScopePtr> scope = std::nullopt);

////////////////////////////////////////////////////////////////////////////////
// Helper for retrieving constants
////////////////////////////////////////////////////////////////////////////////

// attempt to convert a (possibly constant) Value* into an interpreter value
// (IValue). returns std::nullopt if the Value* was not constant
TORCH_API std::optional<IValue> toIValue(const Value* v);

// if a value is a constant then try to turn into type T using the
// same rules as the interpreter
template <typename T>
std::optional<T> constant_as(const Value* v) {
  if (auto ivalue = toIValue(v)) {
    return ivalue->to<T>();
  }
  return std::nullopt;
}

} // namespace torch::jit
```
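A short sketch of the round trip these helpers provide, assuming a `Graph` is already available; the function name `demo` is illustrative.

```cpp
#include <torch/csrc/jit/ir/constants.h>
#include <torch/csrc/jit/ir/ir.h>

using namespace torch::jit;

void demo(Graph& g) {
  // Insert `42` as a prim::Constant node and get the Value it produces.
  Value* c = insertConstant(g, IValue(static_cast<int64_t>(42)));
  // Read it back with the same conversion rules the interpreter uses.
  if (auto i = constant_as<int64_t>(c)) {
    TORCH_INTERNAL_ASSERT(*i == 42);
  }
}
```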
========================================================================================================================================= SOURCE CODE FILE: graph_node_list.h LINES: 1 SIZE: 6.40 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\ir\graph_node_list.h ENCODING: utf-8 ```h #pragma once #include <c10/util/Exception.h> namespace torch::jit { // Intrusive doubly linked lists with sane reverse iterators. // The header file is named generic_graph_node_list.h because it is ONLY // used for Graph's Node lists, and if you want to use it for other // things, you will have to do some refactoring. // // At the moment, the templated type T must support a few operations: // // - It must have a field: T* next_in_graph[2] = { nullptr, nullptr }; // which are used for the intrusive linked list pointers. // // - It must have a method 'destroy()', which removes T from the // list and frees a T. // // In practice, we are only using it with Node and const Node. 'destroy()' // needs to be renegotiated if you want to use this somewhere else. // // Regardless of the iteration direction, iterators always physically point // to the element they logically point to, rather than // the off-by-one behavior for all standard library reverse iterators like // std::list. // The list is includes two sentinel nodes, one at the beginning and one at the // end with a circular link between them. It is an error to insert nodes after // the end sentinel node but before the beginning node: // Visualization showing only the next() links: // HEAD -> first -> second -> ... -> last -> TAIL // ^------------------------------------------ // Visualization showing only the prev() links: // HEAD <- first <- second <- ... <- last <- TAIL // ------------------------------------------^ static constexpr int kNextDirection = 0; static constexpr int kPrevDirection = 1; template <typename T> struct generic_graph_node_list; template <typename T> struct generic_graph_node_list_iterator; struct Node; using graph_node_list = generic_graph_node_list<Node>; using const_graph_node_list = generic_graph_node_list<const Node>; using graph_node_list_iterator = generic_graph_node_list_iterator<Node>; using const_graph_node_list_iterator = generic_graph_node_list_iterator<const Node>; template <typename T> struct generic_graph_node_list_iterator { generic_graph_node_list_iterator() : cur(nullptr), d(kNextDirection) {} generic_graph_node_list_iterator(T* cur, int d) : cur(cur), d(d) {} generic_graph_node_list_iterator( const generic_graph_node_list_iterator& rhs) = default; generic_graph_node_list_iterator( generic_graph_node_list_iterator&& rhs) noexcept = default; generic_graph_node_list_iterator& operator=( const generic_graph_node_list_iterator& rhs) = default; generic_graph_node_list_iterator& operator=( generic_graph_node_list_iterator&& rhs) noexcept = default; T* operator*() const { return cur; } T* operator->() const { return cur; } generic_graph_node_list_iterator& operator++() { AT_ASSERT(cur); cur = cur->next_in_graph[d]; return *this; } generic_graph_node_list_iterator operator++(int) { generic_graph_node_list_iterator old = *this; ++(*this); return old; } generic_graph_node_list_iterator& operator--() { AT_ASSERT(cur); cur = cur->next_in_graph[reverseDir()]; return *this; } generic_graph_node_list_iterator operator--(int) { generic_graph_node_list_iterator old = *this; --(*this); return old; } // erase cur without invalidating this iterator // named differently from destroy so that ->/. 
bugs do not // silently cause the wrong one to be called. // iterator will point to the previous entry after call void destroyCurrent() { T* n = cur; cur = cur->next_in_graph[reverseDir()]; n->destroy(); } generic_graph_node_list_iterator reverse() { return generic_graph_node_list_iterator(cur, reverseDir()); } private: int reverseDir() { return d == kNextDirection ? kPrevDirection : kNextDirection; } T* cur; int d; // direction 0 is forward 1 is reverse, see next_in_graph }; template <typename T> struct generic_graph_node_list { using iterator = generic_graph_node_list_iterator<T>; using const_iterator = generic_graph_node_list_iterator<const T>; generic_graph_node_list_iterator<T> begin() { return generic_graph_node_list_iterator<T>(head->next_in_graph[d], d); } generic_graph_node_list_iterator<const T> begin() const { return generic_graph_node_list_iterator<const T>(head->next_in_graph[d], d); } generic_graph_node_list_iterator<T> end() { return generic_graph_node_list_iterator<T>(head->next_in_graph[!d], d); } generic_graph_node_list_iterator<const T> end() const { return generic_graph_node_list_iterator<const T>( head->next_in_graph[!d], d); } generic_graph_node_list_iterator<T> rbegin() { return reverse().begin(); } generic_graph_node_list_iterator<const T> rbegin() const { return reverse().begin(); } generic_graph_node_list_iterator<T> rend() { return reverse().end(); } generic_graph_node_list_iterator<const T> rend() const { return reverse().end(); } generic_graph_node_list reverse() { return generic_graph_node_list(head->next_in_graph[!d], !d); } const generic_graph_node_list reverse() const { return generic_graph_node_list(head->next_in_graph[!d], !d); } T* front() { return head->next_in_graph[d]; } const T* front() const { return head->next_in_graph[d]; } T* back() { return head->next_in_graph[!d]; } const T* back() const { return head->next_in_graph[!d]; } generic_graph_node_list(T* head, int d) : head(head), d(d) {} private: T* head; // both head and tail are sentinel nodes // the first real node is head->next_in_graph[d] // the tail sentinel is head->next_in_graph[!d] int d; }; template <typename T> static inline bool operator==( generic_graph_node_list_iterator<T> a, generic_graph_node_list_iterator<T> b) { return *a == *b; } template <typename T> static inline bool operator!=( generic_graph_node_list_iterator<T> a, generic_graph_node_list_iterator<T> b) { return *a != *b; } } // namespace torch::jit namespace std { template <typename T> struct iterator_traits<torch::jit::generic_graph_node_list_iterator<T>> { using difference_type = int64_t; using value_type = T*; using pointer = T**; using reference = T*&; using iterator_category = bidirectional_iterator_tag; }; } // namespace std ```
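The `destroyCurrent()` behavior documented above is what makes in-place deletion during forward iteration safe. Below is a sketch against `Graph::nodes()` (declared in `ir.h`), using a hypothetical dead-constant sweep as the running example.

```cpp
#include <torch/csrc/jit/ir/ir.h>

using namespace torch::jit;

// Deletes unused prim::Constant nodes while iterating the list in place.
void removeDeadConstants(Graph& g) {
  for (auto it = g.nodes().begin(); it != g.nodes().end(); ++it) {
    if (it->kind() == prim::Constant && !it->hasUses()) {
      // destroyCurrent() erases the current node and leaves the iterator on
      // the previous entry, so ++it in the loop header resumes at the next node.
      it.destroyCurrent();
    }
  }
}
```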
===================================================================================================================================== SOURCE CODE FILE: graph_utils.h LINES: 1 SIZE: 0.51 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\ir\graph_utils.h ENCODING: utf-8 ```h #pragma once #include <torch/csrc/jit/ir/ir.h> #include <vector> namespace torch::jit { TORCH_API TypePtr getTensorType(const at::Tensor& t, bool complete); TORCH_API TypePtr inferShapeAndTypeForInput( TypePtr input_type, Stack::const_iterator& s_iter, const Stack::const_iterator& s_iter_end, bool complete); TORCH_API void setInputTensorTypes( Graph& g, const Stack& stack, bool complete, const std::vector<int>& param_count_list = {}); } // namespace torch::jit ```
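A hedged sketch of how these helpers might be driven: stamping example-input types onto a graph before running shape-dependent passes. The graph and example tensors are placeholders, and the precise meaning of `complete` is defined by the implementation in `graph_utils.cpp`, not by this snippet.

```cpp
#include <torch/csrc/jit/ir/graph_utils.h>
#include <torch/csrc/jit/ir/ir.h>

void annotate_inputs(torch::jit::Graph& g, const std::vector<at::Tensor>& examples) {
  // Stack is a std::vector<IValue>; wrap each example tensor as an IValue.
  torch::jit::Stack stack;
  for (const at::Tensor& t : examples) {
    stack.emplace_back(t);
  }
  // Request detailed tensor types on the graph inputs (complete = true);
  // pass false to keep the recorded types less specific.
  torch::jit::setInputTensorTypes(g, stack, /*complete=*/true);
}
```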
============================================================================================================================ SOURCE CODE FILE: ir.h LINES: 1 SIZE: 54.71 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\ir\ir.h ENCODING: utf-8 ```h #pragma once #include <torch/csrc/jit/ir/attributes.h> #include <torch/csrc/jit/ir/graph_node_list.h> #include <torch/csrc/jit/ir/named_value.h> #include <torch/csrc/jit/ir/scope.h> #include <torch/csrc/jit/runtime/operator.h> #include <torch/csrc/Export.h> #include <torch/csrc/utils/python_stub.h> #include <torch/csrc/utils/schema_info.h> #include <ATen/Utils.h> #include <ATen/core/Tensor.h> #include <ATen/core/dynamic_type.h> #include <ATen/core/enum_type.h> #include <ATen/core/functional.h> #include <ATen/core/interned_strings.h> #include <ATen/core/ivalue.h> #include <ATen/core/jit_type.h> #include <c10/util/ArrayRef.h> #include <c10/util/Exception.h> #include <optional> #include <functional> #include <iosfwd> #include <unordered_set> #include <vector> // Forward declare, the real meat is in python_ir.cpp template <class T> class THPPointer; using THPObjectPtr = THPPointer<PyObject>; using pyobj_list = std::vector<THPObjectPtr>; namespace torch::jit { namespace utils { TORCH_API std::string getNodesModuleHierarchy(const Node& n); } // namespace utils class AliasDb; using ::c10::Argument; using ::c10::FunctionSchema; using ::c10::Symbol; using ::c10::ivalue::Shared; using ::c10::IValue; using ::c10::ivalue::Future; using ::c10::ivalue::ConstantString; #define C10_USING(T) using ::c10::T; C10_FORALL_TYPES(C10_USING) #undef C10_USING #define C10_USING(T) using ::c10::T##Ptr; C10_FORALL_TYPES(C10_USING) #undef C10_USING using ::c10::Type; using ::c10::TypeEnv; using ::c10::TypePtr; using ::c10::getTypePtr; using ::c10::MatchTypeReturn; using ::c10::TypeKind; using ::c10::fmap; namespace prim { using namespace ::c10::prim; } namespace attr { using namespace ::c10::attr; } namespace aten { using namespace ::c10::aten; } namespace cuda { #if !defined(USE_ROCM) using namespace ::c10::cuda; #endif } // namespace cuda struct Function; struct GraphFunction; struct MatchedSchema; // A Graph represents one "function" of computation. // It uses a simple ownership model where the graph owns all the nodes inside // it. All references inside the graph are raw pointers. Destroying the Graph // will invalidate any pointers to nodes in the graph. struct Graph; // Node is the base class of the IR graph. It represents one computation // and dependencies on a list of Values. The "prim-ops", so to speak. struct Node; // A Value represents an input or output to node that is either a // Tensor or an opaque Handle object, as determined by type(). struct Value; TORCH_API std::ostream& operator<<(std::ostream& out, const Graph& g); TORCH_API std::ostream& operator<<(std::ostream& out, const Node& n); // A list of nodes, with inputs and outputs struct Block; // Each use is represented by this type, see 'Node::uses()' // 'user' is the consumer of the value, 'offset' is the index into // 'user's input this where the producers will be found. 
struct Use { Use(Node* user, size_t offset) : user(user), offset(offset) {} Node* user; size_t offset; bool operator==(const Use& b) { return user == b.user && offset == b.offset; } }; // Note [User node does not uniquely identify use] // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // A while back, we wrote some code manipulating uses that looked like this: // // for (auto& use : used_val->uses_) { // if (use.user == this_node) { // use.offset += 1; // break; // } // } // // This code is trying to find a particular use (our node's use) to update it. // However, it's wrong: there may be *multiple* uses of a value %x in a node, // as might be the case in this IR: // // %y = Add %x %x // // In this case, there are two uses of %x whose user is the node 'Add %x %x'. // So, "use induced by this node" is not a well-formed concept. // // If you are looking for "use induced by an input", it's best to use // findUseForInput() to get it. // the list types are intentionally simple, but we type-def // them here so if we need to change them, refactoring will be easier using node_list = std::vector<Node*>; using value_list = std::vector<Value*>; using use_list = std::vector<Use>; template <typename T> using ArrayRef = at::ArrayRef<T>; using NodeKind = Symbol; using topo_position_t = int64_t; using ValueSet = std::unordered_set<const Value*>; struct OperatorSet; template <typename T> struct OperatorMap; // This is a wrapper to allow invalidating the Python object // safely when the C++ object for a Node/Value/Block is deleted // like much of graph, it isn't safe for different threads to // access the same graph template <typename T> struct Wrap { explicit Wrap(T* p) : elem(p) {} void clear() { if (clear_cb) { clear_cb(elem); } elem = nullptr; } T* elem; void (*clear_cb)(void*){nullptr}; }; struct Value { AT_DISALLOW_COPY_AND_ASSIGN(Value); Value(Node* node_, size_t offset_); private: friend struct Node; friend struct Graph; Node* node_; size_t offset_; size_t unique_ = 0; // unique id use_list uses_; std::string unique_name_; TypePtr type_; // a managing wrapper for Python to allow invalidation std::shared_ptr<Wrap<Value>> wrap_; public: Value* setType(TypePtr type); TORCH_API void inferTypeFrom(const at::Tensor& output); TORCH_API void inferTypeFrom( const c10::intrusive_ptr<c10::ivalue::Object>& output); const TypePtr& type() const { AT_ASSERT(type_ != nullptr); return type_; } bool requires_grad() const { return type()->requires_grad(); } bool isCompleteTensor() const { if (auto pt = type()->cast<TensorType>()) { return pt->isComplete(); } return false; } TORCH_API bool mustBeNone() const; TORCH_API bool mustNotBeNone() const; size_t unique() const { return unique_; } bool hasDebugName() const { return !unique_name_.empty(); } static bool isValidName(const std::string& name); TORCH_API Value* setDebugName(const std::string& name); std::string debugName() const { if (hasDebugName()) { return unique_name_; } return std::to_string(unique()); } TORCH_API std::string debugNameBase() const; Node* node() { return node_; } size_t offset() const { return offset_; } void setOffset(size_t offset) { offset_ = offset; } const Node* node() const { return node_; } /** * @warning NEVER pass raw pointer of smart pointer managed Graph to Python. * Check #87343 for details. 
*/ Graph* owningGraph(); const Graph* owningGraph() const; // TODO: make this more const correct const use_list& uses() const { return uses_; } bool hasUses() const { return !uses().empty(); } TORCH_API void replaceFirstUseWith(Value* newValue); // Replaces all uses of this value with 'newValue'. // // Given: %3 = f(%1, %2) // %4 = g(%3) // %5 = h(%3, %3) // Execute: %3.replaceAllUsesWith(%6) // Result: %3 = f(%1, %2) // %4 = g(%6) // %5 = h(%6, %6) TORCH_API void replaceAllUsesWith(Value* newValue); // Replaces all uses of this value with 'newValue' after 'node'. // Given: %3 = f(%1, %2) // %4 = g(%3) // %5 = inplace_(%3) // %6 = h(%3, %3) // Execute: %3.replaceAllUsesAfterNodeWith(%5.node(), %5) // Result: %3 = f(%1, %2) // %4 = g(%3) // %5 = inplace_(%3) // %6 = h(%5, %5) // XXX: does not check scoping legality, consider using // replaceAllUsesDominatedByNodeWith TORCH_API void replaceAllUsesAfterNodeWith(const Node* node, Value* newValue); // Replaces all uses of this value with 'newValue' that are dominated by // 'node'. Given: // x = op(...). // if cond: // z = foo(..) // bar(x) // else: // print(x) // x.replaceAllUsesDominatedByNodeWith(foo, z) would replace bar(x) // but not print(x) because print is not dominated by foo. // replaceAllUsesAfterNode does not check domination, so in this example // it would produce invalid IR. TORCH_API void replaceAllUsesDominatedByNodeWith( const Node* node, Value* newValue); TORCH_API Value* copyMetadata(Value* from); TORCH_API std::shared_ptr<Wrap<Value>> wrap() { if (!wrap_) { wrap_ = std::make_shared<Wrap<Value>>(this); } return wrap_; } virtual ~Value() { if (wrap_) { wrap_->clear(); } } }; struct TORCH_API Node { AT_DISALLOW_COPY_AND_ASSIGN(Node); friend struct Graph; friend struct Block; friend struct Value; friend graph_node_list; friend const_graph_node_list; friend graph_node_list_iterator; friend const_graph_node_list_iterator; private: const NodeKind kind_; std::vector<Value*> inputs_; std::vector<Value*> outputs_; // subblocks std::vector<Block*> blocks_; Graph* graph_; Block* owning_block_; std::optional<SourceRange> source_range_; ScopePtr scope_; std::optional<InlinedCallStackPtr> callstack_; // Assumes FunctionSchemas are persistent, so we don't manage their lifetime. // This field is effectively a cache that's populated on attribute lookups and // invalidated every time we perform an operation that could potentially // change the schema. note: mutable because op_ is effectively a cache mutable const Operator* op_; topo_position_t topo_position_ = 0; // a managing wrapper for Python to allow invalidation std::shared_ptr<Wrap<Node>> wrap_; // Stores the full schema name, if the operator is historic. // When the operator is deprecated or the name of the operator // is changed, we need to rely on this name // to retrieve old schemas to successfully apply upgraders // for this operator. std::optional<std::string> historic_schema_name_ = std::nullopt; protected: Node(Graph* graph_, NodeKind kind_); // defined after graph public: // Every Node except the Return/Param Nodes is associated with exactly one // place in the Node list of the Graph. This Node list is a circular // doubly-linked list. The Return Node is used as the sentinel for the // "beginning"/"end" of the list. This means that you can tell when // you've traversed the entire list without worrying about null // pointers. `next_in_graph[0]` is the pointer to the next Node, while // `next_in_graph[1]` is the pointer to the previous Node. 
The // linked list is implemented as an array to allow the same iterator // class for forward and reversed Node lists. Taken together, this // list also represents a topological sort of the Nodes in the Graph. // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,cppcoreguidelines-non-private-member-variables-in-classes,modernize-avoid-c-arrays) Node* next_in_graph[2] = {nullptr, nullptr}; std::shared_ptr<Wrap<Node>> wrap() { if (!wrap_) { wrap_ = std::make_shared<Wrap<Node>>(this); } return wrap_; } const std::optional<std::string> getHistoricSchemaName() { return historic_schema_name_; } void setHistoricSchemaName(const std::string& name) { historic_schema_name_ = name; } Node*& next() { return next_in_graph[kNextDirection]; } Node*& prev() { return next_in_graph[kPrevDirection]; } Node* const& next() const { return next_in_graph[kNextDirection]; } Node* const& prev() const { return next_in_graph[kPrevDirection]; } NodeKind kind() const { return kind_; } Node* setSourceRange(SourceRange r) { source_range_ = std::move(r); return this; } SourceRange sourceRange() const; /** * @warning NEVER pass raw pointer of smart pointer managed Graph to Python. * Check #87343 for details. */ Graph* owningGraph() { return graph_; } const Graph* owningGraph() const { return graph_; } Block* owningBlock() { return owning_block_; } const Block* owningBlock() const { return owning_block_; } ScopePtr scope() { return scope_; } void setScope(ScopePtr scope) { scope_ = std::move(scope); } std::string scopeName() const { if (!scope_) { return ""; } return scope_->namesFromRoot(); } // Copies the source range, scope and callstack from another node. Node* copyMetadata(Node* from) { this->setSourceRange(from->sourceRange()); this->setScope(from->scope()); if (auto cs = from->callstack()) { this->setCallStack(*cs); } return this; } std::optional<InlinedCallStackPtr> callstack() const { return callstack_; } void setCallStack(InlinedCallStackPtr cs) { callstack_ = std::move(cs); } // NB: This returns an ArrayRef; that means that it will // get invalidated if you resize inputs (e.g., using addInput) // We can't return a std::vector<Node*>& because there's no // way to soundly cast to std::vector<const Node*> (an insane // implementation of std::vector could make this representationally // different.) at::ArrayRef<Value*> inputs() { return inputs_; } at::ArrayRef<const Value*> inputs() const { // Vectors are not convertible in const-ness of elements, but // raw pointers are. return {inputs_.data(), inputs_.size()}; } // NB: This returns an ArrayRef; that means that it will // get invalidated if you resize inputs (e.g., using addInput) // We can't return a std::vector<Node*>& because there's no // way to soundly cast to std::vector<const Node*> (an insane // implementation of std::vector could make this representationally // different.) at::ArrayRef<Value*> outputs() { return outputs_; } at::ArrayRef<const Value*> outputs() const { // Vectors are not convertible in const-ness of elements, but // raw pointers are. return {outputs_.data(), outputs_.size()}; } Value* output(size_t i) const { return outputs_.at(i); } bool hasUses() const { for (auto o : outputs()) { if (!o->uses().empty()) { return true; } } return false; } void replaceAllUsesWith(Node* n); // replaces `this` with a new node with the same inputs and outputs // but a new node symbol. 
does not destroy `this` Node* replaceWithNewSymbol(Symbol new_symbol); // Checks if this node is dominated by `dominator` which means that // `dominator` will always be executed before `this` and `dominator` // is in scope of `this. bool isDominatedBy(const Node* dominator) const; // lots of things like chunk have a single input or single output, so we have // a helper to make accessing it easier Value* input() { AT_ASSERT(inputs_.size() == 1); return inputs_.at(0); } Value* output() { AT_ASSERT(outputs_.size() == 1); return outputs_.at(0); } const Value* output() const { AT_ASSERT(outputs_.size() == 1); return outputs_.at(0); } const Value* input() const { AT_ASSERT(inputs_.size() == 1); return inputs_.at(0); } // Access a particular input. This is a checked index. Value* input(size_t i) const { return inputs_.at(i); } bool hasNamedInput(const std::string& unqualName) const; Value* namedInput(const std::string& unqualName) const; Value* namedInput(Symbol name) const; std::optional<IValue> get(Symbol name) const; template <typename T> std::optional<T> get(Symbol name) const { if (auto v = get(name)) { return v->template to<T>(); } return std::nullopt; } // Returns true if the value of input name is statically known bool is_constant(Symbol name) const { return static_cast<bool>(get(name)); } bool mustBeNone() const; bool isNondeterministic() const; bool hasSideEffects() const; // instructions lowered by the interpreter and not run in the optimized graph bool notExecutedOp() const { return kind_ == prim::Constant || kind_ == prim::profile || kind_ == prim::profile_ivalue; } // Graphs // Note [Topological invariant] // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // We always maintain an up-to-date topological ordering of all nodes via // the next()/prev() links. All transformations to graphs must preserve // this topological ordering: for example, it is only valid to 'addInput' // with an input which is topologically before the current node. // // Usually, it is obvious whether or not topological order is maintained; // for example, if you are adding nodes to the end of the topsort, it's // impossible for them to refer to inputs that are not in the topsort. // If it is not obvious, please comment accordingly. // Add 'node' as an input to 'this' at the end of existing // arguments. Returns the added node for ease of chaining. // // Given: %3 = f(%1, %2) // Execute: %3.addInput(%4) // Result: %3 = f(%1, %2, %4) Value* addInput(Value* value); // Add 'value' as an input to 'this' at the specified position in the // arguments. Returns the added value for ease of chaining. Value* insertInput(size_t i, Value* value); // Replace the input of 'this' at position 'i' with // 'newValue', returning the old node. // // Given: %3 = f(%1, %2) // Execute: %3.replaceInput(1, %4) // Result: %3 = f(%1, %4) Value* replaceInput(size_t i, Value* newValue); // Replace all occurrences of 'from' in the inputs of this // node with 'to'. Corresponds to llvm's replaceUsesOfWith. // // Given: %3 = f(%1, %2, %1) // Execute: %3.replaceInputWith(%1, %4) // Result: %3 = f(%4, %2, %4) void replaceInputWith(Value* from, Value* to); Value* addOutput(); Value* insertOutput(size_t i); void eraseOutput(size_t i); Block* addBlock(); void eraseBlock(size_t i); // Each Node can have a list of subblocks. These are used to define structured // nested control flow operators such as If and Loop. 
// The meaning of a block is specific to the kind of node it is in, but // all blocks share these semantics: // * Nested lexical scoping: If a node 'Parent' has a subblock which contains // a node 'Child', Child can use any value that was in scope for the Parent // node in addition to any values defined before 'Child' in the subblock. // * The list of inputs to the block is in scope for the duration of the // block // * the outputs of the Parent node are not in scope for the subblocks // Typically the inputs to a block that represents control flow act // as the equivalent of phi-nodes in standard SSA form, // defining a new Value to represent any term that has multiple // definitions depending on how control flowed. Outputs of the node containing // control flow serve a similar purpose, defining new values for variables // that would have different definitions depending on which way control // flowed. at::ArrayRef<Block*> blocks() { return blocks_; } at::ArrayRef<const Block*> blocks() const { // Vectors are not convertible in const-ness of elements, but // raw pointers are. return {blocks_.data(), blocks_.size()}; } // Is 'this' before 'n' in the topological order? bool isBefore(const Node* n) const; // Is 'this' after 'n' in the topological order? bool isAfter(const Node* n) const; // Insert unattached 'this' node before 'n' in the topological order. // Returns this (for chaining). // // Given: %3 = f(%1, %2) // %4 = g(%3) // and unattached: %5 = h(%1) // Execute: %5.insertBefore(%4) // Result: %3 = f(%1, %2) // %5 = h(%1) // %4 = g(%3) Node* insertBefore(Node* n); // Insert unattached 'this' node after 'n' in the topological order. // Returns this (for chaining). // // Given: %3 = f(%1, %2) // %4 = g(%3) // and unattached: %5 = h(%1) // Execute: %5.insertAfter(%4) // Result: %3 = f(%1, %2) // %4 = g(%3) // %5 = h(%1) Node* insertAfter(Node* n); // Move 'this' (already in the graph) after 'n' in the topological order. // // NOTE: Does not check that value dependencies are preserved, see // AliasDb::moveAfterTopologicallyValid // // Given: %2 = f(%1) // %3 = g(%1) // Execute: %2.moveAfter(%3) // Result: %3 = g(%1) // %2 = f(%1) // void moveAfter(Node* n); // Move 'this' (already in the graph) before 'n' in the topological // order. // // NOTE: Does not check that value dependencies are preserved, see // AliasDb::moveBeforeTopologicallyValid // // Given: %2 = f(%1) // %3 = g(%1) // Execute: %3.moveBefore(%2) // Result: %3 = g(%1) // %2 = f(%1) void moveBefore(Node* n); // Remove the input at 'i' from this node. // // WARNING: This is O(n) in the number of inputs, so avoid repeatedly calling // removeInput. // // Given: %3 = f(%1, %2) // Execute: %3.removeInput(1) // Result: %3 = f(%1) void removeInput(size_t i); // Remove all inputs from a node. // // Given: %3 = f(%1, %2) // Execute: %3.removeAllInputs() // Result: %3 = f() void removeAllInputs(); // Remove all outputs from a node. 
// // Given: %1, %2 = f() // Execute: removeAllOutputs() // Result: = f() void removeAllOutputs(); // Rearrange the ordering of inputs or outputs of a node // Given: %3 = f(%1, %2) // Execute: %3.permuteInputs({1, 0}) // Result: %3 = f(%2, %1) // Each index must appear exactly once void permuteInputs(const std::vector<size_t>& new_inputs); void permuteOutputs(const std::vector<size_t>& new_inputs); // iterators of the node list starting at this node // useful for resuming a search starting at this node inline graph_node_list_iterator iterator() { return {this, 0}; } inline graph_node_list_iterator reverseIterator() { return iterator().reverse(); } inline const_graph_node_list_iterator iterator() const { return {this, 0}; } inline const_graph_node_list_iterator reverseIterator() const { return iterator().reverse(); } // Remove 'this' from the instruction list and deallocate it. // // Invariant: no outputs of 'this' may have any uses. // // Given: %2 = f(%1) // %3 = g(%1) // Execute: %2.destroy() // Result: %3 = g(%1) void destroy(); // Dynamically cast this node to the subclass indicated by the // template variable, returning nullptr if the cast is invalid. // // Example usage: if(auto s = n.cast<Select>()) { ... } template <typename T> T* cast() { if (T::Kind == kind()) { return static_cast<T*>(this); } return nullptr; } template <typename T> const T* cast() const { if (T::Kind == kind()) { return static_cast<const T*>(this); } return nullptr; } template <typename T> T* expect() { TORCH_CHECK( T::Kind == kind(), "expected a ", T::Kind.toDisplayString(), " but found a ", kind().toDisplayString()); return static_cast<T*>(this); } bool matches(const FunctionSchema& schema) const; // XXX: this function is meant to be used with string literals only! bool matches( const char* signature_literal, at::ArrayRef<Symbol> const_inputs = {}) const; bool isMemberOf(const OperatorSet& os) const; template <typename T> bool isMemberOf(const OperatorMap<T>& om) const { auto it = om.map.find(kind()); if (it == om.map.end()) { return false; } for (auto& op : it->second) { if (matches(op.first->schema())) { return true; } } return false; } const FunctionSchema& schema() const; const FunctionSchema* maybeSchema() const; const Operator& getOperator() const; Operation getOperation() const; const Operator* maybeOperator() const; void dump() const; std::ostream& print( std::ostream& out, size_t level, std::vector<const Node*>* groups, bool print_source_locations = true, bool print_attributes = true, bool print_scopes = true, bool print_body = true) const; virtual ~Node() { if (wrap_) { wrap_->clear(); } } // Methods for accessing attributes Node* copyAttributes(const Node& rhs) { values_.clear(); for (const AVPtr& i : rhs.values_) { values_.push_back(i->clone()); } return this; } bool hasAttribute(Symbol name) const { AT_ASSERT(name.is_attr()); return findAttr(name, false) != values_.end(); } bool hasAttributeS(const std::string& name) const { return hasAttribute(Symbol::attr(name)); } AttributeKind kindOf(Symbol name) const { AT_ASSERT(name.is_attr()); return (*findAttr(name, true))->kind(); } AttributeKind kindOfS(const std::string& name) const { return kindOf(Symbol::attr(name)); } Node* removeAttribute(Symbol name) { AT_ASSERT(name.is_attr()); values_.erase(findAttr(name, true)); return this; } Node* removeAttributeS(const std::string& name) { return removeAttribute(Symbol::attr(name)); } bool hasAttributes() const { return !values_.empty(); } size_t numAttributes() const { return values_.size(); } // The names 
are returned in order, since name actually is the index. std::vector<Symbol> attributeNames() const { std::vector<Symbol> names; names.reserve(values_.size()); for (const AVPtr& a : values_) { names.push_back(a->name); } return names; } std::vector<const char*> attributeNamesS() const { std::vector<const char*> names; names.reserve(values_.size()); for (const AVPtr& a : values_) { names.push_back(a->name.toUnqualString()); } return names; } #define CREATE_ACCESSOR(Kind, method) \ Node* method##_(Symbol name, Kind##Attr::ConstructorType v) { \ return setAttr<Kind##Attr>( \ name, std::forward<Kind##Attr::ConstructorType>(v)); \ } \ const Kind##Attr::ValueType& method(Symbol name) const { \ return getAttr<Kind##Attr>(name); \ } CREATE_ACCESSOR(Float, f) CREATE_ACCESSOR(Complex, c) CREATE_ACCESSOR(Floats, fs) CREATE_ACCESSOR(ComplexVals, cs) CREATE_ACCESSOR(String, s) CREATE_ACCESSOR(Strings, ss) CREATE_ACCESSOR(Int, i) CREATE_ACCESSOR(Ints, is) CREATE_ACCESSOR(Graph, g) CREATE_ACCESSOR(Graphs, gs) CREATE_ACCESSOR(Type, ty) CREATE_ACCESSOR(Types, tys) CREATE_ACCESSOR(IValue, ival) #undef CREATE_ACCESSOR // Our Graphs are not very const-correct, so we need to allow returning // non-const references too GraphAttr::ValueType& g(Symbol name) { return getAttr<GraphAttr>(name); } // does not use CREATE_ACCESSOR because we need additional asserts Node* t_(Symbol name, TensorAttr::ConstructorType v) { return setAttr<TensorAttr>( name, std::forward<TensorAttr::ConstructorType>(v)); } const TensorAttr::ValueType& t(Symbol name) const { return getAttr<TensorAttr>(name); } Node* ts_(Symbol name, TensorsAttr::ConstructorType v) { return setAttr<TensorsAttr>( name, std::forward<TensorsAttr::ConstructorType>(v)); } const TensorsAttr::ValueType& ts(Symbol name) const { return getAttr<TensorsAttr>(name); } Block* findCommonAncestorBlockWith(Node* n); size_t blocksFromGraphBlock(); private: void printAttrValue(std::ostream& out, const Symbol& name) const; void printAttributes(std::ostream& out, bool ignore_subgraph) const; template <typename T> Node* setAttr(Symbol name, typename T::ConstructorType v) { AT_ASSERT(name.is_attr()); auto it = findAttr(name, false); auto nv = AVPtr(new T(name, std::forward<typename T::ConstructorType>(v))); // NOLINTNEXTLINE(bugprone-branch-clone) if (it == values_.end()) { values_.push_back(std::move(nv)); } else { *it = std::move(nv); } return this; } template <typename T> typename T::ValueType& getAttr(Symbol name) const { AT_ASSERT(name.is_attr()); auto it = findAttr(name, true); auto* child = dynamic_cast<T*>(it->get()); if (child == nullptr) { throw IRAttributeError(name, true); } return child->value(); } using AVPtr = AttributeValue::Ptr; // NB: For determinism, we use a vector rather than a hash map. This does // mean that lookups are O(n), so you shouldn't use Attributes to store // a big pile of messages. 
std::vector<AVPtr> values_; std::vector<AVPtr>::iterator findAttr(Symbol name, bool required) { AT_ASSERT(name.is_attr()); auto it = std::find_if(values_.begin(), values_.end(), [&](const AVPtr& v) { return v->name == name; }); if (required && it == values_.end()) { throw IRAttributeError(name, false); } AT_ASSERT(!required || it != values_.end()); return it; } std::vector<AVPtr>::const_iterator findAttr(Symbol name, bool required) const { AT_ASSERT(name.is_attr()); auto it = std::find_if(values_.begin(), values_.end(), [&](const AVPtr& v) { return v->name == name; }); if (required && it == values_.end()) { throw IRAttributeError(name, false); } AT_ASSERT(!required || it != values_.end()); return it; } enum class MoveSide { BEFORE, AFTER }; bool isBeforeOrAfter(const Node* n, MoveSide moveSide) const; std::pair<Value*, const Argument&> findInput(Symbol name); // Lookup iterator in use list of _input i_ that corresponds to its use of // _this_ use_list::iterator findUseForInput(size_t i); // remove the use of input i, this sets input i to nullptr, but // is only used internally to Node before setting it to a new value // or erasing the entry from the list. Value* dropInput(size_t i); bool inBlockList() const { if (next() == nullptr) { AT_ASSERT(prev() == nullptr); } return next() != nullptr; } void removeFromList(); void lint() const; void assignTopoPosition(); protected: // subclasses must override // this function is used by createClone to initialize a new version // of a node in another graph. It should allocate a new instance of the same // concrete type as 'this', but in graph 'g' which might be different // than graph_ virtual Node* allocNewInstance(Graph* g) { return new Node(g, kind()); } // create a copy of all properties of Node s into this. // subclasses should extend if they have additional information to copy. // 'this' will be allocated with s->allocNewInstance(g) so it should have // the same concrete type as 's' virtual void cloneFrom(Node* s); }; struct Block { friend struct Node; friend struct Graph; AT_DISALLOW_COPY_AND_ASSIGN(Block); TORCH_API Block(Graph* graph_, Node* node_); at::ArrayRef<Value*> inputs() { return input_->outputs(); } at::ArrayRef<const Value*> inputs() const { const auto& inputs = input_->outputs(); return {inputs.data(), inputs.size()}; } at::ArrayRef<Value*> outputs() { return output_->inputs(); } at::ArrayRef<const Value*> outputs() const { return static_cast<const Node*>(output_)->inputs(); } graph_node_list nodes() { return {input_, kNextDirection}; } const_graph_node_list nodes() const { return {input_, kNextDirection}; } Node* return_node() { return output_; } const Node* return_node() const { return output_; } Node* param_node() { return input_; } const Node* param_node() const { return input_; } /** * @warning NEVER pass raw pointer of smart pointer managed Graph to Python. * Check #87343 for details. 
*/ Graph* owningGraph() { return graph_; } const Graph* owningGraph() const { return graph_; } Node* owningNode() { return owning_node_; } const Node* owningNode() const { return owning_node_; } Value* addInput(const std::string& name = "") { Value* v = input_->addOutput(); v->setDebugName(name); return v; } Value* insertInput(size_t i, const std::string& name = "") { Value* v = input_->insertOutput(i); v->setDebugName(name); return v; } void eraseInput(size_t i) { input_->eraseOutput(i); } void removeAllInputs() { input_->removeAllOutputs(); } size_t registerOutput(Value* v) { output_->addInput(v); return outputs().size() - 1; } size_t insertOutput(size_t i, Value* n) { output_->insertInput(i, n); return i; } void eraseOutput(size_t i) { output_->removeInput(i); } void removeAllOutputs() { output_->removeAllInputs(); } void replaceOutput(size_t i, Value* n) { output_->replaceInput(i, n); } void permuteOutputs(const std::vector<size_t>& new_inputs) { output_->permuteInputs(new_inputs); } void permuteInputs(const std::vector<size_t>& new_inputs) { input_->permuteOutputs(new_inputs); } Node* appendNode(Node* n) { AT_ASSERT(n->graph_ == graph_ && !n->inBlockList()); n->insertBefore(output_); return n; } Node* prependNode(Node* n) { AT_ASSERT(n->graph_ == graph_ && !n->inBlockList()); n->insertAfter(input_); return n; } // clone all inputs, nodes, and outputs from src and append them // to the inputs, nodes, and outputs of this block // value_map is used whenever a node in src references a free variable // in src to look up its corresponding value TORCH_API void cloneFrom(Block* src, std::function<Value*(Value*)> value_map); TORCH_API void remapTypes(const std::function<TypePtr(TypePtr)>& type_map); TORCH_API std::shared_ptr<Wrap<Block>> wrap() { if (!wrap_) { wrap_ = std::make_shared<Wrap<Block>>(this); } return wrap_; } virtual ~Block() { if (wrap_) { wrap_->clear(); } } void clear() { removeAllOutputs(); for (auto it = nodes().rbegin(); it != nodes().rend(); it++) { it.destroyCurrent(); } removeAllInputs(); } private: void reIndexTopology(); // get rid of all nodes // destroys in reverse order so that uses internal to this block // do not have to be removed before you can destroy the block void destroy(); Graph* const graph_; // holds outputs in a way that can be reflected // as a Use object // also used as the beginning/end of the circular node list to avoid // having corner cases where the list is empty. Node* const output_; Node* const input_; Node* const owning_node_; // either the node that has this block or nullptr for root // a managing wrapper for Python to allow invalidation std::shared_ptr<Wrap<Block>> wrap_; }; struct Graph : std::enable_shared_from_this<Graph> { AT_DISALLOW_COPY_AND_ASSIGN(Graph); friend struct Node; friend struct Value; friend struct Block; private: // only used to keep track of allocated nodes // actual representation of Graph is done with // inputs, outputs, nodes std::unordered_set<const Node*> all_nodes; std::unordered_set<const Value*> all_values; std::unordered_set<const Block*> all_blocks; size_t next_unique_{0}; std::unordered_map<std::string, Value*> unique_names_; // name_base_suffix tracks largest suffix currently used by all names sharing // same name_base. Key of this map is name_base, value is largest suffix // numeric value. 
std::unordered_map<std::string, size_t> name_base_suffix_; ScopePtr current_scope_; Block* const block_; // when insertNode() is called, the node is inserted before this node // by default this is set to append to the top level block Node* insert_before_; int64_t predicted_insert_count_ = 0; std::optional<size_t> op_version_; public: Graph(ScopePtr scope_root = c10::make_intrusive<Scope>()) : current_scope_(std::move(scope_root)), block_(new Block(this, nullptr)), insert_before_(return_node()) {} at::ArrayRef<Value*> inputs() { return block_->inputs(); } at::ArrayRef<const Value*> inputs() const { const Block& block = *block_; return block.inputs(); } at::ArrayRef<Value*> outputs() { return block_->outputs(); } at::ArrayRef<const Value*> outputs() const { const Block& block = *block_; return block.outputs(); } graph_node_list nodes() { return block_->nodes(); } const_graph_node_list nodes() const { const Block& block = *block_; return block.nodes(); } Node* param_node() { return block_->param_node(); } const Node* param_node() const { return block_->param_node(); } Node* return_node() { return block_->return_node(); } const Node* return_node() const { return block_->return_node(); } const std::unordered_map<std::string, Value*>& debugNames() const { return unique_names_; } TORCH_API void push_scope(const std::string& scope_name); TORCH_API void pop_scope(); ScopePtr current_scope() { return current_scope_; } void set_op_version(std::optional<size_t> version) { op_version_ = version; } std::optional<size_t> get_op_version() { return op_version_; } void set_current_scope(ScopePtr scope) { current_scope_ = std::move(scope); } Value* addInput(const std::string& name = "") { return block_->addInput(name); } Value* insertInput(size_t i, const std::string& name = "") { return block_->insertInput(i, name); } void eraseInput(size_t i) { block_->eraseInput(i); } size_t registerOutput(Value* n) { return block_->registerOutput(n); } void eraseOutput(size_t i) { block_->eraseOutput(i); } TORCH_API Node* create(NodeKind kind, size_t num_outputs = 1); TORCH_API Node* create( NodeKind kind, ArrayRef<Value*> inputs, size_t num_outputs = 1); TORCH_API Node* createNone(); TORCH_API Node* createAutogradZero(); TORCH_API Node* createUninitialized(TypePtr typ); TORCH_API Node* createWithSubgraph(Symbol kind); TORCH_API Node* createDifferentiableSubgraph(); TORCH_API Node* createTuple( at::ArrayRef<Value*> values, TupleTypePtr optional_named_tuple = nullptr); TORCH_API Node* createTupleUnpack(Value* v); TORCH_API Node* createTupleIndex( Value* tup, Value* idx, const TypePtr& output_type); TORCH_API Node* createTupleSlice( Value* tup, int64_t beg, int64_t step_size, int64_t num_values); TORCH_API Node* createEnumName(Value* e); TORCH_API Node* createEnumValue(Value* e); TORCH_API Node* createList( const TypePtr& contained_type, at::ArrayRef<Value*> values); TORCH_API Node* createListUnpack(Value* v, size_t size); TORCH_API Node* createDict( const TypePtr& key_type, const TypePtr& value_type, at::ArrayRef<Value*> keys, at::ArrayRef<Value*> values); TORCH_API Node* createNumToTensor(Value* value); TORCH_API Node* createObject(const ClassTypePtr& type); TORCH_API Node* createSetAttr( Value* obj, const std::string& field, Value* newValue); TORCH_API Node* createGetAttr(Value* obj, const std::string& field); Value* insertGetAttr(Value* obj, const std::string& field) { return insertNode(createGetAttr(obj, field))->output(); } TORCH_API Node* createStore(const std::string& name, Value* v); TORCH_API Node* 
createLoad(const std::string& name, const TypePtr& type); TORCH_API Node* createIsInstance(Value* v, at::ArrayRef<TypePtr> types); TORCH_API Value* insertUncheckedCast(Value* v, TypePtr type); // Insert a ToList operator with argument \p v and output type \p type. // \returns the output of the operation. TORCH_API Value* insertToList(Value* v, TypePtr type); TORCH_API Value* insertFunctionCall( Function* callee, const MatchedSchema& matched); TORCH_API Value* insertMethodCall( std::string method_name, const MatchedSchema& matched); // Note: defined in python_ir.cpp and can be used only in python extension Node* createPythonOp( THPObjectPtr&& pyobj, const std::string& cconv, pyobj_list&& scalar_args); // clone n, making a new node in _this_ graph. // use value_map to translate inputs of n to inputs of the cloned node // if copy_blocks is false, it will not recursively clone the nested blocks // this node contains. TORCH_API Node* createClone( Node* n, const std::function<Value*(Value*)>& value_map, bool copy_blocks = true); // Insert constant IValue into the graph. TORCH_API Value* insertConstant( const IValue& val, std::optional<SourceRange> loc = std::nullopt, std::optional<ScopePtr> scope = std::nullopt); // Schema-driven insert: // This inserts a node into the graph with inputs determined from args and // kwargs using Python argument matching rules, and checks that the op matches // a known schema. // // If this node successfully completes, it guarentees the node // is a correctly-formed invocation of opname TORCH_API Value* insert( Symbol opname, at::ArrayRef<NamedValue> args, at::ArrayRef<NamedValue> kwargs = {}, const std::optional<SourceRange>& range = {}); Node* appendNode(Node* n) { return block_->appendNode(n); } Node* prependNode(Node* n) { return block_->prependNode(n); } // insert before insert_before_ node // initialized to insert at the end of the top level block // can be changed with setInsertPoint() Node* insertNode(Node* n) { AT_ASSERT( insert_before_->inBlockList() && "insert point node is no longer in a block list"); return n->insertBefore(insert_before_); } // set where nodes are inserted to append to the end of this block void setInsertPoint(Block* b) { AT_ASSERT(b->owningGraph() == this); setInsertPoint(b->return_node()); } // set where nodes are inserted to insert _before_ this node // for implementation simplicity we only support inserting before a node for // now void setInsertPoint(Node* n) { AT_ASSERT(n->owningGraph() == this && n->inBlockList()); insert_before_ = n; predicted_insert_count_ = 0; } Node* insertPoint() { return insert_before_; } // the top level block Block* block() { return block_; } const Block* block() const { return block_; } // Checks well-formedness and invariants of graph TORCH_API void lint() const; // for use in debugger TORCH_API void dump() const; TORCH_API ~Graph(); TORCH_API std::string toString(bool print_source_locations = true) const; TORCH_API std::ostream& print( std::ostream& out, bool print_source_locations = true) const; friend TORCH_API std::ostream& operator<<(std::ostream& out, const Graph& g); TORCH_API std::shared_ptr<Graph> copy(); TORCH_API std::unique_ptr<Graph> copyUnique(); TORCH_API void remapTypes(const std::function<TypePtr(TypePtr)>& type_map); private: friend TORCH_API void Lint(const AliasDb* db); TORCH_API void freeNode(Node* n); TORCH_API void freeValue(Value* v); TORCH_API void freeBlock(Block* b); void cloneFrom(Graph& src); }; /** \brief An utility class for setting temporary insertion points. 
* * When an object of this class is created, it stores the current insertion * point, sets the new one, and restores the original insertion point when the * object is destroyed. */ struct WithInsertPoint { WithInsertPoint(Node* n) : prev_(n->owningGraph()->insertPoint()) { n->owningGraph()->setInsertPoint(n); } WithInsertPoint(Block* b) : WithInsertPoint(b->return_node()) {} ~WithInsertPoint() { prev_->owningGraph()->setInsertPoint(prev_); } private: Node* prev_; }; /** \brief An utility class for setting temporary scopes. * * When an object of this class is created, it stores the current scope, sets * the new one, and restores the original scope when the object is destroyed. */ struct WithCurrentScope { WithCurrentScope(Graph& g, ScopePtr scope) : graph_(&g), prev_scope_(g.current_scope()) { g.set_current_scope(std::move(scope)); } ~WithCurrentScope() { graph_->set_current_scope(prev_scope_); } private: Graph* graph_; ScopePtr prev_scope_; }; // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) inline Value::Value(Node* node_, size_t offset_) : node_(node_), offset_(offset_), unique_(node_->graph_->next_unique_++), type_(TensorType::get()) { node_->graph_->all_values.emplace(this); } inline Value* Value::setType(TypePtr type) { AT_ASSERT(type); if (auto dyn = type->castRaw<c10::DynamicType>()) { type = dyn->fallback(); } type_ = std::move(type); for (Use& use : uses_) { use.user->op_ = nullptr; } return this; } inline Graph* Value::owningGraph() { return node()->owningGraph(); } inline const Graph* Value::owningGraph() const { return node()->owningGraph(); } /************* All nodes not required to be defined before Graph **************/ struct ProfileOp : public Node { static const Symbol Kind; ProfileOp(Graph* graph, std::function<void(std::vector<IValue>&)> callback) : Node(graph, ::c10::prim::profile), callback_(std::move(callback)) {} void cloneFrom(Node* other_) override; Node* allocNewInstance(Graph* g) override; const std::function<void(std::vector<IValue>&)>& getCallback() const { return callback_; } void setCallback(std::function<void(std::vector<IValue>&)> callback) { callback_ = std::move(callback); } bool hasSeenTensor() const { return has_seen_tensor_; } void setHasSeenTensor(bool has_seen_tensor) { has_seen_tensor_ = has_seen_tensor; } private: std::function<void(std::vector<IValue>&)> callback_; bool has_seen_tensor_ = false; }; struct TORCH_API ProfileIValueOp : public Node { static const Symbol Kind; ProfileIValueOp( Graph* graph, std::function<void(std::vector<IValue>&)> callback) : Node(graph, ::c10::prim::profile_ivalue), callback_(std::move(callback)) {} void cloneFrom(Node* other_) override; Node* allocNewInstance(Graph* g) override; const std::function<void(std::vector<IValue>&)>& getCallback() const { return callback_; } void setCallback(std::function<void(std::vector<IValue>&)> callback) { callback_ = std::move(callback); } private: std::function<void(std::vector<IValue>&)> callback_; }; // execute a Python function, used for Ops we can't optimize but that we want to // optimize around // // Note: actual implementation (ConcretePythonOp) is defined in python_ir.cpp // which is not included in libtorch.so. We still include some bits and pieces // of PythonOp here to enable writing simple passes generically. In general, // python-aware bits need to be moved to the descendant classes. 
struct TORCH_API PythonOp : public Node { using Node::Node; virtual std::string name() const = 0; virtual void writeScalars(std::ostream& out) const = 0; void cloneFrom(Node* other_) override = 0; Node* allocNewInstance(Graph* g) override = 0; // recover the autograd.Function instance, if this PythonOp's function // was originally SomeFunction.apply // used in ONNX for discovering symbolics virtual std::optional<THPObjectPtr> autogradFunction() const = 0; virtual void lint_python() const = 0; }; TORCH_API void LintGraph(const std::shared_ptr<Graph>& graph); TORCH_API at::ArrayRef<Value*> createTupleUnpack(Value* v); /** Insert graph \p CALLEE into graph \p G using \p INPUTS as input values. * The insertion happens at the current insertion point. * Optionally, one can also pass \p VALUE_MAP to get a map between \p CALLEE * values and their cloned copies in \p G. */ TORCH_API std::vector<Value*> insertGraph( Graph& g, Graph& callee, ArrayRef<Value*> inputs); TORCH_API std::vector<Value*> insertGraph( Graph& g, Graph& callee, ArrayRef<Value*> inputs, std::unordered_map<Value*, Value*>& value_map); /** Insert function \p CALLEE after node \p TO_REPLACE, remove the node and * replace all its uses with corresponding outputs of the inserted function. * This asserts that the number of outputs of the original node and the * graph are the same. */ TORCH_API std::vector<Value*> inlineCallTo( Node* to_replace, GraphFunction* callee, bool use_graph = true); TORCH_API std::vector<Value*> inlineCallTo( Node* to_replace, GraphFunction* callee, Graph* callee_graph); /** If there is only one value in \p OUTPUTS and its kind is Tuple, insert a * tuple unpack node and return the resulting values. */ TORCH_API std::vector<Value*> unpackOutputs(const std::vector<Value*>& outputs); TORCH_API std::vector<Node*> findAllNodes(Graph& g, Symbol kind, bool recurse); TORCH_API std::vector<Node*> findAllNodes(Block& b, Symbol kind, bool recurse); TORCH_API std::vector<Node*> findAllNodes( at::ArrayRef<Block*> a, Symbol kind, bool recurse); struct TORCH_API OperatorSet { OperatorSet(std::initializer_list<const char*> sig_literals); std::vector<std::shared_ptr<Operator>> getOps() const; void insert(std::initializer_list<const char*> sig_literals); private: friend struct Node; std::unordered_map<Symbol, std::vector<std::shared_ptr<Operator>>> ops; }; template <typename T> // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) struct OperatorMap { // Type aliasing using OpMapType = typename std::pair<std::shared_ptr<Operator>, T>; using ValueType = std::vector<OpMapType>; using MapType = std::unordered_map<Symbol, ValueType>; OperatorMap() = default; // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) explicit OperatorMap( std::initializer_list<std::pair<std::shared_ptr<Operator>, T>> init) { insert(init); } // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) explicit OperatorMap(std::initializer_list<std::pair<const char*, T>> init) { insert(init); } void insert(const std::shared_ptr<Operator>& op, T val) { // Remove if exists before insert erase(op); map[Symbol::fromQualString(op->schema().name())].emplace_back( std::make_pair(op, val)); } void insert(const OperatorSet& op_set, T val) { for (auto& op : op_set.getOps()) { insert(op, val); } } void insert( std::initializer_list<std::pair<std::shared_ptr<Operator>, T>> v) { for (auto& el : v) { insert(el.first, el.second); } } void insert(std::initializer_list<std::pair<const char*, T>> v) { for (auto& el : v) { insert(getOperatorForLiteral(el.first), 
el.second); } } void erase(const std::shared_ptr<Operator>& op) { auto it = map.find(Symbol::fromQualString(op->schema().name())); if (it == map.end()) { return; } for (auto vit = it->second.begin(); vit != it->second.end(); ++vit) { if (vit->first->schema() == op->schema()) { it->second.erase(vit); break; } } if (it->second.size() == 0) { map.erase(Symbol::fromQualString(op->schema().name())); } } bool contains(const Operator& op) const { const auto it = map.find(Symbol::fromQualString(op.schema().name())); if (it == map.end()) { return false; } for (auto vit = it->second.begin(); vit != it->second.end(); ++vit) { if (vit->first->schema() == op.schema()) { return true; } } return false; } bool contains(const Node* n) const { return n->maybeOperator() && contains(n->getOperator()); } std::optional<T> find(const Operator& op) { const auto it = map.find(Symbol::fromQualString(op.schema().name())); if (it == map.end()) { return std::nullopt; } for (auto vit = it->second.begin(); vit != it->second.end(); ++vit) { if (vit->first->schema() == op.schema()) { return vit->second; } } return std::nullopt; } // TODO: return iterator std::vector<OpMapType> getAllKeysAndValues() const { std::vector<OpMapType> keys_values; keys_values.reserve(map.size()); for (auto& symbol_mapping : map) { auto& vec = symbol_mapping.second; for (auto& pair : vec) { keys_values.push_back(pair); } } return keys_values; } private: friend struct Node; MapType map; }; template <typename T> // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) struct FunctionSchemaMap { // Type aliasing using FuncSchemaMapType = typename std::pair<FunctionSchema, T>; using ValueType = std::vector<FuncSchemaMapType>; using MapType = std::unordered_map<Symbol, ValueType>; FunctionSchemaMap() = default; void insert(const FunctionSchema& schema, T val) { // Remove if exists before insert erase(schema); map[Symbol::fromQualString(schema.name())].emplace_back( std::make_pair(schema, val)); } void erase(const FunctionSchema& schema) { auto it = map.find(Symbol::fromQualString(schema.name())); if (it == map.end()) { return; } for (auto vit = it->second.begin(); vit != it->second.end(); ++vit) { if (vit->first == schema) { it->second.erase(vit); break; } } if (it->second.size() == 0) { map.erase(Symbol::fromQualString(schema.name())); } } bool contains(const FunctionSchema& schema) const { const auto it = map.find(Symbol::fromQualString(schema.name())); if (it == map.end()) { return false; } for (auto vit = it->second.begin(); vit != it->second.end(); ++vit) { if (vit->first->schema() == schema) { return true; } } return false; } std::optional<T> find(const FunctionSchema& schema) const { const auto it = map.find(Symbol::fromQualString(schema.name())); if (it == map.end()) { return std::nullopt; } for (auto vit = it->second.begin(); vit != it->second.end(); ++vit) { if (vit->first == schema) { return vit->second; } } return std::nullopt; } // TODO: return iterator std::vector<FuncSchemaMapType> getAllKeysAndValues() const { std::vector<FuncSchemaMapType> keys_values; keys_values.reserve(map.size()); for (auto& symbol_mapping : map) { auto& vec = symbol_mapping.second; for (auto& pair : vec) { keys_values.push_back(pair); } } return keys_values; } private: friend struct Node; MapType map; }; } // namespace torch::jit ```
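Since `ir.h` is the center of gravity here, a small hedged sketch of the construction API it declares may help: build a graph, add inputs, insert an operator through the schema-driven `insert`, and register an output. The choice of `aten::mul` and the debug names are illustrative only.

```cpp
#include <torch/csrc/jit/ir/ir.h>

std::shared_ptr<torch::jit::Graph> make_mul_graph() {
  using namespace torch::jit;
  auto graph = std::make_shared<Graph>();

  // Graph inputs are Values hanging off the param node of the top-level block.
  Value* a = graph->addInput("a");
  Value* b = graph->addInput("b");

  // Schema-driven insert: argument matching and schema checking happen here,
  // and the new node is placed at the current insertion point.
  Value* out = graph->insert(aten::mul, {a, b});

  graph->registerOutput(out);
  graph->lint();  // verify well-formedness (topology, uses, blocks)
  return graph;
}
```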
================================================================================================================================== SOURCE CODE FILE: ir_views.h LINES: 1 SIZE: 4.67 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\ir\ir_views.h ENCODING: utf-8 ```h #pragma once #include <c10/util/irange.h> #include <torch/csrc/jit/ir/ir.h> namespace torch::jit { struct IfView { explicit IfView(Node* node) : node_(node) { AT_ASSERT(node->kind() == ::c10::prim::If); } Value* cond() const { return node_->input(0); } Block* thenBlock() const { return node_->blocks().at(0); } Block* elseBlock() const { return node_->blocks().at(1); } ArrayRef<Value*> thenOutputs() const { return thenBlock()->outputs(); } ArrayRef<Value*> elseOutputs() const { return elseBlock()->outputs(); } ArrayRef<Value*> outputs() const { return node_->outputs(); } Node* node() const { return node_; } operator Node*() const { return node_; } void permuteOutputs(const std::vector<size_t>& new_output_order) { node_->permuteOutputs(new_output_order); thenBlock()->permuteOutputs(new_output_order); elseBlock()->permuteOutputs(new_output_order); } private: Node* node_; }; struct LoopView { explicit LoopView(Node* node) : node_(node) { AT_ASSERT( node->kind() == ::c10::prim::Loop || node->kind() == ::c10::onnx::Loop); } Block* bodyBlock() const { return node_->blocks().at(0); } Value* cond() const { return node_->input(0); } Value* maxTripCount() const { return node_->input(0); } Value* inputCond() const { return node_->input(1); } Value* nextCond() const { return bodyBlock()->outputs().at(0); } Value* currentTripCount() const { return bodyBlock()->inputs().at(0); } ArrayRef<Value*> carriedInputs() const { // skip trip count and cond return node_->inputs().slice(2); } ArrayRef<Value*> carriedInputsWithCond() const { // skip trip count and cond return node_->inputs().slice(1); } ArrayRef<Value*> carriedOutputs() const { return node_->outputs(); } ArrayRef<Value*> bodyCarriedInputs() const { // skip trip count and cond return bodyBlock()->inputs().slice(1); } ArrayRef<Value*> bodyCarriedOutputs() const { return bodyBlock()->outputs().slice(1); } Node* node() const { return node_; } operator Node*() const { return node_; } void permuteLoopCarried(const std::vector<size_t>& new_output_order) { node_->permuteOutputs(new_output_order); // skip trip count and cond node_->permuteInputs(adjustIndices(2, new_output_order)); auto adjusted_block_order = adjustIndices(1, new_output_order); bodyBlock()->permuteOutputs(adjusted_block_order); bodyBlock()->permuteInputs(adjusted_block_order); } void replaceMaxTripCount(Value* new_max_trip_count) { node_->replaceInput(0, new_max_trip_count); } void replaceInputCondition(Value* new_input_condition) { node_->replaceInput(1, new_input_condition); } // our way of encoding loops makes them difficult to turn back into python // syntax. we have to check properties of the condition and trip count inputs // to figure out which one it initially was. 
ModifiedLoops are not directly // mappable to either For or While enum LoopType { While, For, ModifiedLoop }; LoopType loopType() { auto trip_count = toIValue(maxTripCount()); auto cond_input = toIValue(inputCond()); auto cond_next = toIValue(nextCond()); bool condition_is_always_true = cond_input && cond_input->toBool() && cond_next && cond_next->toBool(); bool trip_count_is_specified = !trip_count || // trip is not a constant trip_count->toInt() != std::numeric_limits<int64_t>::max() || // it is a constant but not // the default one !currentTripCount() ->uses() .empty(); // it is actually being used in the body. if (condition_is_always_true) { // if the trip count was not specified, this was a user-written `while True:` return trip_count_is_specified ? For : While; } else { if (trip_count_is_specified) { return ModifiedLoop; } return While; } } private: Node* node_; // adjust index_ordering by adding indices 0 through adjust - 1, and // incrementing all existing indices by adjust static std::vector<size_t> adjustIndices( size_t adjust, const std::vector<size_t>& index_ordering) { std::vector<size_t> adjusted; adjusted.reserve(adjust + index_ordering.size()); for (const auto i : c10::irange(adjust)) { adjusted.push_back(i); } for (auto index : index_ordering) { adjusted.push_back(index + adjust); } return adjusted; } }; } // namespace torch::jit ```
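To close the loop on the views above, here is a hedged sketch of how `IfView` and `LoopView` are typically used together with `findAllNodes` from `ir.h`; the invariants asserted are my reading of the comments above, not checks copied from this tree.

```cpp
#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/ir/ir_views.h>

void inspect_control_flow(torch::jit::Graph& g) {
  using namespace torch::jit;

  for (Node* n : findAllNodes(g, prim::If, /*recurse=*/true)) {
    IfView ifv(n);
    // Every If output has a corresponding output in each branch block.
    AT_ASSERT(ifv.thenOutputs().size() == ifv.outputs().size());
    AT_ASSERT(ifv.elseOutputs().size() == ifv.outputs().size());
  }

  for (Node* n : findAllNodes(g, prim::Loop, /*recurse=*/true)) {
    LoopView loop(n);
    // Classify the loop back into rough Python-level syntax.
    switch (loop.loopType()) {
      case LoopView::For:
        loop.maxTripCount()->node()->dump();
        break;
      case LoopView::While:
      case LoopView::ModifiedLoop:
        break;
    }
  }
}
```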