diff --git a/ckpts/universal/global_step20/zero/11.input_layernorm.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/11.input_layernorm.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..57dcf56d734e80e5d37a39ded64de0ecaf3f150d --- /dev/null +++ b/ckpts/universal/global_step20/zero/11.input_layernorm.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9c10c697dcdc57d4b5c8c4e89861530f08ae3d32bf6f2353d0ba1503ad1c1f02 +size 9372 diff --git a/ckpts/universal/global_step20/zero/11.input_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/11.input_layernorm.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..1a1b8bdb15c0689e2022687d2da205aac2380a9b --- /dev/null +++ b/ckpts/universal/global_step20/zero/11.input_layernorm.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f6182c43529a98a93a7c2686bd01bf0187a0e89331f98b3e876f957b9cf8bac7 +size 9387 diff --git a/ckpts/universal/global_step20/zero/11.input_layernorm.weight/fp32.pt b/ckpts/universal/global_step20/zero/11.input_layernorm.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..d45af0a7ca4179149dac1409c2a7e95c4836ddb1 --- /dev/null +++ b/ckpts/universal/global_step20/zero/11.input_layernorm.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:502c0eed83d1b276adba1696ecd37f073ec756e1cfbb2044add3acfc3e407f72 +size 9293 diff --git a/ckpts/universal/global_step20/zero/16.mlp.dense_4h_to_h.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/16.mlp.dense_4h_to_h.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..d8f664c07cd9124569d62484eb887863f02a7218 --- /dev/null +++ b/ckpts/universal/global_step20/zero/16.mlp.dense_4h_to_h.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bb0f36303a9db36196bf043fc9ed8b97c9d409fbcd614248b4b1d46a5d0429bf +size 33555612 diff --git a/ckpts/universal/global_step20/zero/16.mlp.dense_4h_to_h.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/16.mlp.dense_4h_to_h.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..988d53367d64ae9de5eff4dd4e2d5fd4c35e9da8 --- /dev/null +++ b/ckpts/universal/global_step20/zero/16.mlp.dense_4h_to_h.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2a7867e0253a7aaff53be04d3caeabae2443153a978322a79e2aefa516476c7f +size 33555627 diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/code.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/code.h new file mode 100644 index 0000000000000000000000000000000000000000..128b193b63aa53d70272754c81226929871f1006 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/code.h @@ -0,0 +1,39 @@ +#pragma once + +#include + +#include +#include +#include + +namespace torch { +namespace jit { +namespace mobile { + +using Stack = std::vector; +using DebugHandle = int64_t; + +class Function; + +// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) +struct Code { + std::vector instructions_; + std::vector debug_handles_; + std::vector op_names_; + std::vector operator_input_sizes_; + std::vector> operators_; + std::vector constants_; + std::vector types_; + // TODO After we actually export CALL instructions we can remove this. 
+ // We may need a two-stage importing scheme, where we firstly construct all + // function objects, and then append referenced function pointers. This could + // be done in parseMethods(). + std::vector functions_; + size_t register_size_ = 0; // Aggregated output size. + // initialized means operators_ array is filled with operators + bool initialized = false; +}; + +} // namespace mobile +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/debug_info.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/debug_info.h new file mode 100644 index 0000000000000000000000000000000000000000..7044aa644d67e83ba6f7eb28bd0d6cacf1ebb182 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/debug_info.h @@ -0,0 +1,57 @@ +#pragma once +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { +/* + * MobileDebugTable: + * Deserializes debug_pkl and callstack_map records from PT model's zip archive + * and stores them in a map of debug handles to DebugInfoPair. Debug handles are + * unique per model and runtime, be in lite interpreter or delegate, an + * exception of BackendRuntimeException should raised using debug handles. + * getSourceDebugString method is responsible for translating debug + * handles to correspond debug information. + * This debug informatin includes stack trace of model level source code and + * module hierarchy where the exception occurred. + */ +class MobileDebugTable { + public: + MobileDebugTable() = default; + MobileDebugTable( + std::unique_ptr& reader, + const std::shared_ptr& cu); + + template + MobileDebugTable(It begin, It end) : callstack_ptr_map_(begin, end) {} + + std::string getSourceDebugString( + const int64_t debug_handle, + const std::string& top_module_type_name = "ModuleTypeUnknown") const; + std::string getSourceDebugString( + const std::vector& debug_handles, + const std::string& top_module_type_name = "ModuleTypeUnknown") const; + std::string getModuleHierarchyInfo( + const int64_t debug_handle, + const std::string& top_module_type_name = "ModuleTypeUnknown") const; + std::string getModuleHierarchyInfo( + const std::vector& debug_handles, + const std::string& top_module_type_name = "ModuleTypeUnknown") const; + + const ska::flat_hash_map& getCallStackPtrMap() + const { + return callstack_ptr_map_; + } + + private: + std::pair getSourceDebugModuleHierarchyInfo( + const std::vector& debug_handles, + const std::string& top_module_type_name = "ModuleTypeUnknown") const; + ska::flat_hash_map callstack_ptr_map_; +}; + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/file_format.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/file_format.h new file mode 100644 index 0000000000000000000000000000000000000000..708913dafe18a71cdf22a117cf953f93110ac680 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/file_format.h @@ -0,0 +1,196 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#if defined(HAVE_MMAP) +#include +#include +#include +#include +#include +#endif + +/** + * @file + * + * Helpers for identifying file formats when reading serialized data. + * + * Note that these functions are declared inline because they will typically + * only be called from one or two locations per binary. 
+ */ + +namespace torch { +namespace jit { + +/** + * The format of a file or data stream. + */ +enum class FileFormat { + UnknownFileFormat = 0, + FlatbufferFileFormat, + ZipFileFormat, +}; + +/// The size of the buffer to pass to #getFileFormat(), in bytes. +constexpr size_t kFileFormatHeaderSize = 8; +constexpr size_t kMaxAlignment = 16; + +/** + * Returns the likely file format based on the magic header bytes in @p header, + * which should contain the first bytes of a file or data stream. + */ +// NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration) +static inline FileFormat getFileFormat(const char* data) { + // The size of magic strings to look for in the buffer. + static constexpr size_t kMagicSize = 4; + + // Bytes 4..7 of a Flatbuffer-encoded file produced by + // `flatbuffer_serializer.h`. (The first four bytes contain an offset to the + // actual Flatbuffer data.) + static constexpr std::array kFlatbufferMagicString = { + 'P', 'T', 'M', 'F'}; + static constexpr size_t kFlatbufferMagicOffset = 4; + + // The first four bytes of a ZIP file. + static constexpr std::array kZipMagicString = { + 'P', 'K', '\x03', '\x04'}; + + // Note that we check for Flatbuffer magic first. Since the first four bytes + // of flatbuffer data contain an offset to the root struct, it's theoretically + // possible to construct a file whose offset looks like the ZIP magic. On the + // other hand, bytes 4-7 of ZIP files are constrained to a small set of values + // that do not typically cross into the printable ASCII range, so a ZIP file + // should never have a header that looks like a Flatbuffer file. + if (std::memcmp( + data + kFlatbufferMagicOffset, + kFlatbufferMagicString.data(), + kMagicSize) == 0) { + // Magic header for a binary file containing a Flatbuffer-serialized mobile + // Module. + return FileFormat::FlatbufferFileFormat; + } else if (std::memcmp(data, kZipMagicString.data(), kMagicSize) == 0) { + // Magic header for a zip file, which we use to store pickled sub-files. + return FileFormat::ZipFileFormat; + } + return FileFormat::UnknownFileFormat; +} + +/** + * Returns the likely file format based on the magic header bytes of @p data. + * If the stream position changes while inspecting the data, this function will + * restore the stream position to its original offset before returning. + */ +// NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration) +static inline FileFormat getFileFormat(std::istream& data) { + FileFormat format = FileFormat::UnknownFileFormat; + std::streampos orig_pos = data.tellg(); + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + std::array header; + data.read(header.data(), header.size()); + if (data.good()) { + format = getFileFormat(header.data()); + } + data.seekg(orig_pos, data.beg); + return format; +} + +/** + * Returns the likely file format based on the magic header bytes of the file + * named @p filename. 
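 *
 * Editor's illustrative sketch (not part of the diffed header); "model.ptl" is
 * a placeholder path:
 *
 *   FileFormat format = getFileFormat("model.ptl");
 *   if (format == FileFormat::ZipFileFormat) {
 *     // fall back to the zip/pickle loading path
 *   }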
+ */ +// NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration) +static inline FileFormat getFileFormat(const std::string& filename) { + std::ifstream data(filename, std::ifstream::binary); + return getFileFormat(data); +} + +// NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration) +static void file_not_found_error() { + std::stringstream message; + message << "Error while opening file: "; + if (errno == ENOENT) { + message << "no such file or directory" << std::endl; + } else { + message << "error no is: " << errno << std::endl; + } + TORCH_CHECK(false, message.str()); +} + +// NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration) +static inline std::tuple, size_t> get_file_content( + const char* filename) { +#if defined(HAVE_MMAP) + int fd = open(filename, O_RDONLY); + if (fd < 0) { + // failed to open file, chances are it's no such file or directory. + file_not_found_error(); + } + struct stat statbuf {}; + fstat(fd, &statbuf); + size_t size = statbuf.st_size; + void* ptr = mmap(nullptr, statbuf.st_size, PROT_READ, MAP_PRIVATE, fd, 0); + close(fd); + auto deleter = [statbuf](char* ptr) { munmap(ptr, statbuf.st_size); }; + std::shared_ptr data(reinterpret_cast(ptr), deleter); +#else + FILE* f = fopen(filename, "rb"); + if (f == nullptr) { + file_not_found_error(); + } + fseek(f, 0, SEEK_END); + size_t size = ftell(f); + fseek(f, 0, SEEK_SET); + // make sure buffer size is multiple of alignment + size_t buffer_size = (size / kMaxAlignment + 1) * kMaxAlignment; + std::shared_ptr data( + static_cast(c10::alloc_cpu(buffer_size)), c10::free_cpu); + fread(data.get(), size, 1, f); + fclose(f); +#endif + return std::make_tuple(data, size); +} + +// NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration) +static inline std::tuple, size_t> get_stream_content( + std::istream& in) { + // get size of the stream and reset to orig + std::streampos orig_pos = in.tellg(); + in.seekg(orig_pos, std::ios::end); + const long size = in.tellg(); + in.seekg(orig_pos, in.beg); + + // read stream + // NOLINT make sure buffer size is multiple of alignment + size_t buffer_size = (size / kMaxAlignment + 1) * kMaxAlignment; + std::shared_ptr data( + static_cast(c10::alloc_cpu(buffer_size)), c10::free_cpu); + in.read(data.get(), size); + + // reset stream to original position + in.seekg(orig_pos, in.beg); + return std::make_tuple(data, size); +} + +// NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration) +static inline std::tuple, size_t> get_rai_content( + caffe2::serialize::ReadAdapterInterface* rai) { + size_t buffer_size = (rai->size() / kMaxAlignment + 1) * kMaxAlignment; + std::shared_ptr data( + static_cast(c10::alloc_cpu(buffer_size)), c10::free_cpu); + rai->read( + 0, data.get(), rai->size(), "Loading ReadAdapterInterface to bytes"); + return std::make_tuple(data, buffer_size); +} + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/flatbuffer_loader.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/flatbuffer_loader.h new file mode 100644 index 0000000000000000000000000000000000000000..f29fe5b2e49424519005c8704232d2e4312481ef --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/flatbuffer_loader.h @@ -0,0 +1,136 @@ +#pragma once + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +/** + * Defines the public API for loading flatbuffer-serialized mobile modules. 
+ * Note that this header must not include or depend on flatbuffer-defined + * types, to avoid leaking those details to PyTorch clients. + */ + +namespace torch { +namespace jit { + +/// All non-copied data pointers provided to `parse_and_initialize_*` functions +/// must be aligned to this boundary. Since the Module will point directly into +/// the data, this alignment is necessary to ensure that certain types/structs +/// are properly aligned. +constexpr size_t kFlatbufferDataAlignmentBytes = 16; + +/// Maps file names to file contents. +using ExtraFilesMap = std::unordered_map; + +// On high level, to produce a Module from a file on disk, we need to go +// through the follow steps: +// 1. Read: Read the file from disk -> memory +// 2. Deserialize: Parse the bytes to produce some in memory manipulable +// structure +// 3. Module initialization: Produce mobile::Module out of the structure +// produced in 2. +// Under this context, the structure described in 2. is the flatbuffer-defined +// type mobile::serialization::Module. However, this step/type is not visible in +// the public API. + +// Parse a mobile::Module from raw bytes. +// +// This function does steps 2+3 described above. +// +// Does not take ownership of `data`; if you want it to take ownership, see the +// shared_ptr overload of this function. +// +// If should_copy_tensor_memory is true, then the returned module will NOT have +// refences to `data`, so `data` can be freed immediately. +// +// If should_copy_tensor_memory is false, then returned module will have tensors +// that points inside of `data`; the caller will need to make sure that `data` +// outlives the returned Module. Also, `data` must be aligned to +// kFlatbufferDataAlignmentBytes. +TORCH_API mobile::Module parse_and_initialize_mobile_module( + void* data, + size_t size, // of `data`, in bytes. + c10::optional device = c10::nullopt, + ExtraFilesMap* extra_files = nullptr, + bool should_copy_tensor_memory = false); + +// Parse a mobile::Module from raw bytes. +// +// This function does steps 2+3 described above. +// +// The returned Module holds a reference to `data`, which must be aligned to +// kFlatbufferDataAlignmentBytes. +// +// If you do not want the Module to hold a reference to `data`, see the raw +// pointer overload of this function. +TORCH_API mobile::Module parse_and_initialize_mobile_module( + std::shared_ptr data, + size_t size, // of `data`, in bytes. + c10::optional device = c10::nullopt, + ExtraFilesMap* extra_files = nullptr); + +// Parse a mobile::Module from raw bytes, also returning JIT-related metadata. +// +// This is the same as parse_and_initialize_mobile_module() except that it also +// extracts JIT source files and constants. Can be used to construct a +// jit::Module. +TORCH_API mobile::Module parse_and_initialize_mobile_module_for_jit( + void* data, + size_t size, // of `data`, in bytes. + ExtraFilesMap& jit_sources, + std::vector& jit_constants, + c10::optional device = c10::nullopt, + ExtraFilesMap* extra_files = nullptr); + +// Load a mobile::Module from a filepath. +// +// This function does steps 1+2+3 described above. +// +// We need to have this as a convienience because Python API will need to wrap +// this. C++ clients should use one of the versions of +// parse_and_initialize_mobile_module() so they can manage the raw data more +// directly. 
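//
// Editor's illustrative sketch (not part of the diffed header), assuming
// "model.ff" is a placeholder path to a flatbuffer file:
//
//   mobile::Module m = load_mobile_module_from_file("model.ff");
//   // or, when the (suitably aligned) bytes are already in memory:
//   // mobile::Module m2 = parse_and_initialize_mobile_module(data, size);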
+TORCH_API mobile::Module load_mobile_module_from_file( + const std::string& filename, + c10::optional device = c10::nullopt, + ExtraFilesMap* extra_files = nullptr); + +TORCH_API uint64_t get_bytecode_version(std::istream& in); +TORCH_API uint64_t get_bytecode_version(const std::string& filename); +TORCH_API uint64_t get_bytecode_version_from_bytes(char* flatbuffer_content); + +TORCH_API mobile::ModuleInfo get_module_info_from_flatbuffer( + char* flatbuffer_content); + +// The methods below are less efficient because it need to read the stream in +// its entirity to a buffer +TORCH_API mobile::Module load_mobile_module_from_stream_with_copy( + std::istream& in, + c10::optional device = c10::nullopt, + ExtraFilesMap* extra_files = nullptr); + +TORCH_API mobile::Module parse_flatbuffer_no_object( + std::shared_ptr data, + size_t size, + c10::optional device); + +TORCH_API mobile::Module parse_and_initialize_mobile_module( + void* data, + size_t, + c10::optional, + ExtraFilesMap* extra_files, + bool should_copy_tensor_memory); + +// no op, TODO(qihan) delete +TORCH_API bool register_flatbuffer_loader(); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/frame.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/frame.h new file mode 100644 index 0000000000000000000000000000000000000000..2db12f7d1937489e519d6a71c1c9af9a0efd45be --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/frame.h @@ -0,0 +1,53 @@ +#pragma once + +#include + +#include +#include + +namespace torch { +namespace jit { +namespace mobile { + +class Frame { + public: + explicit Frame(const Code& code) : code_(code) {} + const Code& getCode() const { + return code_; + } + + void step() { + pc_++; + } + + void jump(size_t n) { + pc_ += n; + } + + size_t getPC() const { + return pc_; + } + + const Instruction& getInstruction() const { + return code_.instructions_.at(pc_); + } + + c10::optional getDebugHandle() const { + return getDebugHandle(pc_); + } + + c10::optional getDebugHandle(size_t pc) const { + if (pc >= code_.debug_handles_.size()) { + return {}; + } + return code_.debug_handles_[pc]; + } + + private: + const Code& code_; + size_t pc_{0}; +}; + +} // namespace mobile +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/function.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/function.h new file mode 100644 index 0000000000000000000000000000000000000000..fb6f77fa64d766c9bb90a7a723df7a4640e377af --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/function.h @@ -0,0 +1,86 @@ +#pragma once + +#include + +#include +#include +#include +#include + +namespace torch { +namespace jit { +enum OpCode : uint8_t; +struct Instruction; +struct OperatorString; + +namespace mobile { + +class TORCH_API Function : public torch::jit::Function { + public: + explicit Function(c10::QualifiedName name); + Function( + c10::QualifiedName name, + Code code, + at::optional schema); + void run(Stack& stack) override; + at::IValue operator()(Stack& stack); + void ensure_defined() override {} + size_t num_inputs() const override; + const c10::QualifiedName& qualname() const override; + bool call(Stack&, c10::function_ref) override; + + // NOTE: the APIs below is dangerous: if you call append_instruction with + // dbg_handle and then call it without; then the dbg_handle will become + 
// misaligned. Therefore only use ONE variant at time. + void append_instruction(OpCode op, int X, int N, int64_t dbg_handle); + void append_instruction(OpCode op, int X, int N); + void append_operator( + const std::string& name, + const std::string& overload_name, + const c10::optional& num_specified_args); + void append_constant(const c10::IValue& constant); + void append_type(const c10::TypePtr& type); + void append_function(mobile::Function& func); + + void set_register_size(size_t size); + + int64_t get_debug_handle(size_t pc) const; + const Code& get_code() const; + Code& get_code(); + + torch::jit::Function& setSchema(c10::FunctionSchema schema) override; + bool hasSchema() const; + const c10::FunctionSchema& getSchema() const override; + + // Returns the debug handle corresponding to where the execution + // is halted due to exception. + // If no corresponding debug handle is found then -1 is returned. + const std::vector& getExceptionDebugHandles() const; + static Function& registerFunc( + const std::string& qualified_name, + const std::vector& instructions, + const std::vector& constants, + const std::vector& types, + const size_t register_size); + + // if not initialize, initialize by loading operators. + // return true of all op loaded, return false if some op is not found + // in the current runtime. Then, the ops that did not found will be filled + // in unsupported_op_names + bool initialize_operators(bool should_check_operators); + + private: + c10::QualifiedName name_; + Code code_; + at::optional schema_; // (byte-code version 4+) +}; + +c10::optional> makeOperatorFunction( + c10::OperatorName opname, + c10::optional num_specified_args); + +TORCH_API std::string operator_str(const c10::OperatorName& opname); + +} // namespace mobile +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/import_data.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/import_data.h new file mode 100644 index 0000000000000000000000000000000000000000..f3eb202b7f00aa262573d72d7718d3f199eb1776 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/import_data.h @@ -0,0 +1,38 @@ +#pragma once + +#include +#include +#include +#include + +#include +#include +#include + +namespace torch { +namespace jit { + +/** + * Loads named parameters from the serialized data in @p in. + * + * Calls #TORCH_CHECK() if the data format is not recognized. + */ +TORCH_API std::map _load_parameters( + std::istream& in, + c10::optional device = c10::nullopt); + +/** + * Loads named parameters from the serialized data in @p filename. + * + * Calls #TORCH_CHECK() if the data format is not recognized. + */ +TORCH_API std::map _load_parameters( + const std::string& filename, + c10::optional device = c10::nullopt); + +// NOTE: Please prefer using _load_parameters over using the function below. 
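//
// Editor's illustrative sketch (not part of the diffed header): the preferred
// path, assuming "params.pt" is a placeholder file of serialized parameters
// and "linear.weight" is a hypothetical key:
//
//   std::map<std::string, at::Tensor> params = _load_parameters("params.pt");
//   at::Tensor w = params.at("linear.weight");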
+TORCH_API std::map mobile_module_to_parameter_map( + const mobile::Module& module); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/import_export_common.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/import_export_common.h new file mode 100644 index 0000000000000000000000000000000000000000..4e4e06dfa51df97eb2d7f3f89be8c343c4eafc1c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/import_export_common.h @@ -0,0 +1,23 @@ +#pragma once + +/** + * @file + * Declarations shared between import_data.cpp and export_data.cpp + */ + +namespace torch { +namespace jit { +namespace mobile { + +namespace internal { +/** + * The name of the mobile::Module attribute which contains saved parameters, as + * a Dict of names to Tensors. Only used for Flatbuffer serialization. + */ +// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays) +constexpr char kSavedParametersAttributeName[] = "data"; +} // namespace internal + +} // namespace mobile +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/interpreter.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/interpreter.h new file mode 100644 index 0000000000000000000000000000000000000000..dcfa8bc2208e5ab74a1e46e400956694941640ea --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/interpreter.h @@ -0,0 +1,30 @@ +#pragma once + +#include + +#include +#include + +namespace torch { +namespace jit { +namespace mobile { + +struct InterpreterState { + TORCH_API explicit InterpreterState(const Code& code); + TORCH_API bool run(Stack& stack); + + private: + void enterFrame(const Code&); + void leaveFrame(); + void saveExceptionDebugHandles(); + void callFunction(torch::jit::Function& f, Stack& stack); + + c10::IValue& reg(size_t reg); + std::vector registers_; + std::vector frames_; +}; + +const std::vector& getInterpretersExceptionDebugHandles(); +} // namespace mobile +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/method.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/method.h new file mode 100644 index 0000000000000000000000000000000000000000..c4a957d00113360b1da71ef06aff455123df5bf8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/method.h @@ -0,0 +1,45 @@ +#pragma once + +#include +#include + +namespace torch { +namespace jit { +namespace mobile { + +class Module; + +struct TORCH_API Method { + Method(const Module* owner, Function* function); + + void run(Stack& stack) const; + void run(Stack&& stack) const { + run(stack); + } + + c10::IValue operator()(std::vector stack) const; + + const std::string& name() const { + return function_->name(); + } + + int64_t get_debug_handle(size_t pc) const { + return function_->get_debug_handle(pc); + } + + Function& function() const { + return *function_; + } + + private: + // Methods are uniquely owned by a single module. 
+ // This raw pointer allows referencing the module + const Module* owner_; + + // Underlying unbound function + Function* function_; +}; + +} // namespace mobile +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/observer.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/observer.h new file mode 100644 index 0000000000000000000000000000000000000000..694fe1df82c10a4227fd585282f2dd78af6c8ce8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/observer.h @@ -0,0 +1,110 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch { + +class MobileDebugInfo : public c10::DebugInfoBase { + public: + const std::string& getModelName() { + return model_name_; + } + + void setModelName(const std::string& model_name) { + model_name_ = model_name; + } + + const std::string& getMethodName() { + return method_name_; + } + + void setMethodName(const std::string& method_name) { + method_name_ = method_name; + } + + size_t getOpIdx() { + return op_idx_; + } + + void setOpIdx(size_t op_idx) { + op_idx_ = op_idx; + } + + private: + std::string model_name_; + std::string method_name_; + // TODO: Kimish + // If we launch a thread such as for at::launch, interepter continuation + // and if the caching allocator is enabled in the base thread + // then, in order to propagate this information, that is caching allocator + // is enabled, across thread boundaries we can use the mechanism provided + // by ThreadLocalDebugInfo + // Once the thread local MobileDebugInfo is accessible in the launched + // thread, it can be accessed in that thread and that thread can set + // its own thread local CachingAllocatorInfo. + // However, we cannot expect every launched thread to extract and set + // its own thread local copy of CachingAllocatorInfo. + // But this can be done in lite interpreter, where in the run method + // it can do info = + // c10::ThreadLocalDebugInfo::get(c10::DebugInfoKind::MOBILE_RUNTIME_INFO)) + // .get_caching_allocator_info(); + // GetThreadLocalCachingAllocatorInfo() = info; + // Other option is to have MobileDebugInfo itself be the place where thread + // local copy of CachingAllocatorInfo is stored. Then + // DefaultMobileCPUAllocator inspects this to decide if to use + // CachingAllocator. However, current lite interpreter does not support FORK, + // thus from the run method of lite interpreter we are not really gonna launch + // another instance of lite interpreter in a different thread. So for now not + // getting bothered about passing CachingAllocatorInfo across thread + // boundaries. 
c10::CachingAllocatorInfo caching_allocator_info; + size_t op_idx_ = 0; +}; + +class MobileModuleObserver { + public: + virtual ~MobileModuleObserver() = default; + + virtual void onEnterRunMethod(const int32_t) {} + virtual void onExitRunMethod( + const std::unordered_map&, + const std::string&, + const int32_t) {} + virtual void onFailRunMethod( + const std::unordered_map&, + const std::string&, + const int32_t, + const char*) {} + virtual void onEnterLoadModel(const int32_t) {} + virtual void onExitLoadModel( + const int32_t, + const std::unordered_map&) { + } // key: filename, value: file content + virtual void onFailLoadModel(const int32_t, const char*) {} + virtual void onFailLoadModel( + const int32_t, + const char*, + const std::unordered_map&) {} + virtual std::vector getDefaultExtraFiles() = 0; + virtual std::unordered_map processMetadataFromExtra( + const std::unordered_map&) = 0; +}; + +class MobileObserverConfig { + public: + void setModuleObserver(std::unique_ptr reporter) { + module_observer_ = std::move(reporter); + } + MobileModuleObserver* getModuleObserver() { + return module_observer_.get(); + } + + private: + std::unique_ptr module_observer_; +}; + +MobileObserverConfig& observerConfig(); + +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/parse_bytecode.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/parse_bytecode.h new file mode 100644 index 0000000000000000000000000000000000000000..587dbe2e9c5da4a59c0b4bda35a39fde46526d48 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/parse_bytecode.h @@ -0,0 +1,25 @@ +#pragma once +#include + +namespace torch { +namespace jit { +namespace mobile { +using c10::IValue; +TORCH_API void parseInstructions( + const std::string& function_name, + c10::ivalue::TupleElements&& ins_list, + c10::ivalue::TupleElements& debug_handles_m_tuple, + mobile::Function* function); +TORCH_API void parseConstants( + const c10::ivalue::TupleElements& consts_list, + mobile::Function* function); +TORCH_API void parseTypes( + const c10::ivalue::TupleElements& types_list, + mobile::Function* function); +TORCH_API void parseRegisterSize(size_t rsize, mobile::Function* function); +TORCH_API void applyUpgrader( + mobile::Function* function, + uint64_t operator_version); +} // namespace mobile +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/prim_ops_registery.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/prim_ops_registery.h new file mode 100644 index 0000000000000000000000000000000000000000..f14848cf93193f9f5929bdffc5a08f28712f73d7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/prim_ops_registery.h @@ -0,0 +1,32 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace jit { +namespace mobile { + +using Stack = std::vector; + +void registerPrimOpsFunction( + const std::string& name, + const std::function& fn); + +bool hasPrimOpsFn(const std::string& name); + +std::function& getPrimOpsFn(const std::string& name); + +class prim_op_fn_register { + public: + prim_op_fn_register( + const std::string& name, + const std::function& fn) { + registerPrimOpsFunction(name, fn); + } +}; + +} // namespace mobile +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/profiler_edge.h 
b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/profiler_edge.h new file mode 100644 index 0000000000000000000000000000000000000000..6ac74b053c36393f2bcd318bf43593af70658536 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/profiler_edge.h @@ -0,0 +1,119 @@ +#pragma once +#include +#include + +namespace torch { +namespace jit { +namespace mobile { + +// If we dont have kineto available then edge profiler does not +// work since it relies on Kineto +#ifdef USE_KINETO +class TORCH_API KinetoEdgeCPUProfiler { + public: + // This profiler only profiles KINETO events + // No GPU_FALLBACK or NVTX + /* + * @param m is the instance of mobile Module which is being profiled. + * Note that this implies that KinetoEdgeCPUProfiler can be used + * to profile specific Module (see usage below), unliked ProfilerKineto + * which can profile pytorch runtime in arbitrary scope. + * @param fname is the name of the file to which chrome trace is written. + * @param report_input_shapes: whether to record shapes of op's inputs. + * @param with_stack: whether to record model's python stacktrace for the op. + * @param with_flops: whether to report flops corresponding to the op. + * @param with_modules: whether to report original python module + * hierarchy to which the op belongs. + * @param events + * @param adjust_vulkan_timestamps: whether to adjust vulkan timestamps from + * query pool to align with cpu event times + * + * Usage pattern for this profiler must be as follows: + * + * { + * KinetoEdgeCPUProfiler(m, filename, args); + * m.forward(...); + * } + * + * The reason being that KinetoEdgeCPUProfiler has a dependency on Module + * and thus it must not outlive it. + * + * Thus, when KinetoEdgeCPUProfiler is used as RAII to do profiling + * within certain scope. In that scope, the captured reference to + * Module will outlive KinetoEdgeCPUProfiler. This is gauranteed because + * KinetoEdgeCPUProfiler must be constructed later than Module, on stack. + * + * An example of the anti-pattern and wrong usage is: + * + * std::shared_ptr profiler(m, filename, args); + * m.forward(...); + * + * Since KinetoEdgeCPUProfiler object would then be constructed on heap + * with its lifetime managed manually or via smart pointers. + */ + KinetoEdgeCPUProfiler( + const torch::jit::mobile::Module& m, + const std::string& fname, + const bool report_input_shapes = false, + const bool profile_memory = false, + const bool with_stack = false, + const bool with_flops = false, + const bool with_modules = false, + std::vector events = {}, + const bool adjust_vulkan_timestamps = false); + + const std::unique_ptr& + disableProfiler(); + const std::unique_ptr& + getProfilerResult(); + void recordBackendEvent( + const int64_t start_time_us, + const int64_t end_time_us, + const int64_t debug_handle, + const std::string& event_name, + const std::string& backend_name); + void recordBackendMemoryEvent( + void* ptr, + int64_t alloc_size, + size_t total_allocated, + size_t total_reserved, + c10::Device device); + + ~KinetoEdgeCPUProfiler(); + + private: + /* + * We store a reference to Module to make such dependency explicit, since + * a Module reference is already stored in a functor. 
+ */ + const mobile::Module& m_; + std::string trace_file_name_; + std::unique_ptr profiler_result_; +}; + +TORCH_API KinetoEdgeCPUProfiler* getCurrentEdgeProfiler(); + +#define RECORD_BACKEND_EVENT_TO_EDGE_PROFILER( \ + start_time_us, end_time_us, debug_handle, event_name, backend_name) \ + if (mobile::getCurrentEdgeProfiler()) { \ + mobile::getCurrentEdgeProfiler()->recordBackendEvent( \ + start_time_us, end_time_us, debug_handle, event_name, backend_name); \ + } + +#define RECORD_BACKEND_MEMORY_EVENT_TO_EDGE_PROFILER( \ + ptr, alloc_size, total_allocated, total_reserved, device) \ + if (mobile::getCurrentEdgeProfiler()) { \ + mobile::getCurrentEdgeProfiler()->recordBackendMemoryEvent( \ + ptr, alloc_size, total_allocated, total_reserved, device); \ + } +#else + +#define RECORD_BACKEND_EVENT_TO_EDGE_PROFILER( \ + start_time_us, end_time_us, debug_handle, event_name, backend_name) + +#define RECORD_BACKEND_MEMORY_EVENT_TO_EDGE_PROFILER( \ + ptr, alloc_size, total_allocated, total_reserved, device) +#endif +} // namespace mobile +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/promoted_prim_ops.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/promoted_prim_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..916d8db57500c2afd1ffd47b811bd422e39f11e7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/promoted_prim_ops.h @@ -0,0 +1,63 @@ +#pragma once +#include +#include + +namespace torch { +namespace jit { + +void tupleIndex(Stack& stack); + +void raiseException(Stack& stack); + +void is(Stack& stack); + +void unInitialized(Stack& stack); + +void isNot(Stack& stack); + +void aten_format(Stack& stack); + +void size(Stack& stack); + +void sym_size(Stack& stack); + +void sym_size_int(Stack& stack); + +void sym_stride_int(Stack& stack); + +void sym_numel(Stack& stack); + +void sym_storage_offset(Stack& stack); + +void sym_stride(Stack& stack); + +void device(Stack& stack); + +void device_with_index(Stack& stack); + +void dtype(Stack& stack); + +void layout(Stack& stack); + +void toPrimDType(Stack& stack); + +void dim(Stack& stack); + +void _not(Stack& stack); + +void boolTensor(Stack& stack); + +void toList(Stack& stack); + +void numToTensorScalar(Stack& stack); + +void isCuda(Stack& stack); + +void numToTensorBool(Stack& stack); + +void dictIndex(Stack& stack); + +void raiseExceptionWithMessage(Stack& stack); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/quantization.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/quantization.h new file mode 100644 index 0000000000000000000000000000000000000000..aa47dcb64b62be9307cd7047d2ddff72d411283e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/quantization.h @@ -0,0 +1,38 @@ +#pragma once + +#include +#include + +namespace torch { +namespace jit { +namespace mobile { +class Module; +namespace quantization { +/* + * Device side PTQ API. + * Once the model has been prepared for quantization on server side, such model + * is sent to device. On device side the model is further trained. At the end of + * the training, before the model is readied for inference, we need to quantize + * the model. + * Usage of this API is as follows. 
+ * PTQQuanizationHelper ptq_helper; + * ptq_helper.quantize_dynamic(m, "forward"); + * Args: + * m: Captured by reference, an instance of mobile::Module. This module will be + * mutated in place to replace its method with quantized + * equivalent. method:name: Name of the method to be quantized. AOT preparation + * for quantization must also have been done for this method. Returns: In place + * mutated `m` whose size should be smaller due to weight quantization and whose + * method should use quantized ops + */ +class TORCH_API PTQQuanizationHelper { + public: + PTQQuanizationHelper() = default; + void quantize_dynamic( + torch::jit::mobile::Module& m, + const std::string& method_name); +}; +} // namespace quantization +} // namespace mobile +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/register_ops_common_utils.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/register_ops_common_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..b0ecaf055f5ee3270df9f44e12ebe4d43ce70162 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/register_ops_common_utils.h @@ -0,0 +1,55 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { + +inline void noop(Stack& n) {} + +int64_t normalizeIndex(int64_t idx, int64_t list_size); + +// reference function THPVariable_to in python_variable_methods.cpp +static C10_UNUSED at::Tensor to_dispatch( + at::Tensor self, + c10::optional device, + c10::optional scalarType, + bool non_blocking, + bool copy) { + if (device && device->is_cuda()) { + at::globalContext().lazyInitCUDA(); + } + if (!device && !scalarType && !copy) { + return self; + } else if (!device) { + return self.to(*scalarType, non_blocking, copy); + } else if (!scalarType) { + return self.to(*device, non_blocking, copy); + } else { + return self.to(*device, *scalarType, non_blocking, copy); + } +} + +// Convert the tensor pointed to by \p data to a nested list. \p dim is the +// number of dimensions in the tensor and \p cur_dim is the dimension being +// processed by the current invocation. \p ty is the expected output IR type of +// the operation. \p is the scalar type of \p data. \p sizes and \p strides are +// the sizes and strides of the tensor operand and \p element_size is the size +// in bytes of one tensor element. 
+IValue tensorToListRecursive( + char* data, + int64_t cur_dim, + int64_t num_tensor_dims, + at::TypePtr ty, + at::ScalarType scalar_ty, + at::IntArrayRef sizes, + at::IntArrayRef strides, + size_t element_size); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/add_if_then_else.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/add_if_then_else.h new file mode 100644 index 0000000000000000000000000000000000000000..c6b3f9376d6b328f1d7eaf6cda14b007a6fc8e45 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/add_if_then_else.h @@ -0,0 +1,11 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +TORCH_API bool AddIfThenElseOp(std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/canonicalize.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/canonicalize.h new file mode 100644 index 0000000000000000000000000000000000000000..46d90d1a515f66fa19ac37c6d8621ba5f6e687de --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/canonicalize.h @@ -0,0 +1,22 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +TORCH_API std::shared_ptr Canonicalize( + const std::shared_ptr& graph, + bool keep_unique_names = true); + +TORCH_API void CanonicalizeOutputs(std::shared_ptr& graph); + +TORCH_API c10::optional firstOrLastUse(Value* v, bool find_first); + +TORCH_API bool isBeforeOrAfter( + const Use& a, + const Use& b, + bool checking_before); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/concat_opt.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/concat_opt.h new file mode 100644 index 0000000000000000000000000000000000000000..4fdd86f36a46dd0c1be0c53f1731c8e686ba1796 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/concat_opt.h @@ -0,0 +1,19 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Eliminates common inputs among `aten::cat` ops. +TORCH_API bool EliminateConcatCommonInputs(const std::shared_ptr& graph); + +// Expands `aten::cat` ops into `aten::copy` ops and eliminates redudancies +// in the buffers used for concatenation if possible. +TORCH_API void ExpandConcatAndEliminateRedundancy( + const std::shared_ptr& graph); + +TORCH_API bool CombineConcats(const std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/constant_propagation.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/constant_propagation.h new file mode 100644 index 0000000000000000000000000000000000000000..62293c8d7abc9bc2344ccab38d3a30c18af2fe9d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/constant_propagation.h @@ -0,0 +1,32 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Runs constant propagation on all objects unless ignore_custom_classes is +// specified as true, in which case user defined classes are skipped. This is +// useful to prevent early fusion of packing operations, which end up lowering +// away information about their constructors (e.g. 
packed::linear_clamp_prepack +// and prepacked::conv2d_clamp_prepack) +// Returns True if the pass made a change to the graph +TORCH_API bool ConstantPropagation( + std::shared_ptr& graph, + bool ignore_custom_classes = false); + +// runs constant propagation only on ops that have non-aliasing inputs & outputs +// Returns True if the pass made a change to the graph +TORCH_API bool ConstantPropagationImmutableTypes(std::shared_ptr& graph); + +// Runs the node if its inputs are constants. Callers of this function must +// make their own determination if constant prop is appropriate - for example +// non-deterministic ops or ops with side effects. If ignore_custom_classes is +// specified, nodes that output user defined classes are not run. +TORCH_API c10::optional runNodeIfInputsAreConstant( + const Node* node, + bool ignore_custom_classes = false, + AliasDb* db = nullptr); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/create_functional_graphs.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/create_functional_graphs.h new file mode 100644 index 0000000000000000000000000000000000000000..351816394d80c694d30a2423d8774d3585318af9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/create_functional_graphs.h @@ -0,0 +1,14 @@ +#pragma once + +#include +#include + +namespace torch { +namespace jit { + +TORCH_API void CreateFunctionalGraphs(const std::shared_ptr& graph); + +TORCH_API void InlineFunctionalGraphs(const std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/dead_code_elimination.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/dead_code_elimination.h new file mode 100644 index 0000000000000000000000000000000000000000..780c11f95a9bb9dcaa4fd07aec92409d0f2cd527 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/dead_code_elimination.h @@ -0,0 +1,42 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// If given a top-level graph, DCE will construct do alias analysis that allows +// for "smarter" dead code elimination (we will eliminate mutable ops if we can +// prove the mutated values are not used). Otherwise, we will not allow DCE to +// eliminate mutable ops. +// +// So, prefer to use the graph version if you can. +enum class DCESideEffectPolicy : uint8_t { + // default behavior: dead code elimination will check if a node has side + // effects + // and not delete it if it does. + DONT_DELETE_NODES_WITH_SIDE_EFFECTS, + // with this flag, dead code elimination will not check if a node has side + // effects and treat nodes with side effects like any other node, + // i.e. delete them if their outputs aren't used anywhere. 
+ ALLOW_DELETING_NODES_WITH_SIDE_EFFECTS +}; + +TORCH_API void EliminateDeadCode( + const std::shared_ptr& graph, + DCESideEffectPolicy sideEffectPolicy = + DCESideEffectPolicy::DONT_DELETE_NODES_WITH_SIDE_EFFECTS); +TORCH_API void EliminateDeadCode( + Block* block, + bool recurse = true, + DCESideEffectPolicy sideEffectPolicy = + DCESideEffectPolicy::DONT_DELETE_NODES_WITH_SIDE_EFFECTS); + +// Invoke the user-provided callback on all live values before deleting anything +TORCH_API void EliminateDeadCode( + Block* block, + std::function&)> cb, + DCESideEffectPolicy sideEffectPolicy = + DCESideEffectPolicy::DONT_DELETE_NODES_WITH_SIDE_EFFECTS); +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/decompose_ops.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/decompose_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..400e5997d6368d08edfacc76c969c07828a2c17b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/decompose_ops.h @@ -0,0 +1,11 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +TORCH_API void DecomposeOps(std::shared_ptr& graph); + +} +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fold_linear_bn.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fold_linear_bn.h new file mode 100644 index 0000000000000000000000000000000000000000..704a0915116286ace337974c449e9a635fca4053 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fold_linear_bn.h @@ -0,0 +1,29 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +struct TORCH_API LinearBNParameters { + at::Tensor linear_w; + at::Tensor linear_b; + at::Tensor bn_rm; + at::Tensor bn_rv; + double bn_eps = 0.0; + at::Tensor bn_w; + at::Tensor bn_b; +}; + +/** + * Given the current weight and bias tensors of a Linear module and parameters + * of the BatchNorm module we're folding with, compute the updated values + * for the weight and bias. 
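 *
 * Editor's note, stating the usual batch-norm folding math for clarity (a
 * sketch, not quoted from this header): with
 *   scale = bn_w / sqrt(bn_rv + bn_eps)
 * the fused parameters are
 *   new_w = linear_w * scale.unsqueeze(-1)   // scale each output row
 *   new_b = (linear_b - bn_rm) * scale + bn_b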
+ * + * The function is basically copied from torch/nn/utils/fusion.py + */ +TORCH_API std::tuple computeUpdatedLinearWeightAndBias( + const LinearBNParameters& p); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_conv_add_relu_fusion.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_conv_add_relu_fusion.h new file mode 100644 index 0000000000000000000000000000000000000000..95991e73d9eccf7473071c5ed352af56d7c114f3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_conv_add_relu_fusion.h @@ -0,0 +1,15 @@ +#pragma once + +#include +#include + +namespace torch { +namespace jit { + +TORCH_API extern std::function&)>& +getFuseFrozenConvAddReluImpl(); + +TORCH_API void FuseFrozenConvAddRelu(std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_conv_folding.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_conv_folding.h new file mode 100644 index 0000000000000000000000000000000000000000..65dc138ccd6a41be0fa709516c97a8b89eeafd98 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_conv_folding.h @@ -0,0 +1,24 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Fuses Convolution -> Batchnorm into a single Convolution by +// folding batchnorm weights into conv weights. +// This pass only works on Frozen Graphs; otherwise it is a No-Op. +TORCH_API bool FoldFrozenConvBatchnorm(std::shared_ptr& graph); + +// Fuses Convolution -> Add/Sub into a single Convolution by +// folding add constant tensor into conv weights. +// This pass only works on Frozen Graphs; otherwise it is a No-Op. +TORCH_API bool FoldFrozenConvAddOrSub(std::shared_ptr& graph); + +// Fuses Convolution -> Mul/Div into a single Convolution by +// folding add constant tensor into conv weights. +// This pass only works on Frozen Graphs; otherwise it is a No-Op. +TORCH_API bool FoldFrozenConvMulOrDiv(std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_linear_transpose.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_linear_transpose.h new file mode 100644 index 0000000000000000000000000000000000000000..e952d1c43cef39020405de944bba8b3856398ed3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_linear_transpose.h @@ -0,0 +1,13 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Transposes the weight matrix for frozen linear modules. 
+// and converts it into a matmul +TORCH_API bool FrozenLinearTranspose(std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fuse_linear.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fuse_linear.h new file mode 100644 index 0000000000000000000000000000000000000000..56d37518e37866fa4ee14242215e5079c9c30f4b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fuse_linear.h @@ -0,0 +1,24 @@ +/** \brief Fusing linear patterns as single at::linear for easier pattern + * matching in later passes + */ +#pragma once + +#include + +namespace torch { +namespace jit { + +/** \brief Match the at::linear pattern and fuse it into a single at::linear + * This pass fuse the addmm or matmul + add generated by JIT back to linear + * This pass can be deleted once the JIT can emit the aten::linear in the future + */ +TORCH_API void FuseLinear(std::shared_ptr& graph); + +/** Swap functional linear CallFunctions to aten::linear + */ +TORCH_API void SwapFunctionalLinear(std::shared_ptr& graph); +/** Swap all functional linear CallFunctions in module + */ +TORCH_API void SwapFunctionalLinear(Module& module); +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/graph_rewrite_helper.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/graph_rewrite_helper.h new file mode 100644 index 0000000000000000000000000000000000000000..0920830babb8b326e994339e8c479593091d36cb --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/graph_rewrite_helper.h @@ -0,0 +1,54 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch { +namespace jit { +namespace graph_rewrite_helper { + +std::string getFuncName(Value* func_value); +Value* getValue( + const std::string& name, + const std::unordered_map& match_vmap, + const std::unordered_map& vmap); +c10::optional getIValue( + const std::string& name, + const std::unordered_map& match_vmap, + const std::unordered_map& vmap); +TORCH_API void replaceConvolutionWithAtenConv(std::shared_ptr& graph); + +bool isClampFusable( + const Match& match, + const std::unordered_map& vmap); + +// This struct contains a compiled IR patterns slated for use in the +// findPatternMatches function. The struct encapsulates the common +// information from parseIR that is used in conjunction with the +// pattern matching facility. 
A const instance of this struct can +// also be stored away to cache the compiled IR pattern and reduce +// runtime cost +struct PatternInfo { + std::string pattern_string; + std::unique_ptr pattern_graph; + std::unordered_map vmap; + std::vector filters; + + static PatternInfo parse_from_str( + std::string pattern_string, + const std::vector& filters = {}) { + PatternInfo rv{ + std::move(pattern_string), + std::make_unique(), + decltype(vmap){}, + filters}; + parseIR(rv.pattern_string, rv.pattern_graph.get(), rv.vmap); + return rv; + } +}; + +} // namespace graph_rewrite_helper +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lift_closures.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lift_closures.h new file mode 100644 index 0000000000000000000000000000000000000000..c7cee8417fa457d671da2efbc6a19493762fcffb --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lift_closures.h @@ -0,0 +1,12 @@ +#pragma once + +#include +#include + +namespace torch { +namespace jit { + +TORCH_API void liftClosures(const std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_grad_of.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_grad_of.h new file mode 100644 index 0000000000000000000000000000000000000000..a79bb56492855b6a9002fe82f9c7b9856092af51 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_grad_of.h @@ -0,0 +1,17 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// This pass removes 'grad_of' nodes, replacing them with conditionals of +// the form: +// if any_defined(inputs): +// outputs = +// else: +// outputs = undefineds +TORCH_API void LowerGradOf(Graph& g); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/normalize_ops.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/normalize_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..4d630392ca47df0d0a32ef1bf5d25bbb4a41c163 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/normalize_ops.h @@ -0,0 +1,18 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// This pass converts aten ops to a normalized form. It is +// run immediately after IR generation in both the tracer and compiler, +// so downstream consumers of the IR do not need handle ops in their +// pre-normalized form. +// Currently only handles normalization of op aliases. 
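//
// Editor's note: for example, an alias such as aten::absolute is rewritten to
// its canonical spelling (aten::abs), so downstream passes only need to match
// one form.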
+TORCH_API void NormalizeOps(const std::shared_ptr& graph); + +const std::unordered_map& getOperatorAliasMap(); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/onnx.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/onnx.h new file mode 100644 index 0000000000000000000000000000000000000000..11bee679164043cca58fd3f35a108fd078101a95 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/onnx.h @@ -0,0 +1,28 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace jit { + +TORCH_API std::shared_ptr ToONNX( + std::shared_ptr& state, + ::torch::onnx::OperatorExportTypes operator_export_type); +TORCH_API std::unordered_map BlockToONNX( + Block* old_block, + Block* new_block, + ::torch::onnx::OperatorExportTypes operator_export_type, + std::unordered_map& env, + bool is_sub_block = false); +TORCH_API void NodeToONNX( + Node* old_node, + Block* new_block, + ::torch::onnx::OperatorExportTypes operator_export_type, + std::unordered_map& env); +TORCH_API void RemovePrintOps(std::shared_ptr& graph); +TORCH_API void PreprocessCaffe2Ops(std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_dict_idioms.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_dict_idioms.h new file mode 100644 index 0000000000000000000000000000000000000000..283c313d9ee2ae8024ba286d1a5bd0ea5cf1fdd3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_dict_idioms.h @@ -0,0 +1,38 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Peephole Optimizes Dict Ops such as len() and __getitem__ +// 1. getitem optimizations +// Given a function like this: +// def foo(): +// d = {0 : 1} +// x = d[0] +// return x +// This pass produces (after dead code elimination): +// def foo(a, b): +// return 1 +// +// This optimization can only happen if the dict is not modified +// and the dict has constant, non overlapping keys. +// +// 2. len optimizations +// Given a function like this: +// def foo(): +// d = {0 : 1} +// return len(d) +// This pass produces (after dead code elimination): +// def foo(): +// return 1 +// +// This has the same requirements as the getitem optimizations. +// +// Currently this is invoked as part of PeepholeOptimize +// return true if graph is modified. 
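+//
+// A hedged sketch of a standalone call (normally reached via PeepholeOptimize;
+// EliminateDeadCode from passes/dead_code_elimination.h is assumed to be the
+// dead code elimination referred to above):
+//   bool changed = PeepholeOptimizeDictIdioms(graph);
+//   if (changed) {
+//     EliminateDeadCode(graph);  // drops the now-unused dict construction
+//   }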
+TORCH_API bool PeepholeOptimizeDictIdioms(const std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_non_tensor.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_non_tensor.h new file mode 100644 index 0000000000000000000000000000000000000000..1e4daebd060cc9365c8994219803a65891c69d4e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_non_tensor.h @@ -0,0 +1,14 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// return true if graph is modified +// Optimizing General Graph Patterns that +// are not covered in peephole.cpp and peephole_list_idioms +TORCH_API bool PeepholeOptimizeNonTensor(const std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/refine_tuple_types.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/refine_tuple_types.h new file mode 100644 index 0000000000000000000000000000000000000000..75f36313b3a1510dec9ec8107b1e5d1c2a781c49 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/refine_tuple_types.h @@ -0,0 +1,12 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// updates the types of tuples according to the type of their current inputs. +TORCH_API void RefineTupleTypes(std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_exceptions.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_exceptions.h new file mode 100644 index 0000000000000000000000000000000000000000..e029383379f5658208a1f5806710bba7d47ce6b1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_exceptions.h @@ -0,0 +1,23 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Considering prim::RaiseException nodes unreachable, simplify prim::If nodes +// when one of the branches contains prim::RaiseException. +// +// This pass is illegal in general case as the modified graph might not throw +// an exception that the original graph would throw. The purpose of the pass is +// to cleanup the graph in a "risky" way by removing pathways leading to +// RaiseExceptions nodes. In some sense, this pass could be considered as a +// "Release" mode, while the original graph was in a "Debug" mode. +// The pass should only be used when such transformation is guaranteed to be +// safe by some other mechanisms. For instance, when we know exact shapes of +// tensors flowing through the graph and tensors with such shapes never cause +// exceptions. 
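+//
+// Minimal sketch, assuming the caller has verified by some other mechanism
+// that the exception paths are unreachable:
+//   EliminateExceptions(graph);  // simplifies prim::If nodes whose branch raises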
+TORCH_API void EliminateExceptions(std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_inplace_ops.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_inplace_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..e597da64860be2ad26186e4862ef2db348cbe1ee --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_inplace_ops.h @@ -0,0 +1,14 @@ +#pragma once + +#include + +#include + +namespace torch { +namespace jit { +// see .cpp for docs +TORCH_API void RemoveInplaceOps(const std::shared_ptr& graph); + +TORCH_API void ImplicitCastForBinaryInplaceOps(Block* block); +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/requires_grad_analysis.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/requires_grad_analysis.h new file mode 100644 index 0000000000000000000000000000000000000000..c7b80423dc5eafd88eb8f22255e472fda0d954ab --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/requires_grad_analysis.h @@ -0,0 +1,16 @@ +#pragma once + +#include + +#include + +namespace torch { +namespace jit { + +struct Graph; +struct ArgumentSpec; + +TORCH_API void PropagateRequiresGrad(std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/specialize_autogradzero.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/specialize_autogradzero.h new file mode 100644 index 0000000000000000000000000000000000000000..83b5e657750b831bb4891569ef1b71cd87a95d1a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/specialize_autogradzero.h @@ -0,0 +1,21 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// propagate autograd zero information through a gradient graph and +// remove grad_of blocks if present. +// Note: this is a very limited pass. It only propagates autograd zeros for +// operations generated by the symbolic autodiff code and cleans up +// AutogradAdds when possible. Outputs of other nodes are conservatively +// marked Unknown and not optimized. +TORCH_API void specializeAutogradZero(std::shared_ptr g); + +struct ProfilingRecord; + +TORCH_API void InsertProfileNodesForSpecializeAutogradZero(ProfilingRecord* pr); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/subgraph_rewrite.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/subgraph_rewrite.h new file mode 100644 index 0000000000000000000000000000000000000000..d932c0c1f74fa73b14e8d041a55e8a82d33bdd62 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/subgraph_rewrite.h @@ -0,0 +1,117 @@ +/** This file defines API for pattern-based subgraph rewrites. + * + * The API can be used for finding concrete patterns in the model and replacing + * the corresponding subgraphs with another subgraph. A special case of such + * rewrites is fusion, where the new subgraph consists of just a single node. + * + * There is a default set of the most common patterns that everyone could use. + * Alternatively, an arbitrary pattern can be registered. 
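+ *
+ * A hedged usage sketch (the fused op `my::fused_mul_mul` is purely
+ * illustrative and would have to be registered separately):
+ *
+ *   std::string pattern = R"IR(
+ *     graph(%a, %b):
+ *       %c = aten::mul(%a, %b)
+ *       %d = aten::mul(%c, %b)
+ *       return (%d))IR";
+ *   std::string replacement = R"IR(
+ *     graph(%a, %b):
+ *       %d = my::fused_mul_mul(%a, %b)
+ *       return (%d))IR";
+ *   SubgraphRewriter rewriter;
+ *   rewriter.RegisterRewritePattern(pattern, replacement);
+ *   rewriter.runOnGraph(graph);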
+ */ +#pragma once + +#include +#include + +#include +#include +#include + +namespace torch { +namespace jit { + +// Forward declarations. +struct RewritePatternDescr; +struct Match; + +using MatchFilter = std::function< + bool(const Match&, const std::unordered_map&)>; + +/** Run pattern-based subgraph rewrites on all methods in the module. + * + * This pass will go through all methods in the module and try to replace all + * recognized patterns (see SubgraphRewriter::RegisterDefaultPatterns for the + * list of these patterns). + */ +TORCH_API Module PatternBasedRewrite(const Module& module); + +/** A class implementing API for pattern-based subgraph rewrites. + * + * To perform pattern-based subgraph rewrites on a module using this API, one + * needs to create an object of such class, register rewrite patterns and run + * the transformation pass (`runOnModule`). + * + * To use standard patterns, one could use `RegisterDefaultPatterns`. + * + * To enable rewrites of custom patterns, the custom patterns must be registered + * with `RegisterRewritePattern`. + */ +class TORCH_API SubgraphRewriter { + public: + // Run pattern-based subgraph rewrite pass on the module. + Module runOnModule(const Module& module); + + // Run pattern-based subgraph rewrite pass on the graph (used in testing). + // `filter` is a function that does extra filtering on the match. If it + // returns false for a given Match, we'll skip the Match. The filter + // function's arguments consist of a Match and a value map from parsing the + // pattern graph. Both the Match and the value map are necessary because we + // need to 1) do extra filtering on the matched result as well as 2) refer to + // the values in the matched result through the values in the pattern graph. + void runOnGraph( + std::shared_ptr& graph, + const std::vector& filters); + + void runOnGraph( + std::shared_ptr& graph, + const MatchFilter& filter = + [](const Match&, const std::unordered_map&) { + return true; + }) { + runOnGraph(graph, std::vector({filter})); + } + + // Register standard rewrite patterns. + void RegisterDefaultPatterns(); + + /** Register a custom rewrite pattern. + * + * The method takes two parameters specifying the pattern: + * \p PATTERN - IR string representing the pattern subgraph. + * \p REPLACEMENT - IR string representing the replacement subgraph. + * \p value name map - vector of pairs mapping values in the replacement graph + * to the values in the pattern graph. Used for preserving source range info + * across graph rewrite. + * + * See examples of pattern registering in `RegisterDefaultPatterns`. + */ + void RegisterRewritePattern( + const std::string& pattern, + const std::string& replacement, + const std::vector>& value_name_pair = + {}); + + private: + std::vector patterns_; + std::unordered_set nodes_to_delete_; + + void rewriteSinglePatternOnGraph( + std::shared_ptr& graph, + const RewritePatternDescr& pattern, + const std::vector& filters); + + bool overlapsWithPreviousMatches(const Match* match); +}; + +/** Rewrite pattern descriptor. + * + * This structure is used in the implementation of `SubgraphRewriter` and + * is not supposed to be used externally. 
+ */ +struct RewritePatternDescr { + std::string pattern; + std::string replacement; + std::unordered_map value_name_map; +}; + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/symbolic_shape_analysis.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/symbolic_shape_analysis.h new file mode 100644 index 0000000000000000000000000000000000000000..824740792aaf031a0adcc181cb84a666ef539fe4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/symbolic_shape_analysis.h @@ -0,0 +1,58 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { + +// CAUTION NOT TO BE USED, STILL A WIP, NOT STABLE + +TORCH_API void PropagateShapesOnGraph(std::shared_ptr& graph); + +// CAUTION NOT TO BE USED, STILL A WIP, NOT STABLE +// From [beg, end) attempt to propagate shapes and +// build up a graph that will compute all remaining symbolic +// shapes in [beg, end) that can be executed before beg + +struct ShapeComputeGraphMapping { + ShapeComputeGraphMapping( + std::shared_ptr partial_eval_shape_graph, + std::unordered_map + enclosing_graph_value_to_shape_graph_input, + std::unordered_map graph_output_to_symbolic_shape_dim) + : partial_eval_shape_graph(std::move(partial_eval_shape_graph)), + enclosing_graph_value_to_shape_graph_input_( + std::move(enclosing_graph_value_to_shape_graph_input)), + graph_output_to_symbolic_shape_dim_( + std::move(graph_output_to_symbolic_shape_dim)){}; + + std::shared_ptr partial_eval_shape_graph; + std::unordered_map + enclosing_graph_value_to_shape_graph_input_; + std::unordered_map graph_output_to_symbolic_shape_dim_; +}; + +TORCH_API c10::optional +PropagateShapesAndBuildLargeShapeComputeGraph( + std::shared_ptr& graph, + Node* beg, + Node* end); + +// don't insert complete tensor shapes in shape compute graphs and instead +// rely on our partial evaluation pipeline to propagate information. +// this is a good proxy for our ability to propagate non-complete shape +// information. +TORCH_API bool setSymbolicShapeAnalysisTestMode(bool value); +TORCH_API bool symbolicShapeAnalysisTestModeEnabled(); + +using SSAInput = std::variant; +TORCH_API c10::optional> +calculateSymbolicShapesOnOp( + const FunctionSchema* schema, + const std::vector& inputs); +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/symbolic_shape_cache.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/symbolic_shape_cache.h new file mode 100644 index 0000000000000000000000000000000000000000..02e00acac08d2d1b625c02524eb51c68569515d4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/symbolic_shape_cache.h @@ -0,0 +1,57 @@ +#pragma once + +#include +#include + +namespace torch { +namespace jit { + +struct TORCH_API CanonicalizedSymbolicShape { + // TODO: Consider in the future if it is reasonable to + // merge code with SymbolicShape or VaryingShape while keeping + // the two not implicitly convertable (and cause bugs). 
+ CanonicalizedSymbolicShape( + const c10::SymbolicShape& orig_shape, + std::unordered_map& ss_map) { + init(orig_shape, ss_map); + } + + CanonicalizedSymbolicShape(c10::SymbolicShape& orig_shape) { + std::unordered_map new_ssmap; + init(orig_shape, new_ssmap); + } + + size_t hash() const; + + c10::SymbolicShape toSymbolicShape( + std::unordered_map& inverse_ss_map) const; + + TORCH_API friend bool operator==( + const CanonicalizedSymbolicShape& a, + const CanonicalizedSymbolicShape& b); + + private: + c10::optional> values_; + + void init( + const c10::SymbolicShape& orig_shape, + std::unordered_map& ss_map); +}; + +// SHAPE CACHE API +TORCH_API c10::optional> +get_cached_shape_function( + const FunctionSchema* schema, + const std::vector& arg_vec); + +TORCH_API void cache_shape_function( + const FunctionSchema* schema, + const std::vector& arg_vec, + const std::vector& ret_vec); + +// For use in test code +TORCH_API void clear_shape_cache(); +TORCH_API size_t get_shape_cache_size(); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/argument_spec.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/argument_spec.h new file mode 100644 index 0000000000000000000000000000000000000000..06c77edca718cad76bc0db1f63137087fcdaf41b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/argument_spec.h @@ -0,0 +1,511 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +C10_CLANG_DIAGNOSTIC_PUSH() +#if C10_CLANG_HAS_WARNING("-Wshorten-64-to-32") +C10_CLANG_DIAGNOSTIC_IGNORE("-Wshorten-64-to-32") +#endif + +namespace torch::jit { + +// GraphExecutor creates specializations of Graphs for different +// dimensionalitities and types of inputs. 
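+//
+// A hedged sketch of how an ArgumentSpec is assembled from a Stack (the input
+// counts and the `stack` variable are illustrative only):
+//   ArgumentSpec spec(/*num_flat_tensor_inputs=*/2, /*num_flat_optional_inputs=*/0);
+//   spec.addTensor(stack[0], /*with_grad=*/true);
+//   spec.addTensor(stack[1], /*with_grad=*/true);
+//   size_t key = spec.hashCode();  // cheap to hash and compare for plan-cache lookups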
+ +struct ArgumentInfo { + friend struct ArgumentSpec; + using plain_data_type = uint64_t; + + bool defined() const { + return defined_; + } + at::Device device() const { + return at::Device(DeviceType(dev_type_), device_); + } + // XXX: It is guaranteed that this will return false when called on non-tensor + // arguments + bool requires_grad() const { + return requires_grad_; + } + int dim() const { + // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions) + return dim_; + } + at::ScalarType type() const { + return at::ScalarType(type_); + } + TypePtr toType() const { + if (!defined()) + return TensorType::get(); + + return TensorType::create( + type(), device(), c10::optional(dim()), requires_grad()); + } + operator TypePtr() const { + return toType(); + } + + private: + unsigned defined_ : 1; + unsigned requires_grad_ : 1; + unsigned : 5; + unsigned dim_ : 8; + unsigned device_ : 8; + unsigned type_ : 8; + unsigned dev_type_ : 16; + unsigned : 16; +}; + +static_assert( + std::is_standard_layout::value, + "ArgumentInfo is to be a POD struct"); +static_assert( + sizeof(ArgumentInfo) == sizeof(ArgumentInfo::plain_data_type), + "ArgumentInfo is expected to be a 32-bit struct"); + +struct ArgumentSpec { + ArgumentSpec(size_t num_flat_tensor_inputs, size_t num_flat_optional_inputs) + : hash_code(c10::hash_combine( + num_flat_tensor_inputs, + num_flat_optional_inputs)) { + tensor_args.reserve(num_flat_tensor_inputs); + optional_presence.reserve(num_flat_optional_inputs); + } + + void addOptional(const IValue& input) { + bool is_present = !input.isNone(); + optional_presence.push_back(is_present); + hash_code = c10::hash_combine(hash_code, is_present); + } + + void addTensor(const IValue& input, bool with_grad) { + AT_ASSERT(input.isTensor(), "Expected Tensor but found ", input.tagKind()); + tensor_args.emplace_back(); + auto& arg = tensor_args.back(); + // Initialize all fields to 0. This is convenient, because e.g. + // requires_grad() can be checked even on tensors AND will make + // padding bits all 0s. 
+ std::memset(&arg, 0, sizeof(ArgumentInfo)); + + // [argspec refcounting] reinterpret the IValue to avoid having to refcount + // the Tensor microbenchmarks + // https://github.com/zdevito/pytorch/commit/21e7200a0a0fc456bea2f10e95b1781f83933d10 + // show overhead in extra refcounting along this path + const at::Tensor* t = reinterpret_cast(&input); + arg.defined_ = t->defined(); + if (arg.defined_) { + arg.requires_grad_ = with_grad && autograd::Variable(*t).requires_grad(); + arg.dim_ = t->dim(); + // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions) + at::Device device = t->device(); + arg.dev_type_ = + // NOLINTNEXTLINE(bugprone-signed-char-misuse) + static_cast::type>(device.type()); + // NOLINTNEXTLINE(bugprone-signed-char-misuse) + arg.device_ = device.index(); + arg.type_ = static_cast(t->scalar_type()); + } + combineHash(arg); + } + + void combineHash(const ArgumentInfo& arg) { + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + ArgumentInfo::plain_data_type arg_data; + std::memcpy(&arg_data, &arg, sizeof(ArgumentInfo)); + hash_code = c10::hash_combine(hash_code, arg_data); + } + + // equality is fast: check ninputs, and then check the raw array data, + // there are no size/stride indirections + // hopefully std::vector has fast equality + bool operator==(const ArgumentSpec& spec) const { + if (optional_presence != spec.optional_presence) { + return false; + } + if (tensor_args.size() != spec.tensor_args.size()) + return false; + // NB: we need to break out early when there are no elements, because + // passing a nullptr to memcmp is UB. + if (tensor_args.empty()) + return true; + return std::memcmp( + tensor_args.data(), + spec.tensor_args.data(), + tensor_args.size() * sizeof(ArgumentInfo)) == 0; + } + bool operator!=(const ArgumentSpec& spec) const { + return !(*this == spec); + } + size_t numTensors() const { + return tensor_args.size(); + } + const ArgumentInfo& tensorAt(size_t i) const { + return tensor_args[i]; + } + size_t numOptionals() const { + return optional_presence.size(); + } + bool isPresent(size_t i) const { + return optional_presence[i]; + } + size_t hashCode() const { + return hash_code; + } + + private: + size_t hash_code; // precomputed on construction + std::vector tensor_args; + std::vector optional_presence; +}; + +namespace { +static constexpr size_t ARG_SPEC_DEPTH_LIMIT = 128; +} + +// ArgumentSpecCreator takes an initial graph and comes up with a set +// of simple instructions to compute the ArgumentSpec given a set of +// input tensors. 
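+//
+// A hedged sketch of the intended flow (`graph` and `stack` are assumed to
+// exist; the constants are illustrative):
+//   ArgumentSpecCreator creator(*graph);
+//   ArgumentSpec spec = creator.create(/*with_grad=*/true, stack);
+//   creator.specializeTypes(*graph, spec);  // rewrite input types to match the spec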
+struct TORCH_API ArgumentSpecCreator { + // instructs acts on a stack of a list of input IValues + // at the beginning the stack contains a single list of the inputs to the + // function the ENTER_ instructs descend into subobjects and push new lists + // onto the stack + enum Inst : char { + ENTER_TUPLE, // consume a tuple ivalue from the top-most list, and push the + // list of its elements onto the stack as a new list + ENTER_OBJECT, // same as ENTER_TUPLE, but the input is a class + LEAVE, // pop the top-most list from the stack + SKIP, // consume an element from the top-most list, and discard + SPECIALIZE_OPTIONAL_TENSOR, // consume a optional tensor for the top-most + // list, and add it to the ArgSpec key being + // created + SPECIALIZE_TENSOR, // consume a tensor for the top-most + // list, and add it to the ArgSpec key being created + SPECIALIZE_OPTIONAL, + // consume a nontensor optional from the top-most list, + // and add it to the ArgSpec key being created + }; + ArgumentSpecCreator(Graph& graph); + ArgumentSpec create(bool with_grad, const Stack& stack) const; + void specializeTypes(Graph& g, const ArgumentSpec& spec) const; + void dump() const; + using WrittenSlots = std::unordered_set; + + private: + void scan( + const TypePtr& typ, + size_t depth, + const WrittenSlots& written_slots); + size_t num_inputs_; + size_t num_tensors_ = 0; + size_t num_optionals_ = 0; + std::vector instructions_; +}; + +// CompleteArgumentSpec represents one particular specialization. +// It is designed so that it can be created, hashed, and compared quickly +// since it is used along the hot-path of the JIT to check if the code +// we have created is valid for the given inputs. + +// COmpleteArgumentInfoPOD is only used internally in CompleteArgumentSpec +// API users should use ArgumentInfo +struct CompleteArgumentInfoPOD { + // total size is 64-bit + unsigned is_tensor : 8; // all other fields are invalid if this is false + unsigned type : 8; // scalar type + unsigned defined : 1; + unsigned requires_grad : 1; + signed device : 14; + unsigned dev_type : 16; + unsigned + total_dims : 16; // all TensorInfoPODs are in CompleteArgumentSpec's + // tensor_info() array. total_dims is the total number of + // dimensions seen so far in all previous members of + // tensor_info(), including this tensor 2*total_dims + // becomes the offset into the sizes_strides list for the + // _next_ tensor in the tensor_info array for tensor 0, + // the offset is always 0 +}; + +static_assert( + sizeof(CompleteArgumentInfoPOD) == sizeof(int64_t), + "CompleteArgumentInfoPOD must be 64-bit struct for CompleteArgumentSpec encoding to work"); + +struct CompleteArgumentInfo; + +struct CompleteArgumentSpec { + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + CompleteArgumentSpec(bool with_grad, at::ArrayRef inputs) + : hash_code(0), ninputs(inputs.size()) { + int32_t all_dims = 0; + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + const int32_t num_inputs = inputs.size(); + for (const auto i : c10::irange(num_inputs)) { + if (!inputs[i].isTensor()) + continue; + auto& tensor = inputs[i].toTensor(); + all_dims += tensor.defined() ? 
tensor.ndimension() : 0; + } + // allocate enough room for all TensorPODs and dimensions + data.resize(ninputs + all_dims * 2); + + // and reinterpret our data array as these structs + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + auto* pods = reinterpret_cast(data.data()); + int64_t* next_dim = sizes_strides(); + int32_t total_dims = 0; + for (const auto i : c10::irange(num_inputs)) { + auto& pod = pods[i]; + pod.is_tensor = static_cast(inputs[i].isTensor()); + if (pod.is_tensor) { + at::Tensor t = inputs[i].toTensor(); + pod.defined = t.defined(); + if (pod.defined) { + pod.type = static_cast(t.scalar_type()); + // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions) + at::Device device = t.device(); + // NOLINTNEXTLINE(bugprone-signed-char-misuse) + pod.dev_type = static_cast::type>( + device.type()); + // NOLINTNEXTLINE(bugprone-signed-char-misuse) + pod.device = device.index(); + pod.requires_grad = with_grad && t.requires_grad(); + total_dims += t.ndimension(); + auto sizes = t.sizes(); + std::copy(sizes.begin(), sizes.end(), next_dim); + next_dim += sizes.size(); + auto strides = t.strides(); + std::copy(strides.begin(), strides.end(), next_dim); + next_dim += strides.size(); + } + } + // each POD has a running tally of all dimensions including its own + TORCH_CHECK( + total_dims < std::numeric_limits::max(), + "The number of dims cannot be packed into CompleteArgumentSpec:", + total_dims); + pod.total_dims = total_dims; + } + // we precompute the hash_code to minimize the time inside of hash + // table operations where we may need to hold a compiler cache lock. + hash_code = c10::hash_combine(0, ninputs); + for (auto d : data) { + hash_code = c10::hash_combine(hash_code, d); + } + } + + // equality is fast: check ninputs, and then check the raw array data, + // there are no size/stride indirections + bool operator==(const CompleteArgumentSpec& spec) const { + return ninputs == spec.ninputs && data == spec.data; + } + bool operator!=(const CompleteArgumentSpec& spec) const { + return !(*this == spec); + } + friend struct CompleteArgumentInfo; + CompleteArgumentInfo at(size_t i) const; + size_t size() const { + return ninputs; + } + size_t hashCode() const { + return hash_code; + } + + private: + ArrayRef tensor_info() const { + return ArrayRef( + reinterpret_cast(data.data()), ninputs); + } + // the start of the sizes_strides information, which comes after the + // CompleteArgumentInfoPOD list. + const int64_t* sizes_strides() const { + return data.data() + ninputs; + } + int64_t* sizes_strides() { + return data.data() + ninputs; + } + size_t hash_code; // precomputed on construction + size_t ninputs; + // layout is ninputs of TensorPOD (each 64-bit) followed by their size and + // stride info for 3 tensors: + // [t0POD][t1POD][t2POD]... 
+ // [t0 sizes][t0 strides][t1 sizes][t1 strides][t2 sizes][t2 strides] + std::vector data; +}; + +// public view of compressed CompleteArgumentInfo +struct CompleteArgumentInfo { + CompleteArgumentInfo(const CompleteArgumentSpec& spec, const int i) + : spec(spec), i(i) {} + bool isTensor() const { + return pod(i).is_tensor; + } + at::ScalarType type() const { + return at::ScalarType(pod(i).type); + } + bool defined() const { + return pod(i).defined; + } + bool requires_grad() const { + return pod(i).requires_grad; + } + at::Device device() const { + return at::Device( + DeviceType(pod(i).dev_type), + static_cast(pod(i).device)); + } + int ndimension() const { + // See [valid range], it is always valid to ask for offset for (i + 1) + return (sizes_strides_offset(i + 1) - sizes_strides_offset(i)) / 2; + } + at::IntArrayRef sizes() const { + return at::IntArrayRef( + spec.sizes_strides() + sizes_strides_offset(i), ndimension()); + } + at::IntArrayRef strides() const { + int ndim = ndimension(); + return at::IntArrayRef( + spec.sizes_strides() + sizes_strides_offset(i) + ndim, ndim); + } + operator TypePtr() const { + if (!defined()) + return TensorType::get(); + return TensorType::create( + type(), + device(), + c10::VaryingShape{sizes()}, + c10::VaryingShape{strides()}, + requires_grad()); + } + + private: + // offsetinto sizes_strides() array where the sizes start for tensor j + // [valid range] valid range is [0, ninputs] + // (i.e. you can ask for the offset at ninputs, which would be the offset of + // the next tensor if it existed) + int sizes_strides_offset(int j) const { + if (j == 0) + return 0; + // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions) + return 2 * pod(j - 1).total_dims; + } + const CompleteArgumentInfoPOD& pod(int j) const { + return spec.tensor_info().at(j); + } + const CompleteArgumentSpec& spec; + const int i; +}; + +inline std::ostream& operator<<(std::ostream& out, const ArgumentInfo& info) { + if (!info.defined()) { + return out << ""; + } + out << "Tensor(device=" << info.device() << ", type=" << toString(info.type()) + << ", requires_grad=" << info.requires_grad() << ", dims=" << info.dim() + << ")"; + return out; +} + +inline std::ostream& operator<<(std::ostream& out, const ArgumentSpec& spec) { + out << "{"; + for (const auto i : c10::irange(spec.numTensors())) { + if (i > 0) + out << ", "; + out << spec.tensorAt(i); + } + out << "; "; + for (const auto i : c10::irange(spec.numOptionals())) { + if (i > 0) + out << ", "; + out << spec.isPresent(i); + } + out << "}"; + return out; +} + +inline std::ostream& operator<<( + std::ostream& out, + const CompleteArgumentInfo& info) { + if (!info.defined()) { + return out << ""; + } + out << "Tensor(device=" << info.device() << ", type=" << toString(info.type()) + << ", requires_grad=" << info.requires_grad() + << ", sizes=" << info.sizes() << ", strides=" << info.strides() << ")"; + return out; +} + +inline std::ostream& operator<<( + std::ostream& out, + const CompleteArgumentSpec& spec) { + out << "{"; + for (const auto i : c10::irange(spec.size())) { + if (i > 0) + out << ", "; + out << spec.at(i); + } + out << "}"; + return out; +} + +inline CompleteArgumentInfo CompleteArgumentSpec::at(size_t i) const { + return CompleteArgumentInfo(*this, i); +} + +inline c10::optional convertOptional( + c10::optional const& from) { + return (from) ? 
c10::optional(static_cast(*from)) + : c10::optional{}; +} + +} // namespace torch::jit + +namespace std { + +template +struct hash> { + size_t operator()(const c10::VaryingShape& vs) const { + return c10::get_hash( + vs.size(), + vs.size() ? vs.sizes().value() : std::vector>()); + } +}; + +template <> +struct hash { + size_t operator()(const c10::TensorType& ptt) const { + return c10::get_hash< + c10::optional, + c10::VaryingShape, + c10::VaryingShape, + c10::optional>( + torch::jit::convertOptional(ptt.scalarType()), + ptt.sizes(), + ptt.strides(), + ptt.requiresGrad()); + } +}; + +template <> +struct hash { + size_t operator()(const torch::jit::ArgumentSpec& spec) const { + return spec.hashCode(); + } +}; +template <> +struct hash { + size_t operator()(const torch::jit::CompleteArgumentSpec& spec) const { + return spec.hashCode(); + } +}; +} // namespace std + +C10_CLANG_DIAGNOSTIC_POP() diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/autodiff.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/autodiff.h new file mode 100644 index 0000000000000000000000000000000000000000..32a8166caf0e5936f3eea292aa7a895ad6ddbc58 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/autodiff.h @@ -0,0 +1,94 @@ +#pragma once + +#include +#include + +#include +#include + +namespace torch::jit { + +using value_list = std::vector; +// clang-format off +// Example showcasing how Gradient is constructed: +// +// Let's assume we have a function f, `m` and `n` do not require grad +// (`n` can depend only on `m`): +// y, n = f(x, m) +// +// Now, let's assume that the reverse of f (called f') needs to use values of `x`, `t` and `y`. +// `t` is an intermediate value produced in the body of f, and let's assume that it requires +// grad too. +// +// In this case differentiate(f) will return this: +// y, n, t = f(x, m) // `t` is appended to the output list +// dx = f'(dy, dt, x, t, y) // No `dm` or `dn` because they do not require gradient +// // All needed values from f are prepended to the input list +// +// f_real_outputs = 2 // Only first two outputs were present in f originally +// df_input_vjps = {0, 2} // i.e. connect grad_fn of y and t variables produced by f, +// y t // with y's output_nr = 0 and t's output_nr = 1 +// df_input_captures = {I0, O2, O0} // Order matches the prefix of inputs to df +// x t y +// df_output_vjps = {0} // i.e. connect next_edge[0] of grad_fn to x's (grad_fn, output_nr). +// +// Terminology: vjp = vector-jacobian product +// clang-format on + +struct Gradient { + explicit operator bool() const { + return df != nullptr; + } + std::shared_ptr f; + std::shared_ptr df; + + // Describes how to construct outputs of f from what its graph will return. + // This is necessary because some trailing outputs are intermediates produced + // only to be saved for df (and should be ignored). + size_t f_real_outputs = 0; // initialized for safety. + + // df inputs are split into two sections: vjps (aka grad_outputs) and + // captures. VJPs are "seeds" for the gradient computation given for each + // input capture of an Output kind. Captures are values the need to be saved + // when f is run. We handle inputs specially, because this allows us to avoid + // adding extra vjps as df inputs. + + std::vector df_input_vjps; // Offsets into f's outputs. 
+ // capture can come from inputs or outputs + std::vector df_input_captured_inputs; // Offsets into f's inputs + std::vector df_input_captured_outputs; // Offsets into f's outputs + + // df will produce vjps for a subset of inputs of f that required grad. + // df_output_vjps[idx] == inp_idx means that idx-th output of df produces a + // vjp for inp_idx-th input of f. + std::vector df_output_vjps; // Offsets into f's inputs. + + // How to use gradient to implement a differentiable autograd function: + // When running f: + // - Unwrap input Variables + // - Run f's graph + // - Create grad_fn + // - Wrap outputs in Variables (assume we have a tensor_outputs array): + // outputs = map(Variable, tensor_output) + // for i, offset in enumerate(df_input_vjps): + // outputs[offset].set_grad_fn(grad_fn, output_nr=i) + // - Use df_output_vjps to connect next_edges of grad_fn: + // for idx in df_output_vjps: + // grad_fn.add_next_edge(inputs[idx].gradient_edge()) + // - Save captures for df (care needs to be taken to use SavedVariables for + // inputs and outputs that we will actually return) + // - Return outputs[:f_real_outputs] + // + // When running df: + // - Concatenate received vjps and captured Variables + // - Interpret df + // - Wrap outputs of df into Variables (that don't require grad) +}; +TORCH_API Gradient differentiate(std::shared_ptr& graph); + +// can we take a derivative of this node symbolically? +TORCH_API bool isDifferentiable(const Node* n); +TORCH_API bool isDifferentiable(Graph& g); +TORCH_API bool isZero(Value* v); + +} // namespace torch::jit diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/calculate_necessary_args.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/calculate_necessary_args.h new file mode 100644 index 0000000000000000000000000000000000000000..e1aff151f35e421e1d06be6de259953b83c23ba1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/calculate_necessary_args.h @@ -0,0 +1,69 @@ +#pragma once + +#include +#include +#include + +namespace torch::jit { + +// Calculates the number of args that need to be passed in. +// Less args may be needed if defaults are provided. +// Returns: {number args needed, number of out args} +inline std::pair CalculateNecessaryArgs( + const std::vector& schema_args, + at::ArrayRef actual_inputs, + bool allow_trailing_out_args) { + if (schema_args.empty()) { + return std::make_pair(0, 0); + } + + // count number of out arguments + int64_t schema_idx = static_cast(schema_args.size()) - 1; + if (allow_trailing_out_args) { + // skip over out arguments in the end. 
+ while (schema_idx >= 0) { + const auto& current_arg = schema_args.at(schema_idx); + if (!current_arg.is_out()) { + break; + } + schema_idx--; + } + } + + int64_t num_out = static_cast(schema_args.size()) - schema_idx - 1; + + if (schema_args.size() < actual_inputs.size()) { + return std::make_pair(actual_inputs.size(), num_out); + } + + // if it is the default args, we reset the index to the last element + if (!allow_trailing_out_args) { + schema_idx = schema_args.size() - 1; + } + // keeps track of trailing unnecessary args + while (schema_idx >= 0) { + // this means it is not default argument, so it is necessary + if (!schema_args.at(schema_idx).default_value().has_value()) { + return std::make_pair(schema_idx + 1, num_out); + } else { + auto schema_value = + schema_args.at(schema_idx).default_value().value().toIValue(); + // non-const value will become nullptr here, so will be marked necessary + // non-const would include prim::ListConstruct, prim::DictConstruct as + // well. + auto actual_value = toIValue(actual_inputs[schema_idx]); + if (!actual_value.has_value()) { + return std::make_pair(schema_idx + 1, num_out); + } + // if the IR has same value as default value of the schema, + // it is not necessary argument. + if (schema_value != actual_value.value()) { + return std::make_pair(schema_idx + 1, num_out); + } + } + schema_idx--; + } + return std::make_pair(0, num_out); +} + +} // namespace torch::jit diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/custom_operator.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/custom_operator.h new file mode 100644 index 0000000000000000000000000000000000000000..64d514374f58e69b732133ce324053d6d1bebc4c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/custom_operator.h @@ -0,0 +1,30 @@ +#pragma once + +#include +#include +#include + +namespace torch::jit { + +/// Registration class for new operators. Effectively calls +/// `torch::jit::registerOperator` for every supplied operator, but allows doing +/// so in the global scope when a `RegisterOperators` object is assigned to a +/// static variable. +/// Note: This is *not* the custom operator API. If you want to register custom +/// operators, take a look at torch::RegisterOperators. +struct TORCH_API RegisterOperators { + RegisterOperators() = default; + + /// Registers a vector of already created `Operator`s. + /// The operator element is now optional to filter null ops. It's backward + /// compatible and works for selective operator registration. + explicit RegisterOperators(std::vector> operators) { + for (c10::optional& o : operators) { + if (o) { + registerOperator(std::move(o.value())); + } + } + } +}; + +} // namespace torch::jit diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/decomposition_registry.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/decomposition_registry.h new file mode 100644 index 0000000000000000000000000000000000000000..8633609bcf2a89d214a501d11cc803c86492b198 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/decomposition_registry.h @@ -0,0 +1,33 @@ +#pragma once +// This file is temporary until native_functions.yaml and derivatives.yaml are +// merged. 
Ideally this should all go into native_functions.yaml + +#include +#include + +namespace torch::jit { + +TORCH_API c10::optional> GetDecomposition( + const FunctionSchema& schema); + +TORCH_API void RegisterDecomposition( + const FunctionSchema& schema, + std::shared_ptr g); + +TORCH_API void RunDecompositions(std::shared_ptr g); + +TORCH_API c10::optional GetDecompositionFunction( + const FunctionSchema& schema); + +// For invocation in C++, recommended is to assign to static local variable +TORCH_API Function* GetDecompositionExecutor(const char* schema_literal); + +TORCH_API Function* GetDecompositionExecutor(const FunctionSchema& schema); + +TORCH_API void run_jit_decomposition( + const c10::OperatorHandle& op, + torch::jit::Stack* stack); + +TORCH_API bool has_jit_decomposition(const FunctionSchema& schema); + +} // namespace torch::jit diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/decomposition_registry_util.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/decomposition_registry_util.h new file mode 100644 index 0000000000000000000000000000000000000000..08b5750957b2ae31deacaaae0deae35473c91fce --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/decomposition_registry_util.h @@ -0,0 +1,12 @@ +#pragma once + +#include +#include + +namespace torch::jit { + +TORCH_API const std::string& GetSerializedDecompositions(); + +TORCH_API const OperatorMap& GetDecompositionMapping(); + +} // namespace torch::jit diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/exception_message.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/exception_message.h new file mode 100644 index 0000000000000000000000000000000000000000..e3f00272a999f3d9431528db7d8e74ff0cc3d823 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/exception_message.h @@ -0,0 +1,29 @@ +#pragma once +#include +#include + +namespace torch::jit { + +struct ExceptionMessage { + ExceptionMessage(const std::exception& e) : e_(e) {} + + private: + const std::exception& e_; + friend std::ostream& operator<<( + std::ostream& out, + const ExceptionMessage& msg); +}; + +inline std::ostream& operator<<( + std::ostream& out, + const ExceptionMessage& msg) { + auto c10_error = dynamic_cast(&msg.e_); + if (c10_error) { + out << c10_error->what_without_backtrace(); + } else { + out << msg.e_.what(); + } + return out; +} + +} // namespace torch::jit diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/graph_executor.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/graph_executor.h new file mode 100644 index 0000000000000000000000000000000000000000..d82d69ad5dce531b444dd56d1acbc586ff83bb8e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/graph_executor.h @@ -0,0 +1,148 @@ +#pragma once + +#include +#include + +#include +#include +#include +#include +#include + +C10_DECLARE_bool(torch_jit_enable_new_executor); + +C10_DECLARE_bool(torch_jit_execution_plan_reuse_code_graph); + +namespace torch::jit { +struct GraphExecutorState; +struct Code; + +enum ExecutorExecutionMode { + SIMPLE, + PROFILING, +}; + +struct ExecutionPlan { + ExecutionPlan() = default; + ExecutionPlan(std::shared_ptr graph, std::string function_name) + : code(graph, std::move(function_name)), + graph( + FLAGS_torch_jit_execution_plan_reuse_code_graph + ? 
code.graph() + : std::move(graph)) {} + + operator bool() const { + return static_cast(graph); + } + + Code code; + std::shared_ptr graph; +}; + +// Notice that those structs don't manage lifetime of their members. +// They are only valid only right after you call getDebugState() and should +// never be used again once another GraphExecutor function is called. + +// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) +struct GraphExecutorState { + const Graph* graph = nullptr; + ExecutionPlan fallback; // XXX: members of this field are optional + std::unordered_map execution_plans; +}; + +struct TORCH_API EnableProfilingGuard { + EnableProfilingGuard(); + ~EnableProfilingGuard(); + + private: + bool old_executor_mode = false; + bool old_get_optimize = false; +}; + +struct GraphExecutorImplBase; +struct TORCH_API GraphExecutor { + GraphExecutor() = default; + GraphExecutor(const std::shared_ptr& graph, std::string function_name); + + GraphExecutor( + const std::shared_ptr& graph, + std::string function_name, + ExecutorExecutionMode executor_mode); + + void run(Stack& inputs); + c10::intrusive_ptr runAsync( + Stack& stack, + TaskLauncher taskLauncher = at::launch); + + // `remaining_bailout_depth` stands for the maximum number of profiled and + // specialized recompilations allowed for the current `GraphExecutor`. if + // remaining_bailout_depth is equal to 0, `GraphExecutor` won't perform any + // profiling and specialization. This is also equivalent to the + // SIMPLE_EXECUTOR mode. if remaining_bailout_depth is greater than 0, + // `GraphExecutor` will profile and specialize its input graph based on the + // profiled information whenever a bailout check is failed/triggered, a new + // `GraphExecutor` will be created. This new `GraphExecutor`'s + // remaining_bailout_depth will be reduced by 1. + // If no bailout depth is passed, the depth will be initialized from the + // current global fusion strategy settings. + const ExecutionPlan& getPlanFor( + Stack& inputs, + c10::optional remaining_bailout_depth = c10::nullopt); + GraphExecutorState getDebugState(); + + void debugFlushCompilationCache(); + + bool isOptimized() const; + + private: + std::shared_ptr pImpl; +}; + +TORCH_API Node* replaceBlockWithFallbackGraph( + Block* b, + ArrayRef inputs); + +// These passes need to run before it is valid to pass to the interpreter +// regardless of whether sizes have been specialized or not. +TORCH_API void runRequiredPasses(const std::shared_ptr& g); + +TORCH_API void debugSetFusionGroupInlining(bool state); +TORCH_API bool getFusionGroupInlining(); + +TORCH_API void debugSetAutodiffSubgraphInlining(bool state); +TORCH_API std::shared_ptr lastExecutedOptimizedGraph(); + +TORCH_API std::atomic& getProfilingMode(); +TORCH_API std::atomic& getExecutorMode(); +TORCH_API std::atomic& getNumProfiledRuns(); +TORCH_API size_t getBailoutDepth(); +TORCH_API bool IsNewExecutorEnabled(); + +struct TORCH_API GraphOptimizerEnabledGuard { + GraphOptimizerEnabledGuard(bool state) + : old_state_(getGraphExecutorOptimize()) { + setGraphExecutorOptimize(state); + } + + ~GraphOptimizerEnabledGuard() { + setGraphExecutorOptimize(old_state_); + } + + bool old_state_; +}; + +namespace detail { + +GraphExecutor* getGradExecutor(Operation& op); + +GraphExecutor* getDifferentiableGraphOpExecutor(Operation& op); + +// for debugging information we expose a way to get the last actually +// run graph. 
Previous approaches allowed querying the GraphExecutor +// for what graph it would run in certain circumstances (graphFor), but +// this is fragile because we sometimes change how these decisions are made. +// This interface still allows our tests to look at optimized graphs, but +// with less plumbing. +} // namespace detail + +} // namespace torch::jit diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/graph_executor_impl.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/graph_executor_impl.h new file mode 100644 index 0000000000000000000000000000000000000000..3aae2eb85279664b9c5a1a10e0c66669ad95b9f0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/graph_executor_impl.h @@ -0,0 +1,113 @@ +#pragma once +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +namespace torch::jit { + +void packGradient(const Gradient& gradient, Node* dnode); +bool needsGradient(const std::shared_ptr& graph); +void runOptimization( + std::shared_ptr& graph, + bool unroll_non_constant_loops = true, + bool const_prop_user_classes = true); +void runNondiffOptimization( + std::shared_ptr& graph, + bool strict_fuser_check = false); +void debugSetAutodiffSubgraphInlining(bool state); +bool TORCH_API getAutodiffSubgraphInlining(); + +void debugSetFusionGroupInlining(bool state); +bool getFusionGroupInlining(); + +// Tunable parameters for deciding when to create/keep subgraphs of +// differentiable code +const size_t autodiffSubgraphNodeThreshold = 2; +const size_t autodiffSubgraphInlineThreshold = 5; + +// a Graph can be created via tracing, or via a language-based frontend +// GraphExecutor runs it. It can run the same graph on many different sizes +// and different requires_grad states, and handles specializations for each +// situation. GraphExecutor is completely unaware of tracing or module +// parameters to keep the tracing concerns separated. +struct GraphExecutorImplBase { + static std::shared_ptr prepareGraph( + const std::shared_ptr& graph) { + auto copy = graph->copy(); + EraseShapeInformation(copy); + return copy; + } + + GraphExecutorImplBase( + const std::shared_ptr& graph, + std::string function_name) + : graph(prepareGraph(graph)), + function_name_(std::move(function_name)), + num_inputs(this->graph->inputs().size()), + num_outputs(this->graph->outputs().size()) {} + + // entry point where execution begins + void run(Stack& stack); + c10::intrusive_ptr runAsync( + Stack& stack, + TaskLauncher taskLauncher = at::launch); + + virtual const ExecutionPlan& getPlanFor( + Stack& stack, + c10::optional remaining_bailout_depth = c10::nullopt) = 0; + virtual GraphExecutorState getDebugState() = 0; + virtual ~GraphExecutorImplBase() = default; + + virtual bool isOptimized() const { + return false; + } + + protected: + friend struct GraphExecutor; + + // The unoptimized starting graph. This field is effectively const, but we + // can't make it so because Graph::copy() is not const (and making it const is + // not that easy at this point). 
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + std::shared_ptr graph; + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + std::string function_name_; + + // If false, we'll run the graph as we get it, without any optimizations. + // Useful for debugging. + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + const size_t num_inputs; + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + const size_t num_outputs; + + // GraphExecutors can be accessed from multiple threads, so this thread needs + // to be held every time we access the fallback or plan_cache. + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + std::mutex compile_mutex; +}; + +} // namespace torch::jit diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/graph_iterator.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/graph_iterator.h new file mode 100644 index 0000000000000000000000000000000000000000..c008902a2e8f71f1cb9eb2ce58b250971f488b50 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/graph_iterator.h @@ -0,0 +1,147 @@ +#include + +namespace torch::jit { + +// This class facilitates depth-first iteration over all nodes in a graph. +class DepthFirstGraphNodeIterator { + Node* current_; + + public: + // Constructor. + explicit DepthFirstGraphNodeIterator(std::shared_ptr& graph) + : current_(*(graph->block()->nodes().begin())) {} + + // Moves up and to the next node (may move up recursively). + void move_up() { + if (current_ == nullptr) { + return; + } + // Basically we start from the child block (which is current_) + // and we try to find the block that owns it. Now we need to check + // if that block is the graph root block, or if it is an If/Loop/etc + // block. + // + // If it's the graph root block we can stop because there is no "up" + // but if it is a node (e.g. If/Loop/etc) we need to apply logic + // based on where we are coming from to move to the next block. + // This might mean that we need to traverse up again (e.g. if we've + // reached the end of the else clause in an if block we need to go) + // up to the parent block that contains the if. + // + // Similarly if we've reached the end of the parent block containing + // the else clause we might need to go up again so this is a recursive + // function. + // + // BlockNode (if/loop/with) + // | + // [Block1] ... [Block2] + // | + // [ Node1, Node2, Node3, FromNode] + // + auto parent_block = current_->owningBlock(); + TORCH_INTERNAL_ASSERT(parent_block, "Every node must be owned by a block"); + + // Get the node that owns the parent block. This node has to be an if, + // loop, or with. + auto parent_node = parent_block->owningNode(); + if (parent_node == nullptr) { + // If there's no node that owns this current block then we're at the + // top of the graph and since we're trying to move up we have reached + // the end of the traversal. + current_ = nullptr; + return; + } + + // Check the type of node this root is. + if (parent_node->kind() == prim::If) { + // Need to check if we came from the `then` branch or the `else` branch. + auto* then_block = parent_node->blocks().at(0); + auto* else_block = parent_node->blocks().at(1); + + if (parent_block == else_block) { + // If else block then we move to the next node in the parent block. 
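+ // (If the If node was the last real node in its block, next() lands on
+ // that block's prim::Return sentinel, so we recurse upward again below.)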
+ current_ = parent_node->next(); + if (current_->kind() == prim::Return) { + move_up(); + } + } else { + // If then block then move to the else block if it is not empty. + TORCH_INTERNAL_ASSERT(parent_block == then_block); + bool else_block_empty = + else_block->nodes().begin() == else_block->nodes().end(); + + if (!else_block_empty) { + current_ = *(else_block->nodes().begin()); + } else { + // Since it's empty we move to the next node. + current_ = parent_node->next(); + if (current_->kind() == prim::Return) { + move_up(); + } + } + } + } else if ( + parent_node->kind() == prim::Loop || + parent_node->kind() == prim::With) { + current_ = parent_node->next(); + if (current_->kind() == prim::Return) { + move_up(); + } + } else { + TORCH_INTERNAL_ASSERT( + false, "Only if/loop/with nodes should have child blocks"); + } + } + + // Moves to the next adjacent node or up in to the parent if that is not + // possible. + void move_next() { + if (current_ == nullptr) { + return; + } + + // Increment to the next node in the current block. + current_ = current_->next(); + + // Check if we're at the end of the block. If so we need + // to move upwards (if it makes sense to). + if (current_->kind() == prim::Return) { + move_up(); + } + } + + // Moves to the next node in the graph into children if it can. + void move_into() { + if (current_ == nullptr) { + return; + } + + // Check if we're currently on a node that contains sub-nodes. + if (current_->kind() == prim::If || current_->kind() == prim::Loop || + current_->kind() == prim::With) { + auto* first_block = current_->blocks().at(0); + current_ = first_block->param_node(); + // Move next will move up and out of the current node if the block is + // empty. `move_up` which is called by `move_next` will handle the + // difference between If, Loop, and With blocks appropriately. + move_next(); + } else { + move_next(); + } + } + + // Get the next Node in the graph. \returns nullptr if there are no nodes + // left. + Node* next() { + auto result = current_; + + // Try move into the existing node to set the next node to be returned. + // This will move to the next node if not possible, or move upwards and + // to the next. 
+ move_into(); + + return result; + } +}; + +} // namespace torch::jit diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/instruction.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/instruction.h new file mode 100644 index 0000000000000000000000000000000000000000..73c78adbda03e5bd307979a28abcc648050c8a03 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/instruction.h @@ -0,0 +1,100 @@ +#pragma once + +#include +#include +#include + +namespace torch::jit { +// instruction look like: +// op_code X, N +// meaning of X, N depend on the op: +// O - index into operator table +// R - index into register table +// I - literal integer +// C - index into constant table +// P - jump offset relative to beginning of current instruction +// F - index into function table +// T - index into the type table, used for guard instructions +// S - index into object slots +// C - index into code table + +#define FORALL_OPCODES(_) \ + _(OP, "O") /* invoke operator X */ \ + _(OPN, "OI") /* invoke vararg operator X with N arguments */ \ + _(LOAD, "R") /* push a value from a register X */ \ + _(MOVE, "R") /* push a value from register X, clearing the register */ \ + _(STOREN, "RI") /* store N values to registers [X, X+N) */ \ + _(STORE, "R") /* store 1 value to registers X */ \ + _(DROP, "") /* drop 1 value from the top of the stack */ \ + _(DROPR, "R") /* clear register X */ \ + _(LOADC, "C") /* push the constant X */ \ + _(JF, "P") /* pop the top of the stack, if false, branch to P */ \ + _(JMP, "P") /* unconditional branch to X */ \ + _(LOOP, "PI") /* perform a loop, X is where to branch if cond is false */ \ + _(RET, "") /* exit execution */ \ + _(WAIT, "") /* wait for a future to be complete */ \ + _(CALL, "F") /* call function X */ \ + _(GUARD, "T") /* check a guard against type_table, true if passes */ \ + _(TYPECHECK, "TN") /* check each type of input[i] against type_table[X+N] */ \ + _(FAIL_GUARD, "T") /* fail a guard, patch back to GUARD */ \ + _(PROFILE_OP, "F") /* get a callback from profile_function_table at X */ \ + _(TAIL_CALL, "F") /* replace current frame with function F */ \ + _(INTERFACE_CALL, "CI") /* call method X on the first argument (of N) */ \ + _(GET_ATTR, "S") /* get attribute from slot X in an Object */ \ + _(SET_ATTR, "S") /* set attribute to slot X in an Object */ \ + _(LIST_UNPACK, "I") /* unpack list expecting length I */ \ + _(TUPLE_CONSTRUCT, "I") /* construct a tuple using X inputs */ \ + _(NAMED_TUPLE_CONSTRUCT, \ + "TI") /* construct a tuple of type X, using N inputs */ \ + _(LIST_CONSTRUCT, "TI") /* construct a list of type X, using N inputs */ \ + _(DICT_CONSTRUCT, "TI") /* construct a dict of type X, using N inputs */ \ + _(CREATE_OBJECT, "T") /* create an object of type X */ \ + _(ISINSTANCE, "TI") /* check object is one of types[X:X+N] */ \ + _(TUPLE_SLICE, "II") /* slice tup[X:(X+N)] */ \ + _(TUPLE_INDEX, "") /* get the value from a tuple at that index */ \ + _(RAISE_EXCEPTION, "") /* throws the exception from Python */ \ + _(DICT_INDEX, "") /* gets the value from the dict for given key */ \ + _(UNCHECKED_CAST, "") /* perform an unchecked cast operation */ \ + _(__IS__, "") /* performs `is` operator from Python */ \ + _(UN_INITIALIZED, \ + "") /* sets default values to variables that are uninitialized */ \ + _(__ISNOT__, "") /* performs `is not` operator from Python */ \ + _(FORMAT, "I") /* performs string format function `f strings` or `{}.format` \ + the number of inputs 
in stored in X */ \ + _(DEVICE, "") /* invokes aten::device for a Tensor */ \ + _(DTYPE, "") /* invokes aten::dtype for a Tensor */ \ + _(DIM, "") /* invokes aten::dim for a Tensor */ \ + _(__NOT__, "") /* performs `not` operator from Python */ \ + _(TO_LIST, "") /* convert the input to a list */ \ + _(NUM_TO_TENSOR, \ + "") /* performs the conversion of a number/scalar to Tensor */ \ + _(IS_CUDA, "") /* invokes aten::is_cuda for a Tensor */ \ + _(FORK, "CN") /* launch a thread to run code entry x with N inputs */ \ + _(WARN, "I") /* emit a warning with line information */ \ + _(ENTER, "EN") /* enter scope of a contextmanager */ \ + _(EXIT, "EX") /* exit the last entered contextmanager */ \ + _(AWAITABLE, "CN") /* initialize await for code entry x with N inputs */ + +enum OpCode : uint8_t { +#define DEFINE_OP(op, _) op, + FORALL_OPCODES(DEFINE_OP) +#undef DEFINE_OP +}; + +struct Instruction { + OpCode op; + uint8_t unused; + uint16_t N; + int32_t X; + // TODO: check for overflow + Instruction(OpCode op, int32_t X, uint16_t N) + : op(op), unused(0), N(N), X(X) {} +}; +std::ostream& operator<<(std::ostream& out, Instruction inst); + +bool isOpSupportedInMobile(OpCode op); +char const* toString(OpCode op); +OpCode parseOpCode(const char* str); +std::ostream& operator<<(std::ostream& out, Instruction inst); + +} // namespace torch::jit diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/interpreter.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/interpreter.h new file mode 100644 index 0000000000000000000000000000000000000000..e47a581fd5defe677253530273950b5c4ca938d5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/interpreter.h @@ -0,0 +1,159 @@ +#pragma once +#include +#include +#include + +#include +#include +#include +#include +#include + +C10_DECLARE_bool(torch_jit_disable_warning_prints); +C10_DECLARE_bool(torch_jit_enable_rethrow_caught_exception); + +namespace at { +class Tensor; +TORCH_API void launch(std::function func); +} // namespace at +namespace c10 { +struct IValue; +struct OperatorName; +} // namespace c10 + +namespace torch::jit { + +// The interpreter run Graphs with Tensor inputs and Tensor outputs +// a separate component in the autograd handles unwrapping and wrapping +// variable objects for use in the interpreter. 
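+//
+// A minimal usage sketch (illustrative only, not part of the upstream header;
+// assumes `graph` is a valid std::shared_ptr<Graph> and the IValues pushed
+// onto the stack match the graph's inputs):
+//
+//   Code code(graph, "my_function");
+//   InterpreterState state(code);
+//   Stack stack;
+//   stack.emplace_back(at::ones({2, 2}));
+//   state.run(stack);          // outputs are left on the stack
+//   IValue result = stack.back();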
+namespace interpreter { +struct CodeImpl; +} + +struct Node; +struct GraphExecutor; +struct InterpreterStateImpl; +struct Graph; +struct Node; +struct Instruction; +using Stack = std::vector; +using c10::ivalue::Future; +using TaskLauncher = std::function)>; + +struct TORCH_API Code { + Code() = default; + explicit Code(interpreter::CodeImpl* pImpl); + // remaining_bailout_depth is irrelevant in a `Code` object unless the `Code` + // is directly created by `GraphExecutor` in which case it's likely to contain + // `prim::BailOut`s to control the maximum depth of bailout chains + explicit Code( + const std::shared_ptr& graph, + std::string function_name, + size_t remaining_bailout_depth = 0); + + const std::vector& grad_executors(); + const std::vector& diff_graph_op_executors(); + + explicit operator bool() const { + return pImpl != nullptr; + } + size_t num_inputs() const; + size_t num_outputs() const; + size_t num_bailouts() const; + const std::vector& constant_table() const; + const std::vector& type_table() const; + const std::vector& instructions() const; + const std::unordered_map& op_to_num_specified_args() + const; + const std::vector& instructions_source() const; + void request_bailout(size_t index); + size_t register_size() const; + std::shared_ptr graph() const; + + private: + std::shared_ptr pImpl; + friend struct InterpreterStateImpl; + friend std::ostream& operator<<(std::ostream& out, const Code& code); +}; + +struct TORCH_API MobileCode : Code { + explicit MobileCode( + const std::shared_ptr& graph, + std::string function_name, + bool emit_default_input_instructions = true, + bool support_default_args_before_out = true, + bool emit_promoted_ops = true, + size_t remaining_bailout_depth = 0); +}; + +struct InterpreterState { + TORCH_API InterpreterState( + const Code& code, + TaskLauncher taskLauncher = at::launch); + TORCH_API void run(Stack& stack); + TORCH_API c10::intrusive_ptr runAsync(Stack& stack); + c10::intrusive_ptr getFuture(); + + private: + InterpreterState(c10::intrusive_ptr pImpl); + // Ideally we should use c10::intrusive_ptr for pImpl; + // but intrusive_ptr requires full definition of InterpreterStateImpl, + // which we need to hide in the header. + c10::intrusive_ptr pImpl; + friend struct InterpreterStateImpl; +}; + +// Created by wait() +struct Suspend : public std::exception { + const char* what() const noexcept override { + return "Suspend"; + } + + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + explicit Suspend(c10::intrusive_ptr future_) + : future(std::move(future_)) {} + + c10::intrusive_ptr future; +}; + +// InterpreterContinuation propagates dist_autograd_context_id +// through (and only through) the forward pass manually, other +// thread local settings are propagated with ThreadLocalState +struct InterpreterContinuation { + InterpreterContinuation( + InterpreterState state_, + Stack stack_, + int64_t dist_autograd_context_id = 0, + c10::optional tls_state = c10::nullopt) + : state(std::move(state_)), + stack(std::move(stack_)), + tls_state_(std::move(tls_state)) +#ifdef USE_DISTRIBUTED + , + dist_autograd_context_id_(dist_autograd_context_id) +#endif + { + } + + void operator()(); + + private: + InterpreterState state; + Stack stack; + c10::optional tls_state_ = c10::nullopt; +#ifdef USE_DISTRIBUTED + int64_t dist_autograd_context_id_; +#endif +}; + +// what is the tensors type, including state from the current execution context +// that modifies how the tensor behaves. 
For instance if no_grad is enabled +// this will cause the TensorType to have requires_grad=False. +TORCH_API at::TensorTypePtr tensorTypeInCurrentExecutionContext( + const at::Tensor& t); + +// current (TLS) TorchScript interpreter callstack +TORCH_API std::vector currentCallstack(); +TORCH_API std::vector currentModuleHierarchy(); + +} // namespace torch::jit diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/jit_exception.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/jit_exception.h new file mode 100644 index 0000000000000000000000000000000000000000..728675ed7841835aba35771b4b1ba5e827d2c5d5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/jit_exception.h @@ -0,0 +1,38 @@ +#pragma once + +#include + +#include +#include +#include + +namespace torch::jit { + +struct TORCH_API JITException : public std::runtime_error { + explicit JITException( + const std::string& msg, + c10::optional python_class_name = c10::nullopt, + c10::optional original_msg = c10::nullopt); + + c10::optional getPythonClassName() const { + return python_class_name_; + } + + // the original msg if this is from a python exception. The interpretor has + // changed the original message by adding "The following operation failed in + // the TorchScript interpreter." in front of it in the handleError function. + c10::optional getOriginalMsg() const { + return original_msg_; + } + + static const std::string& getCaughtOriginalMsg(); + static const std::string& getCaughtPythonClassName(); + static void setCaughtOriginalMsg(const std::string& msg); + static void setCaughtPythonClassName(const std::string& pythonClassName); + + private: + c10::optional python_class_name_; + c10::optional original_msg_; +}; + +} // namespace torch::jit diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/jit_trace.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/jit_trace.h new file mode 100644 index 0000000000000000000000000000000000000000..12be844e35a91a8ca9e775bd030764b58452a172 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/jit_trace.h @@ -0,0 +1,8 @@ +#include +#include + +namespace torch::jit { +TORCH_API std::shared_ptr TraceGraph( + std::shared_ptr graph, + Stack& stack); +} // namespace torch::jit diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/logging.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/logging.h new file mode 100644 index 0000000000000000000000000000000000000000..b0b67c68088389bdd35e72c00cd7d1005399cb1c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/logging.h @@ -0,0 +1,86 @@ +#pragma once + +#include +#include +#include +#include + +#include + +namespace torch::jit::logging { + +class LoggerBase { + public: + TORCH_API virtual void addStatValue( + const std::string& stat_name, + int64_t val) = 0; + virtual ~LoggerBase() = default; +}; + +TORCH_API LoggerBase* getLogger(); +TORCH_API LoggerBase* setLogger(LoggerBase* logger); + +// No-op logger. This is the default and is meant to incur almost no runtime +// overhead. + +class NoopLogger : public LoggerBase { + public: + void addStatValue(const std::string& stat_name, int64_t val) override {} + ~NoopLogger() override = default; +}; + +// Trivial locking logger. Pass in an instance of this to setLogger() to use it. +// This keeps track of the sum of all statistics. 
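+// For example (illustrative sketch; setLogger() stores a raw pointer, so the
+// logger instance is expected to outlive its use):
+//
+//   static LockingLogger logger;
+//   setLogger(&logger);
+//   getLogger()->addStatValue(runtime_counters::EXECUTION_PLAN_CACHE_HIT, 1);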
+// +// NOTE: this is not written in a scalable way and should probably only be used +// in the single-threaded case or for testing. +class TORCH_API LockingLogger : public LoggerBase { + public: + void addStatValue(const std::string& stat_name, int64_t val) override; + virtual int64_t getCounterValue(const std::string& name) const; + enum class AggregationType { SUM = 0, AVG = 1 }; + void setAggregationType(const std::string& stat_name, AggregationType type); + ~LockingLogger() override = default; + + private: + mutable std::mutex m; + struct RawCounter { + RawCounter() : sum(0), count(0) {} + int64_t sum; + size_t count; + }; + std::unordered_map raw_counters; + std::unordered_map agg_types; +}; + +// Make this struct so the timer internals are opaque to the user. +struct JITTimePoint { + std::chrono::time_point point; +}; + +TORCH_API JITTimePoint timePoint(); +TORCH_API void recordDurationSince( + const std::string& name, + const JITTimePoint& tp); + +namespace runtime_counters { +constexpr const char* GRAPH_EXECUTORS_CONSTRUCTED = + "pytorch_runtime.graph_executors_constructed"; +constexpr const char* GRAPH_EXECUTOR_INVOCATIONS = + "pytorch_runtime.graph_executor_invocations"; +constexpr const char* EXECUTION_PLAN_CACHE_HIT = + "pytorch_runtime.execution_plan_cache_hit"; +constexpr const char* EXECUTION_PLAN_CACHE_MISS = + "pytorch_runtime.execution_plan_cache_miss"; + +inline std::vector allRuntimeCounters() { + return { + GRAPH_EXECUTORS_CONSTRUCTED, + GRAPH_EXECUTOR_INVOCATIONS, + EXECUTION_PLAN_CACHE_HIT, + EXECUTION_PLAN_CACHE_MISS}; +} + +} // namespace runtime_counters + +} // namespace torch::jit::logging diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/operator.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/operator.h new file mode 100644 index 0000000000000000000000000000000000000000..bcab476441e293a5d656e29b7f55d0bdd1af9e03 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/operator.h @@ -0,0 +1,345 @@ +// in memory description of all ATen Ops similar to Caffe2 schema +// once C10 exists this can be removed, or stubbed out, but we need +// it now to implement correct semantic checking for script +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace torch::jit { + +struct Node; +using ::c10::Argument; +using ::c10::FunctionSchema; +using ::c10::Symbol; + +using OperationCreator = Operation (*)(const Node*); + +namespace { +const std::array kJitOnlyOperatorTags = { + at::Tag::pt2_compliant_tag}; +} + +/* + * Note: JIT relies on Operator instances having static lifetime, because + * it for example stores a non-owning FunctionSchema* pointer in the Node class, + * which points to the function schema stored in the Operator instance. + * Also, jit::Operator is meant to store more operator related information like + * symbolic derivatives, which also requires them to have static lifetime + * so that changes to symbolic derivatives are remembered. + * + * Currently, the JIT operator library contains a jit::Operator instance + * with a wrapper for each c10 operator. The c10 operator library registers + * those wrappers using listeners in register_c10_ops.cpp. 
+ * TODO Instead of doing it this way, we should only have pure-jit ops in + * the jit library but have the JIT operator lookup look into the c10 library + * too. + */ + +// An Operator is a thin wrapper around either a pure JIT operator (e.g. prim +// ops) or a c10 operator, allowing some common operations and abstracting away +// the concrete operator nature. +struct TORCH_API Operator { + private: + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + struct C10Operator final { + c10::OperatorHandle handle_; + Operation op_; + }; + struct UnparsedFunctionSchema final { + std::string schema_string_; + mutable c10::optional alias_analysis_; + }; + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + struct JitOnlyOperator final { + // The only valid transition for schema_ is from right->left, i.e. + // when the schema gets parsed. + mutable std::variant schema_; + + std::variant op_; + }; + + public: + Operator(c10::OperatorHandle opHandle, Operation operation) + : op_(C10Operator{std::move(opHandle), std::move(operation)}) {} + + Operator( + std::string schema, + Operation op, + c10::AliasAnalysisKind alias_analysis) + : op_(JitOnlyOperator{ + UnparsedFunctionSchema{std::move(schema), alias_analysis}, + Operation(std::move(op))}) {} + + Operator( + std::string name, + std::string overload_name, + std::vector arguments, + std::vector returns, + Operation op, + c10::AliasAnalysisKind alias_analysis) + : op_(JitOnlyOperator{ + FunctionSchema(varArgSchemaWithName( + std::move(name), + std::move(overload_name), + std::move(arguments), + std::move(returns), + alias_analysis)), + std::move(op)}) {} + + Operator( + std::string schema, + OperationCreator op_creator, + c10::AliasAnalysisKind alias_analysis) + : op_(JitOnlyOperator{ + UnparsedFunctionSchema{std::move(schema), alias_analysis}, + op_creator}) {} + + // Helper constructor to register `op` to run + // run for _every_ IR Node where n.kind() == name, regardless of arguments. + // This is accomplished by marking the schema varargs and having no required + // arguments. + Operator( + Symbol name, + OperationCreator op_creator, + c10::AliasAnalysisKind alias_analysis) + : op_(JitOnlyOperator{ + FunctionSchema(varArgSchemaWithName(name, alias_analysis)), + op_creator}) {} + + Operation getOperation(const Node* node = nullptr) const { + return std::visit( + c10::overloaded( + [](const C10Operator& op) { return op.op_; }, + [node](const JitOnlyOperator& op) { + return std::visit( + c10::overloaded( + [](const Operation& op) { return op; }, + [node](const OperationCreator& op_creator) { + return op_creator(node); + }), + op.op_); + }), + op_); + } + + Operation getOperationForDispatchKey(c10::DispatchKey dk) const { + // TODO: some sort of caching mechanism? 
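+  // For c10-backed operators this forwards to
+  // handle_.callBoxedForDispatchKey(dk, stack); JIT-only operators carry no
+  // per-dispatch-key table, so requesting one fails below.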
+ return std::visit( + c10::overloaded( + [dk](const C10Operator& op) { + return Operation([op, dk](Stack& stack) { + op.handle_.callBoxedForDispatchKey(dk, stack); + }); + }, + [](const JitOnlyOperator& op) { + TORCH_CHECK( + false, + "calling a JIT operator for dispatch key is not supported"); + return Operation(nullptr); + }), + op_); + } + + const FunctionSchema& schema() const { + return std::visit( + c10::overloaded( + [](const C10Operator& op) -> const FunctionSchema& { + return op.handle_.schema(); + }, + [](const JitOnlyOperator& op) -> const FunctionSchema& { + // we lazily parse schema initialized from strings so that + // we do less work during static operator registration + if (op.schema_.index() == 1) { + auto& unmaterializedSchema = + std::get(op.schema_); + FunctionSchema schema = + parseSchema(unmaterializedSchema.schema_string_); + if (unmaterializedSchema.alias_analysis_.has_value()) { + // TODO What if it gets set later? + schema.setAliasAnalysis( + *unmaterializedSchema.alias_analysis_); + } + op.schema_ = std::move(schema); + } + return std::get(op.schema_); + }), + op_); + } + + c10::ArrayRef getTags() const { + return std::visit( + c10::overloaded( + [](const C10Operator& op) { return op.handle_.getTags(); }, + [](const JitOnlyOperator& op) { + // JitOnlyOperators don't have an c10::OperatorHandle or a way to + // specify tags. We're grandfathering them all into + // pt2_compliant_tag, but for anything else, please just stop + // using JitOnlyOperator. + return c10::ArrayRef(kJitOnlyOperatorTags); + }), + op_); + } + + bool isC10Op() const { + return op_.index() == 0; + } + + c10::AliasAnalysisKind aliasAnalysisKind() const { + const FunctionSchema& schemaRef = schema(); + c10::AliasAnalysisKind alias_analysis = schemaRef.aliasAnalysis(); + + TORCH_CHECK( + alias_analysis == AliasAnalysisKind::FROM_SCHEMA || + !schemaRef.hasAnyAliasInfo(), + "In operator registration: Tried to register operator ", + schemaRef, + " with aliasing information in the schema but without AliasAnalysisKind::FROM_SCHEMA."); + return alias_analysis; + } + + bool hasOperation() const { + return std::visit( + c10::overloaded( + [](const C10Operator&) { return true; }, + [](const JitOnlyOperator& op) { return op.op_.index() == 0; }), + op_); + } + + private: + static FunctionSchema varArgSchemaWithName( + Symbol name, + AliasAnalysisKind alias_analysis) { + auto result = FunctionSchema( + name, + "", + {}, + {}, + /*is_vararg*/ true, + /*is_varret*/ true); + result.setAliasAnalysis(alias_analysis); + return result; + } + + static FunctionSchema varArgSchemaWithName( + std::string name, + std::string overload_name, + std::vector arguments, + std::vector returns, + AliasAnalysisKind alias_analysis) { + auto result = FunctionSchema( + std::move(name), + std::move(overload_name), + std::move(arguments), + std::move(returns), + /*is_vararg*/ false, + /*is_varret*/ false); + result.setAliasAnalysis(alias_analysis); + return result; + } + + std::variant op_; +}; + +TORCH_API std::string canonicalSchemaString(const FunctionSchema& schema); + +TORCH_API const std::vector> getAllOperators(); +TORCH_API const std::vector>& getAllOperatorsFor( + Symbol name); +// Returns operators in the order which OpOverloadPacket resolves them. +TORCH_API std::vector> getAllSortedOperatorsFor( + Symbol name); + +// given a operator with an overload name, find the specific operator related to +// it, may return nullptr if no operator exists. 
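+// For example (illustrative):
+//   auto op = findOperatorFor(c10::OperatorName("aten::add", "Tensor"));
+//   if (op) { /* use op->schema(), op->getOperation(), ... */ }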
+TORCH_API std::shared_ptr findOperatorFor( + const c10::OperatorName& full_name); + +TORCH_API std::vector findSimilarOperators(Symbol input_op); + +TORCH_API void registerOperator(Operator&& op); +TORCH_API void deregisterOperator(const FunctionSchema& schema); + +// XXX: this function is meant to be used with string literals only! +TORCH_API std::shared_ptr getOperatorForLiteral( + const char* signature); + +// Ensure the thing that registers c10 ops is defined. +// Otherwise, our registry will not have c10 ops. You can run into this +// scenario if you're querying registered ops during static init. +// +// This fn is defined in register_c10_ops.cpp +TORCH_API void ensure_c10_registerer_defined(); + +// Used to assert that unschematized operators have an analysis method written +TORCH_API bool aliasAnalysisHasSpecialCaseFor(c10::Symbol sym); + +// A factory function to generate an optional operator. It has two +// instantiations depending on the template bool arg value. The arg can be a +// compile-time function for the selective op registration based on schema +// string. +template +c10::optional OperatorGenerator( + const char* schema_str, + Func&& op, + AliasAnalysisKind alias_analysis) { + return c10::optional(Operator( + std::string(schema_str), std::forward(op), alias_analysis)); +} + +template +c10::optional OperatorGenerator( + torch::detail::SelectiveStr schema_str, + Func&& op, + AliasAnalysisKind alias_analysis) { + return OperatorGenerator( + static_cast(schema_str), + std::forward(op), + alias_analysis); +} + +template +c10::optional OperatorGenerator( + torch::detail::SelectiveStr schema_str, + Func&& op, + AliasAnalysisKind alias_analysis) { + return c10::nullopt; +} + +template +c10::optional OperatorGenerator( + const std::string name, + const std::string overload_name, + const std::vector arguments, + const std::vector returns, + Func&& op, + AliasAnalysisKind alias_analysis) { + return c10::optional(Operator( + name, + overload_name, + arguments, + returns, + std::forward(op), + alias_analysis)); +} + +} // namespace torch::jit diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/operator_options.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/operator_options.h new file mode 100644 index 0000000000000000000000000000000000000000..50c41fc3ad39d44262b4da8e54fd4b75b00d8f2d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/operator_options.h @@ -0,0 +1,9 @@ +#pragma once + +#include + +namespace torch::jit { + +using AliasAnalysisKind = c10::AliasAnalysisKind; + +} // namespace torch::jit diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/print_handler.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/print_handler.h new file mode 100644 index 0000000000000000000000000000000000000000..36feaffb200b655bd452ff822ae7af5149bc2670 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/print_handler.h @@ -0,0 +1,15 @@ +#pragma once + +#include + +#include + +namespace torch::jit { + +using PrintHandler = void (*)(const std::string&); + +TORCH_API PrintHandler getDefaultPrintHandler(); +TORCH_API PrintHandler getPrintHandler(); +TORCH_API void setPrintHandler(PrintHandler ph); + +} // namespace torch::jit diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/profiling_graph_executor_impl.h 
b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/profiling_graph_executor_impl.h new file mode 100644 index 0000000000000000000000000000000000000000..45da1f030e96283abf6bd68c7038435c16c1b7fa --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/profiling_graph_executor_impl.h @@ -0,0 +1,73 @@ +#pragma once +#include +#include +#include + +C10_DECLARE_bool(torch_jit_static_then_dynamic); + +C10_DECLARE_bool(torch_jit_always_dynamic); + +namespace torch::jit { + +TORCH_API void runNooptPassPipeline(std::shared_ptr& graph); + +struct TORCH_API ProfilingGraphExecutorImpl : public GraphExecutorImplBase { + ProfilingGraphExecutorImpl( + const std::shared_ptr& graph, + std::string function_name); + + const ExecutionPlan& getPlanFor( + Stack& stack, + c10::optional remaining_bailout_depth) override; + GraphExecutorState getDebugState() override; + ~ProfilingGraphExecutorImpl() override = default; + + void debugFlushCompilationCache(); + + bool isOptimized() const override { + return optimized_plan_.has_value(); + } + + private: + const ExecutionPlan& getOptimizedPlanFor( + Stack& stack, + c10::optional remaining_bailout_depth); + void runProfilingInsensitiveOptimizations(std::shared_ptr& graph); + void runProfilingOptimizations( + std::shared_ptr& graph, + size_t remaining_depth); + void replaceFallbackGraphWithFallbackFunction(Block* b); + FusionBehavior getCurrentBehavior(size_t remaining_depth); + size_t getInstantiatedBailoutDepth(); + void runNoGradOptimizations( + std::shared_ptr& graph, + size_t remaining_bailout_depth); + void runFinalOptimizations(std::shared_ptr& graph); + + void clearTheGraphCompilationIntermediateGraphs(); + + std::unique_ptr pr_; + c10::optional + profiling_plan_; // plan to run in order to profiling the code + c10::optional optimized_plan_; + FusionStrategy fusion_strategy_; + + // this plan is used if getGraphExecutorOptimize is unset + c10::optional fallback_plan_; + // fallback functions are inserted for tensorexpr fusion groups + // and by specialize_autogradzero. Whenever, at runtime, input + // tensor don't match profiled properties, fallback functions are called + // They are the deoptimized version of the logic in fusion groups + // and/or autograd. + // The fallback functions are owned by a GraphExecutor instance + // They only exist in the optimized graph which is a private property + // of the GraphExecutor and only shared with InterpreterState + std::vector> fallback_functions_; + c10::optional remaining_bailout_depth_; + // The time the optimized_plan_ is created. + int32_t time_optimized_plan_created_ = 0; + // Has the extra memory used by the graph for profiling is released? + bool is_graph_extra_memory_released_ = false; +}; + +} // namespace torch::jit diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/profiling_record.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/profiling_record.h new file mode 100644 index 0000000000000000000000000000000000000000..c45dcde7b0bf0ea2314eb676ea87e958499ff7a2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/profiling_record.h @@ -0,0 +1,205 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +// We would like to assign each position/axis of a tensor an abstract size +// * For each `tensor` we have a profiled `Value` of a `TensorType` describing +// the properties of the `tensor`. 
+// * `TensorType` has a property called `symbolic_sizes_` to describe observed +// `tensor.sizes()` +// * `symbolic_sizes_` is a vector of abstract sizes (or +// `std::vector`) where +// * `ShapeSymbol`at `symbolic_sizes_[i]` describes the size value +// (`Dimension`) at `tensor.sizes()[i]` +// * We may see the same `Dimension` at different positions `i` in +// `tensor.sizes()` or even in different `tensor` +// * First, we would like associate the same `ShapeSymbol` to the same +// `Dimension` across **one** profiling execution or run of a TorchScript +// function. +// * The same `ShapeSymbol`s in different positions of `symbolic_shapes_` in +// possibly different `TensorType`s (i.e. `TensorType`s for different +// profiled values) form an implicit set. The elements of such a set are +// called *dimension locations*. +// * These sets allow us to track how the shapes of input arguments of some +// operation relate to operation's output shapes as the input and output +// shapes might share the same `ShapeSymbol`s +// * For **every** profiling run, we would like to maintain the invariant that +// *the same `ShapeSymbol` is always associated with the same `Dimension`*. +// * To maintain this invariant we merge the profiling information from all +// profiling runs, +// * For every two runs, we iterate over all `symbic_shapes_` and compare +// their `ShapeSymbol`s in the same position. +// * if we observe that for every dimension location that has +// the`ShapeSymbol S1` in run #1 there is **only one** `ShapeSymbol S2` in +// the same dimension location in run #2, we conclude that the invariant +// holds. +// * However, if we observe some dimension locations in run #2 have +// `ShapeSymbol S2` and the other ones have `ShapeSymbol S3` we would like +// to partition the virtual set of dimension locations associated with +// `ShapeSymbol S1` into two new subsets, so the invariant holds. +// * The partitioning works by assigning a new symbol to the dimension +// locations (associated with `ShapeSymbol S1`) that have `ShapeSymbol S2` +// and another new symbol to the dimension locations that have `ShapeSymbol +// S3`. In other words, +// * Subset #1 will consist of the dimension locations that in run #2 have +// `ShapeSymbol S2` and will have `ShapeSymbol S4` in those dimension +// locations +// * Subset #2 will consist of the dimension locations that in run #2 have +// `ShapeSymbol S4` and will have `ShapeSymbol S5` in those dimension +// locations +// * The effective result of merging the profiling information from two runs +// is new `TensorTypes` whose `symbolic_sizes_` /dimension locations have +// either `ShapeSymbol S4` or `ShapeSymbol S5`. +// * Partitioning can be done even before we have seen all the dimension +// locations associated with `ShapeSymbol S1` +// * We use `getSymbolInSet` of `ShapeSymbolTable` to remember all +// `ShapeSymbols` from run #2 we observed in the dimension locations +// associated with `ShapeSymbol S1` . +// * For every `ShapeSymbol` from run #2 in the dimension location +// associated with `ShapeSymbol S1` `getSymbolInSet` returns a symbol +// that we assign to the dimension location in a new TensorType. +// * It's important to point out that the same `ShapeSymbol S2` from run +// #2 in two dimension locations that have different `ShapeSymbol`s in +// run #1 are different! These dimension locations will belong to +// different subsets and have different `ShapeSymbol`s after merge. 
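+// * A small worked example (illustrative): suppose three dimension locations
+//   share `ShapeSymbol S1` after run #1. In run #2 the first two locations
+//   observe `Dimension` 2 and the third observes `Dimension` 3. The set for
+//   `S1` is then partitioned: the first two locations are renamed to one
+//   fresh symbol (say `S4`) and the third to another (say `S5`), so each
+//   symbol is again associated with a single `Dimension` in the merged
+//   `TensorType`s.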
+// * On the other hand, for the same `ShapeSymbol S2` in two dimension +// locations that have `ShapeSymbol S1` in run #1`getSymbolInSet` will +// return the same symbol. + +namespace torch::jit { + +using ::c10::TensorTypePtr; +using Dimension = int64_t; + +TORCH_API void RegisterProfilingNode(const std::function&); + +struct ProfilingRecord; + +// `SetPartitioningHelper` is used to maintain the following invariant: +// For **every** profiling run, *the same `ShapeSymbol` is always associated +// with the same `Dimension`*. +// while merging the profiling information from multiple runs. +struct SetPartitioningHelper { + std::map> + sets2subsets_; + + // `partitionSetByDimension` partitions a virtual set + // of dimension locations associated with ShapeSymbol `symbol` into subsets. + // Partitioning is equivalent to giving (or renaming) a particular + // dimension location a new `ShapeSymbol`. + // The same `Dimension` value in different dimension locations + // that used to have `symbol` will receive the same + // new `ShapeSymbol`, effectively forming a new set. + c10::ShapeSymbol partitionSetByDimension( + Dimension new_size, + c10::ShapeSymbol symbol) { + auto& dims2symbols = getSetForSymbol(symbol); + + if (dims2symbols.count(new_size) == 0) { + auto new_sym = c10::ShapeSymbol::newSymbol(); + dims2symbols[new_size] = new_sym; + return new_sym; + } + + return dims2symbols[new_size]; + } + + private: + std::map& getSetForSymbol(c10::ShapeSymbol s) { + auto& set = sets2subsets_[s]; + // N.B. adding a mapping { s.static_size(), s } + // makes sure we preserve the fact that + // some dimension values remain the same + // across all profiled runs + if (s.is_static()) { + set.insert({s.static_size(), s}); + } + return set; + } +}; + +// ShapeSymbolTable is used by Interpreter +// to assign dimension values to ShapeSymbols +// and fail a guard if the same symbol +// is assigned more than one dimension value. +struct ShapeSymbolTable { + // N.B. we treat static symbols as always assigned + // to themselves + bool isBound(c10::ShapeSymbol s) { + if (s.is_static()) { + return true; + } + return data_.count(s) != 0; + } + + // N.B. we treat static symbols as always assigned + // to themselves + Dimension getValue(c10::ShapeSymbol s) { + if (s.is_static()) { + return s.static_size(); + } + return data_[s]; + } + void assign(c10::ShapeSymbol s, Dimension v) { + TORCH_INTERNAL_ASSERT(!s.is_static()); + data_[s] = v; + } + std::map data_; + // Tries to assign dimension values from `new_sizes` to + // `ShapeSymbol`s `sym_shapes`. + // Returns `true` if every dimension value from `new_sizes` + // can be assigned to the corresponding `ShapeSymbol` from + // `sym_shapes` + // A dimension value can be assigned to a `ShapeSymbol` + // * if the symbol isn't assigned yet any dimension value + // * if the symbol is assigned and its value is equal to + // the dimension value from `new_sizes` + bool bindSymbolicShapes( + at::IntArrayRef new_sizes, + const c10::SymbolicShape& sym_shapes); +}; + +struct ProfilingRecord { + // N.B. 
ProfilingRecord's copy and move c-tor are disabled, so we won't + // end up accidentally copying or moving ProfilingRecords whose addresses + // are captured in callbacks_ + ProfilingRecord(const ProfilingRecord&) = delete; + ProfilingRecord(ProfilingRecord&&) noexcept = delete; + TORCH_API static std::unique_ptr instrumentGraph( + const std::shared_ptr& graph); + TORCH_API static void removeProfilingNodes(Block* b); + TORCH_API static void removeProfileCounter(Block* b); + + std::shared_ptr profiled_graph_; + mutable std::mutex mutex_; + size_t profiling_count_; + + bool ready() const; + + std::shared_ptr graph() const { + return profiled_graph_; + } + + TORCH_API ProfileIValueOp* createProfileIValueNode(Value* in_val); + TORCH_API ProfileIValueOp* createProfileIValueNode(ArrayRef inputs); + + private: + ProfileOp* createProfileNode( + const std::function& fp, + at::ArrayRef inputs); + void instrumentBlock(Block* block); + void insertShapeProfile(Node* n, size_t offset, const TypePtr& input_type); + ProfilingRecord(std::shared_ptr g); +}; + +} // namespace torch::jit diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/register_ops_utils.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/register_ops_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..de70cea3a1d50a515143feb2ec04393addc3df98 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/register_ops_utils.h @@ -0,0 +1,884 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace torch::jit { +constexpr inline c10::AliasAnalysisKind aliasAnalysisFromSchema() { + return c10::AliasAnalysisKind::FROM_SCHEMA; +} + +constexpr inline c10::AliasAnalysisKind aliasAnalysisConservative() { + return c10::AliasAnalysisKind::CONSERVATIVE; +} + +constexpr inline c10::AliasAnalysisKind aliasAnalysisSpecialCase() { + return c10::AliasAnalysisKind::INTERNAL_SPECIAL_CASE; +} + +template +c10::List make_result_list(const TypePtr& elemType) { + return c10::List(); +} + +template <> +c10::impl::GenericList make_result_list(const TypePtr& elemType); + +// As described in https://docs.python.org/3/library/functions.html#round +// When a number is exactly halfway between two integers, python builtin round +// function will round to even number. We use round(x/2)*2 to handle the +// special halfway case. For positive 'x', round(x/2)*2 = +// round((x_e + x_r)/2)*2 = x_e + round(x_r/2)*2, where x_e is an even integer, +// x_r is either 0.5 of 1.5, round(x_r/2)*2 results a 0 or 2, so the final +// result will always be a even number. Due to symmetricity, it also applies to +// negative cases. +inline double round_to_even(double a) { + return a - std::floor(a) == 0.5 ? 
(std::round(a * 0.5) * 2.0) : std::round(a); +} + +// using the rules from python_arg_parser FunctionParameter::check +// tensor cannot have grad set, tensor must be 0 dim, +// and if the dest is an int the source must be integral type +void checkImplicitTensorToNum(const at::Tensor& t, bool toInt); + +static C10_UNUSED int64_t floordiv(int64_t a, int64_t b) { + if (b == 0) { + throw std::runtime_error("division by 0"); + } + if ((a > 0) == (b > 0)) { + // simple case, both have same sign + return a / b; + } else { + // in python division rounds down, it doesn't not truncate like in c++ + auto r = lldiv(a, b); + return (r.rem) ? r.quot - 1 : r.quot; + } +} +TORCH_API void checkDoubleInRange(double a); +static C10_UNUSED int64_t floor(double a) { + checkDoubleInRange(a); + return std::floor(a); +} +static C10_UNUSED int64_t ceil(double a) { + checkDoubleInRange(a); + return std::ceil(a); +} + +static C10_UNUSED int64_t gcd(int64_t a, int64_t b) { + while (b != 0) { + int64_t r = a % b; + a = b; + b = r; + } + // in python gcd returns non-negative values + return std::abs(a); +} + +int64_t partProduct(int n, int m); + +void loop(int n, int64_t& p, int64_t& r); + +int nminussumofbits(int v); + +int64_t factorial(int n); +static const double degToRad = std::acos(-1.0) / 180.0; +static const double radToDeg = 180.0 / std::acos(-1.0); +double degrees(double x); +double radians(double x); + +// Convert an python index (which may be negative) into an index usable for a +// C++ container + +// Equivalent to list.at(idx) +template +decltype(auto) getItem(const c10::List& list, int64_t idx) { + const int64_t list_size = list.size(); + const int64_t normalized_idx = normalizeIndex(idx, list_size); + if (normalized_idx < 0 || normalized_idx >= list_size) { + throw std::out_of_range("list index out of range"); + } + return list.get(normalized_idx); +} + +template +void setItem(const c10::List& list, int64_t idx, T&& value) { + const int64_t list_size = list.size(); + const int64_t normalized_idx = normalizeIndex(idx, list_size); + if (normalized_idx < 0 || normalized_idx >= list_size) { + throw std::out_of_range("list index out of range"); + } + list.set(normalized_idx, std::forward(value)); +} + +void listAppend(Stack& stack); + +void listReverse(Stack& stack); + +template +void minList(Stack& stack) { + c10::List a = pop(stack).to>(); + c10::List b = pop(stack).to>(); + + size_t min_size = std::min(a.size(), b.size()); + for (const auto i : c10::irange(min_size)) { + if (a[i] == b[i]) { + continue; + } + + push(stack, a[i] < b[i] ? a : b); + return; + } + + push(stack, b.size() < a.size() ? b : a); +} + +template +void maxList(Stack& stack) { + c10::List a = pop(stack).to>(); + c10::List b = pop(stack).to>(); + + size_t min_size = std::min(a.size(), b.size()); + for (const auto i : c10::irange(min_size)) { + if (a[i] == b[i]) { + continue; + } + + push(stack, a[i] > b[i] ? a : b); + return; + } + + push(stack, b.size() > a.size() ? 
b : a); +} + +void listPopImpl(Stack& stack, const char* empty_message); + +void listPop(Stack& stack); + +void listClear(Stack& stack); + +void listDelete(Stack& stack); + +void listInsert(Stack& stack); + +template +void listRemove(Stack& stack) { + T elem = pop(stack).to(); + c10::List list = pop(stack).to>(); + + auto pos = std::find(list.begin(), list.end(), elem); + + if (pos != list.end()) { + list.erase(pos); + } else { + AT_ERROR("list.remove(x): x not in list"); + } +} + +template +void listMin(Stack& stack) { + c10::List list = pop(stack).to>(); + size_t list_size = list.size(); + if (list_size == 0) { + throw std::runtime_error("min() arg is an empty sequence"); + } + + T min_elem = list[0]; + for (const auto i : c10::irange(1, list_size)) { + T elem = list[i]; + min_elem = elem < min_elem ? elem : min_elem; + } + + stack.push_back(min_elem); +} + +template +void listMax(Stack& stack) { + c10::List list = pop(stack).to>(); + size_t list_size = list.size(); + if (list_size == 0) { + throw std::runtime_error("max() arg is an empty sequence"); + } + + T max_elem = list[0]; + for (const auto i : c10::irange(1, list_size)) { + T elem = list[i]; + max_elem = elem > max_elem ? elem : max_elem; + } + + stack.push_back(max_elem); +} + +template <> +void listRemove(Stack& stack); + +template +void listIndex(Stack& stack) { + T elem = pop(stack).to(); + c10::List list = pop(stack).to>(); + + auto pos = std::find(list.begin(), list.end(), elem); + + if (pos != list.end()) { + push(stack, static_cast(std::distance(list.begin(), pos))); + } else { + AT_ERROR("'", elem, "' is not in list"); + } +} + +template <> +void listIndex(Stack& stack); + +template +void listCount(Stack& stack) { + T elem = pop(stack).to(); + c10::List list = pop(stack).to>(); + + const int64_t count = std::count(list.begin(), list.end(), elem); + push(stack, count); +} + +template <> +void listCount(Stack& stack); + +void listExtend(Stack& stack); + +void listCopy(Stack& stack); + +void listSelect(Stack& stack); + +void listLen(Stack& stack); + +template +void listEq(Stack& stack) { + c10::List b = pop(stack).to>(); + c10::List a = pop(stack).to>(); + push(stack, a == b); +} + +template +void listNe(Stack& stack) { + c10::List b = pop(stack).to>(); + c10::List a = pop(stack).to>(); + push(stack, a != b); +} + +inline bool tensor_list_equal( + const c10::List& a, + const c10::List& b) { + if (a.size() != b.size()) { + return false; + } + + for (const auto i : c10::irange(a.size())) { + const at::Tensor& a_element = a[i]; + const at::Tensor& b_element = b[i]; + // This preserves Python's semantics, which uses eq() to compare two + // elements, then passes the result to bool(). 
+ // see: https://docs.python.org/3.4/reference/datamodel.html#object.__ge__ + const auto cmp_result = a_element.eq(b_element); + if (!at::native::is_nonzero(cmp_result)) { + return false; + } + } + + return true; +} + +// Specialization for at::Tensor, since it doesn't define operator== +template <> +void listEq(Stack& stack); + +// Specialization for at::Tensor, since it doesn't define operator== +template <> +void listNe(Stack& stack); + +void listList(Stack& stack); + +template +void listContains(Stack& stack) { + auto key = pop(stack).to(); + auto list = pop(stack).to>(); + // NOLINTNEXTLINE(performance-implicit-conversion-in-loop) + for (const T& item : list) { + if (item == key) { + push(stack, true); + return; + } + } + push(stack, false); +} + +void listAdd(Stack& stack); + +void listInplaceAdd(Stack& stack); + +void listMulIntLeftInPlace(Stack& stack); + +void listMulIntLeft(Stack& stack); + +void listMulIntRight(Stack& stack); + +void listSlice(Stack& stack); + +template +void listSort(Stack& stack) { + bool reverse = pop(stack).toBool(); + c10::List list = pop(stack).to>(); + std::sort(list.begin(), list.end(), [reverse](const T& a, const T& b) { + // FBCode errors without this check - "strict weak ordering" + // TODO: remove when possible, since it just slows down + // sorting and doesn't do anything useful + if (a == b) { + return false; + } + return (a < b) != reverse; + }); +} + +// Specialization for at::Tensor +template <> +void listSort(Stack& stack); + +template +void listCopyAndSort(Stack& stack) { + c10::List list = pop(stack).to>(); + auto list_copied = list.copy(); + std::sort(list_copied.begin(), list_copied.end(), [](const T& a, const T& b) { + // "strict weak ordering" issue - see other sort + if (a == b) { + return false; + } + return a < b; + }); + push(stack, list_copied); +} + +// Specialization for at::Tensor +template <> +void listCopyAndSort(Stack& stack); + +void listSetItem(Stack& stack); + +struct OperatorGeneratorArgs { + const char* schema_str; + bool isOperationCreator; + union { + void (*operation)(Stack&); + OperationCreator operationCreator; + }; + AliasAnalysisKind aliasAnalysis; + + explicit constexpr OperatorGeneratorArgs( + torch::detail::SelectiveStr schema_str, + void (*op)(Stack&), + AliasAnalysisKind aa) + : schema_str(schema_str), + isOperationCreator(false), + operation(op), + aliasAnalysis(aa) {} + + explicit constexpr OperatorGeneratorArgs( + torch::detail::SelectiveStr schema_str, + OperationCreator opCreator, + AliasAnalysisKind aa) + : schema_str(schema_str), + isOperationCreator(true), + operationCreator(opCreator), + aliasAnalysis(aa) {} + + template + explicit constexpr OperatorGeneratorArgs( + torch::detail::SelectiveStr, + Args...) 
+ : schema_str(nullptr), + isOperationCreator(false), + operation(nullptr), + aliasAnalysis(AliasAnalysisKind::INTERNAL_SPECIAL_CASE) {} +}; + +#define DEFINE_GENERIC_BINARY_OP( \ + aten_op, op, int_float_result, complex_result) \ + OperatorGeneratorArgs( \ + TORCH_SELECTIVE_SCHEMA(#aten_op \ + ".int_int(int a, int b) -> " #int_float_result), \ + [](Stack& stack) { \ + int64_t a, b; \ + pop(stack, a, b); \ + push(stack, op); \ + }, \ + aliasAnalysisFromSchema()), \ + OperatorGeneratorArgs( \ + TORCH_SELECTIVE_SCHEMA( \ + #aten_op \ + ".float_float(float a, float b) -> " #int_float_result), \ + [](Stack& stack) { \ + double a, b; \ + pop(stack, a, b); \ + push(stack, op); \ + }, \ + aliasAnalysisFromSchema()), \ + OperatorGeneratorArgs( \ + TORCH_SELECTIVE_SCHEMA( \ + #aten_op \ + ".complex_complex(complex a, complex b) -> " #complex_result), \ + [](Stack& stack) { \ + c10::complex a, b; \ + pop(stack, a, b); \ + push(stack, op); \ + }, \ + aliasAnalysisFromSchema()) + +// define implementations for primitive number ops +#define DEFINE_GENERIC_OP(aten_op, int_op, float_op, int_result, float_result) \ + OperatorGeneratorArgs( \ + TORCH_SELECTIVE_SCHEMA(#aten_op ".int(int a, int b) -> " #int_result), \ + [](Stack& stack) { \ + int64_t a, b; \ + pop(stack, a, b); \ + push(stack, int_op); \ + }, \ + aliasAnalysisFromSchema()), \ + OperatorGeneratorArgs( \ + TORCH_SELECTIVE_SCHEMA( \ + #aten_op ".float(float a, float b) -> " #float_result), \ + [](Stack& stack) { \ + double a, b; \ + pop(stack, a, b); \ + push(stack, float_op); \ + }, \ + aliasAnalysisFromSchema()) + +#define DEFINE_INT_FLOAT_OP(aten_op, op, result) \ + OperatorGeneratorArgs( \ + TORCH_SELECTIVE_SCHEMA(#aten_op \ + ".int_float(int a, float b) -> " #result), \ + [](Stack& stack) { \ + int64_t a; \ + double b; \ + pop(stack, a, b); \ + push(stack, op); \ + }, \ + aliasAnalysisFromSchema()), \ + OperatorGeneratorArgs( \ + TORCH_SELECTIVE_SCHEMA(#aten_op \ + ".float_int(float a, int b) -> " #result), \ + [](Stack& stack) { \ + double a; \ + int64_t b; \ + pop(stack, a, b); \ + push(stack, op); \ + }, \ + aliasAnalysisFromSchema()) + +#define DEFINE_INT_OP(aten_op, op) \ + OperatorGeneratorArgs( \ + TORCH_SELECTIVE_SCHEMA(#aten_op ".int(int a, int b) -> int"), \ + [](Stack& stack) { \ + int64_t a, b; \ + pop(stack, a, b); \ + push(stack, op); /* NOLINT(hicpp-signed-bitwise) */ \ + }, \ + aliasAnalysisFromSchema()) + +#define DEFINE_STR_CMP_OP(aten_op, op) \ + OperatorGeneratorArgs( \ + TORCH_SELECTIVE_SCHEMA(#aten_op ".str(str a, str b) -> bool"), \ + [](Stack& stack) { \ + auto b = pop(stack).toStringRef(); \ + auto a = pop(stack).toStringRef(); \ + push(stack, op); \ + }, \ + aliasAnalysisFromSchema()) + +// define a primitive op over Scalar operands. 
+// it's necessary to register this overload following +// int/float variations to avoid trapping Scalar args +// in unintended implicit conversions +#define DEFINE_SCALAR_BINARY_OP_AVOID_COLLISION_GENERIC( \ + aten_op, int_op, float_op, result, string_val) \ + OperatorGeneratorArgs( \ + TORCH_SELECTIVE_SCHEMA(#aten_op string_val \ + "(Scalar a, Scalar b) -> " #result), \ + [](Stack& stack) { \ + IValue x, y; \ + pop(stack, x, y); \ + if (x.isDouble()) { \ + if (y.isDouble()) { \ + double a = x.toDouble(); \ + double b = y.toDouble(); \ + push(stack, float_op); \ + } else { \ + double a = x.toDouble(); \ + int64_t b = y.toInt(); \ + push(stack, float_op); \ + } \ + } else { \ + if (y.isDouble()) { \ + int64_t a = x.toInt(); \ + double b = y.toDouble(); \ + push(stack, float_op); \ + } else { \ + int64_t a = x.toInt(); \ + int64_t b = y.toInt(); \ + push(stack, int_op); \ + } \ + } \ + }, \ + aliasAnalysisFromSchema()) + +#define DEFINE_SCALAR_BINARY_OP(aten_op, int_op, float_op, result) \ + DEFINE_SCALAR_BINARY_OP_AVOID_COLLISION_GENERIC( \ + aten_op, int_op, float_op, result, "") + +#define DEFINE_SCALAR_BINARY_OP_AVOID_COLLISION( \ + aten_op, int_op, float_op, result) \ + DEFINE_SCALAR_BINARY_OP_AVOID_COLLISION_GENERIC( \ + aten_op, int_op, float_op, result, ".Scalar_Scalar") + +#define DEFINE_BINARY_OP(aten_op, op) \ + DEFINE_GENERIC_OP(aten_op, op, op, int, float), \ + DEFINE_INT_FLOAT_OP(aten_op, op, float), \ + DEFINE_SCALAR_BINARY_OP(aten_op, op, op, Scalar) + +#define DEFINE_BINARY_FLOAT_OP(aten_op, op) \ + DEFINE_GENERIC_OP(aten_op, op, op, float, float), \ + DEFINE_INT_FLOAT_OP(aten_op, op, float), \ + DEFINE_SCALAR_BINARY_OP(aten_op, op, op, float) + +#define DEFINE_COMPARISON_OP(aten_op, op) \ + DEFINE_GENERIC_OP(aten_op, op, op, bool, bool), \ + DEFINE_INT_FLOAT_OP(aten_op, op, bool), \ + DEFINE_SCALAR_BINARY_OP(aten_op, op, op, bool), \ + DEFINE_STR_CMP_OP(aten_op, op) + +#define DEFINE_UNARY_INT_OP(aten_op, op, result) \ + OperatorGeneratorArgs( \ + TORCH_SELECTIVE_SCHEMA(#aten_op ".int(int a) -> " #result), \ + [](Stack& stack) { \ + int64_t a; \ + pop(stack, a); \ + push(stack, op); \ + }, \ + aliasAnalysisFromSchema()) + +#define DEFINE_UNARY_FLOAT_OP(aten_op, op, result) \ + OperatorGeneratorArgs( \ + TORCH_SELECTIVE_SCHEMA(#aten_op ".float(float a) -> " #result), \ + [](Stack& stack) { \ + double a; \ + pop(stack, a); \ + push(stack, op); \ + }, \ + aliasAnalysisFromSchema()) + +#define DEFINE_UNARY_OP(aten_op, op, int_result, float_result) \ + DEFINE_UNARY_INT_OP(aten_op, op, int_result), \ + DEFINE_UNARY_FLOAT_OP(aten_op, op, float_result), \ + OperatorGeneratorArgs( \ + TORCH_SELECTIVE_SCHEMA(#aten_op ".Scalar(Scalar a) -> Scalar"), \ + [](Stack& stack) { \ + IValue x; \ + pop(stack, x); \ + if (x.isDouble()) { \ + double a = x.toDouble(); \ + push(stack, static_cast(op)); \ + } else { \ + int64_t a = x.toInt(); \ + push(stack, static_cast(op)); \ + } \ + }, \ + aliasAnalysisFromSchema()) +#define DEFINE_BOOL_OP(aten_op, op) \ + OperatorGeneratorArgs( \ + TORCH_SELECTIVE_SCHEMA(#aten_op ".bool(bool a, bool b) -> bool"), \ + [](Stack& stack) { \ + bool a, b; \ + pop(stack, a, b); \ + push(stack, op); \ + }, \ + aliasAnalysisFromSchema()) +#define DEFINE_STRING_OP(op_name, string_op, result) \ + OperatorGeneratorArgs( \ + TORCH_SELECTIVE_SCHEMA(#op_name ".str(str a, str b) ->" #result), \ + [](Stack& stack) { \ + auto b = pop(stack).toStringRef(); \ + auto a = pop(stack).toStringRef(); \ + push(stack, string_op); \ + }, \ + aliasAnalysisFromSchema()) + 
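+// Illustrative expansion sketch (hypothetical operator name, not a real
+// registration): DEFINE_INT_OP(aten::my_op, a + b) produces an
+// OperatorGeneratorArgs whose schema string is
+//   "aten::my_op.int(int a, int b) -> int"
+// and whose operation pops two int64_t values `a` and `b` from the stack and
+// pushes `a + b`, registered with aliasAnalysisFromSchema().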
+//----------------------------------------------------------------------------- +//----------------------------------------------------------------------------- +//----------------------------------------------------------------------------- +//----------------------------------------------------------------------------- +#define DEFINE_UNARY_COMPLEX_OP(aten_op, op, result) \ + OperatorGeneratorArgs( \ + TORCH_SELECTIVE_SCHEMA(#aten_op ".complex(complex a) -> " #result), \ + [](Stack& stack) { \ + c10::complex a; \ + pop(stack, a); \ + push(stack, op); \ + }, \ + aliasAnalysisFromSchema()) + +// Some complex unary ops (like abs, angle) return real valued output, but most +// other unary ops return complex valued output. So, this macro is used in the +// former case where we can explicitly pass complex_result_cast argument, which +// is set to c10::complex in the macro `DEFINE_UNARY_OP_WITH_COMPLEX` +// defined below. +#define DEFINE_UNARY_OP_WITH_COMPLEX_CAST( \ + aten_op, \ + op, \ + int_result, \ + float_result, \ + complex_result, \ + complex_result_cast) \ + DEFINE_UNARY_INT_OP(aten_op, op, int_result), \ + DEFINE_UNARY_FLOAT_OP(aten_op, op, float_result), \ + DEFINE_UNARY_COMPLEX_OP(aten_op, op, complex_result), \ + OperatorGeneratorArgs( \ + TORCH_SELECTIVE_SCHEMA(#aten_op ".Scalar(Scalar a) -> Scalar"), \ + [](Stack& stack) { \ + IValue x; \ + pop(stack, x); \ + if (x.isDouble()) { \ + double a = x.toDouble(); \ + push(stack, static_cast(op)); \ + } else if (x.isComplexDouble()) { \ + c10::complex a = x.toComplexDouble(); \ + push(stack, static_cast(op)); \ + } else { \ + int64_t a = x.toInt(); \ + push(stack, static_cast(op)); \ + } \ + }, \ + aliasAnalysisFromSchema()) + +#define DEFINE_UNARY_OP_WITH_COMPLEX(aten_op, op, int_result, float_result) \ + DEFINE_UNARY_OP_WITH_COMPLEX_CAST( \ + aten_op, op, int_result, float_result, complex, c10::complex) + +#define DEFINE_GENERIC_OP_WITH_COMPLEX( \ + aten_op, \ + int_op, \ + float_op, \ + complex_op, \ + int_result, \ + float_result, \ + complex_result) \ + OperatorGeneratorArgs( \ + TORCH_SELECTIVE_SCHEMA(#aten_op ".int(int a, int b) -> " #int_result), \ + [](Stack& stack) { \ + int64_t a, b; \ + pop(stack, a, b); \ + push(stack, int_op); \ + }, \ + aliasAnalysisFromSchema()), \ + OperatorGeneratorArgs( \ + TORCH_SELECTIVE_SCHEMA( \ + #aten_op ".complex(complex a, complex b) -> " #complex_result), \ + [](Stack& stack) { \ + c10::complex a, b; \ + pop(stack, a, b); \ + push(stack, complex_op); \ + }, \ + aliasAnalysisFromSchema()), \ + OperatorGeneratorArgs( \ + TORCH_SELECTIVE_SCHEMA( \ + #aten_op ".float(float a, float b) -> " #float_result), \ + [](Stack& stack) { \ + double a, b; \ + pop(stack, a, b); \ + push(stack, float_op); \ + }, \ + aliasAnalysisFromSchema()) + +#define DEFINE_INT_COMPLEX_OP(aten_op, op, result) \ + OperatorGeneratorArgs( \ + TORCH_SELECTIVE_SCHEMA(#aten_op \ + ".int_complex(int a, complex b) -> " #result), \ + [](Stack& stack) { \ + int64_t a; \ + c10::complex b; \ + pop(stack, a, b); \ + push(stack, op); \ + }, \ + aliasAnalysisFromSchema()), \ + OperatorGeneratorArgs( \ + TORCH_SELECTIVE_SCHEMA( \ + #aten_op ".complex_int(complex a, int b) -> " #result), \ + [](Stack& stack) { \ + c10::complex a; \ + int64_t b; \ + pop(stack, a, b); \ + push(stack, op); \ + }, \ + aliasAnalysisFromSchema()) + +#define DEFINE_FLOAT_COMPLEX_OP(aten_op, op, result) \ + OperatorGeneratorArgs( \ + TORCH_SELECTIVE_SCHEMA( \ + #aten_op ".float_complex(float a, complex b) -> " #result), \ + [](Stack& stack) { \ + double a; \ + 
c10::complex b; \ + pop(stack, a, b); \ + push(stack, op); \ + }, \ + aliasAnalysisFromSchema()), \ + OperatorGeneratorArgs( \ + TORCH_SELECTIVE_SCHEMA( \ + #aten_op ".complex_float(complex a, float b) -> " #result), \ + [](Stack& stack) { \ + c10::complex a; \ + double b; \ + pop(stack, a, b); \ + push(stack, op); \ + }, \ + aliasAnalysisFromSchema()) + +#define DEFINE_SCALAR_BINARY_OP_WITH_COMPLEX_AVOID_COLLISION_GENERIC( \ + aten_op, int_op, float_op, complex_op, result, string_val) \ + OperatorGeneratorArgs( \ + TORCH_SELECTIVE_SCHEMA(#aten_op string_val \ + "(Scalar a, Scalar b) -> " #result), \ + [](Stack& stack) { \ + IValue x, y; \ + pop(stack, x, y); \ + if (x.isComplexDouble()) { \ + c10::complex a = x.toComplexDouble(); \ + if (y.isComplexDouble()) { \ + c10::complex b = y.toComplexDouble(); \ + push(stack, complex_op); \ + } else if (y.isDouble()) { \ + double b = y.toDouble(); \ + push(stack, complex_op); \ + } else { \ + int64_t b = y.toInt(); \ + push(stack, complex_op); \ + } \ + } else if (x.isDouble()) { \ + double a = x.toDouble(); \ + if (y.isComplexDouble()) { \ + c10::complex b = y.toComplexDouble(); \ + push(stack, complex_op); \ + } else if (y.isDouble()) { \ + double b = y.toDouble(); \ + push(stack, float_op); \ + } else { \ + int64_t b = y.toInt(); \ + push(stack, float_op); \ + } \ + } else { \ + int64_t a = x.toInt(); \ + if (y.isComplexDouble()) { \ + c10::complex b = y.toComplexDouble(); \ + push(stack, complex_op); \ + } else if (y.isDouble()) { \ + double b = y.toDouble(); \ + push(stack, float_op); \ + } else { \ + int64_t b = y.toInt(); \ + push(stack, int_op); \ + } \ + } \ + }, \ + aliasAnalysisFromSchema()) + +#define DEFINE_SCALAR_BINARY_OP_WITH_COMPLEX_WITHOUT_INT_COMPLEX_PAIR( \ + aten_op, int_op, float_op, complex_op, result) \ + OperatorGeneratorArgs( \ + TORCH_SELECTIVE_SCHEMA(#aten_op "(Scalar a, Scalar b) -> " #result), \ + [](Stack& stack) { \ + IValue x, y; \ + pop(stack, x, y); \ + if (x.isComplexDouble()) { \ + c10::complex a = x.toComplexDouble(); \ + if (y.isComplexDouble()) { \ + c10::complex b = y.toComplexDouble(); \ + push(stack, complex_op); \ + } else if (y.isDouble()) { \ + double b = y.toDouble(); \ + push(stack, complex_op); \ + } \ + } else if (x.isDouble()) { \ + double a = x.toDouble(); \ + if (y.isComplexDouble()) { \ + c10::complex b = y.toComplexDouble(); \ + push(stack, complex_op); \ + } else if (y.isDouble()) { \ + double b = y.toDouble(); \ + push(stack, float_op); \ + } else { \ + int64_t b = y.toInt(); \ + push(stack, float_op); \ + } \ + } else { \ + int64_t a = x.toInt(); \ + if (y.isDouble()) { \ + double b = y.toDouble(); \ + push(stack, float_op); \ + } else if (y.isInt()) { \ + int64_t b = y.toInt(); \ + push(stack, int_op); \ + } \ + } \ + }, \ + aliasAnalysisFromSchema()) + +#define DEFINE_SCALAR_BINARY_OP_WITH_COMPLEX( \ + aten_op, int_op, float_op, complex_op, result) \ + DEFINE_SCALAR_BINARY_OP_WITH_COMPLEX_AVOID_COLLISION_GENERIC( \ + aten_op, int_op, float_op, complex_op, result, "") + +#define DEFINE_BINARY_OP_WITH_COMPLEX(aten_op, op) \ + DEFINE_GENERIC_OP_WITH_COMPLEX(aten_op, op, op, op, int, float, complex), \ + DEFINE_INT_COMPLEX_OP(aten_op, op, complex), \ + DEFINE_FLOAT_COMPLEX_OP(aten_op, op, complex), \ + DEFINE_INT_FLOAT_OP(aten_op, op, float), \ + DEFINE_SCALAR_BINARY_OP_WITH_COMPLEX(aten_op, op, op, op, Scalar) + +#define DEFINE_COMPARISON_OP_WITH_COMPLEX(aten_op, op) \ + DEFINE_GENERIC_OP_WITH_COMPLEX(aten_op, op, op, op, bool, bool, bool), \ + DEFINE_INT_FLOAT_OP(aten_op, op, bool), \ + 
DEFINE_FLOAT_COMPLEX_OP(aten_op, op, bool), \ + DEFINE_SCALAR_BINARY_OP_WITH_COMPLEX_WITHOUT_INT_COMPLEX_PAIR( \ + aten_op, op, op, op, bool), \ + DEFINE_STR_CMP_OP(aten_op, op) + +TORCH_API at::Generator make_generator_for_device( + c10::Device device, + c10::optional seed = c10::nullopt); + +} // namespace torch::jit diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/script_profile.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/script_profile.h new file mode 100644 index 0000000000000000000000000000000000000000..7abaf5d73f83e3edbf68cdccd44061907847ad9d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/script_profile.h @@ -0,0 +1,103 @@ +#pragma once + +#include +#include +#include + +#include +#include +#include +#include + +namespace torch::jit { +namespace profiling { + +struct Datapoint { + using Timepoint = std::chrono::time_point; + SourceRange sourceRange; + Timepoint start; + Timepoint end; + + explicit Datapoint(SourceRange sr) + : sourceRange(std::move(sr)), start(std::chrono::steady_clock::now()) {} +}; + +class TORCH_API InstructionSpan { + public: + explicit InstructionSpan(Node&); + ~InstructionSpan(); + InstructionSpan(InstructionSpan&&) = delete; + InstructionSpan& operator=(InstructionSpan&&) = delete; + + private: + std::unique_ptr datapoint_; +}; + +bool TORCH_API isProfilingOngoing(); + +} // namespace profiling + +struct TORCH_API InstructionStats : public CustomClassHolder { + int64_t count{0}; + std::chrono::nanoseconds duration{0}; +}; + +class TORCH_API SourceStats : public CustomClassHolder { + public: + using LineMap = c10::Dict>; + + SourceStats(SourceRef source, LineMap lineMap) + : source_(std::move(source)), lineMap_(std::move(lineMap)) {} + + const SourceRef& getSourceRef() const { + return source_; + } + + const LineMap& getLineMap() const { + return lineMap_; + } + + private: + SourceRef source_; + LineMap lineMap_; +}; + +/** + * ScriptProfile is an underlying C++ implementation for TorchScript profiling. + * The profiling section is specified by calling enable() and disable(): + * + * ... + * scriptProfile.enable(); + * ... + * (scripts) + * ... + * scriptProfile.disable(); + * ... + * + * NOTE: you cannot attach the profiler while the script is running. + * + * To retrieve collected runtime data, users may call dumpStats() and do + * arbitrary filtering on the data they want. Note that dumpStats() should + * not be called inside a profiling section. + * In general, stats are aggregated per source function body, and then by line + * number. + */ +class TORCH_API ScriptProfile : public CustomClassHolder { + // Aggregates datapoints by function source id, then by line number. 
+ using LineMap = std::map; + using SourceMap = std::map>; + + public: + void enable(); + void disable(); + const SourceMap& dumpStats(); + void addDatapoint(std::shared_ptr); + ~ScriptProfile() override; + + private: + bool enabled_{false}; + std::vector> datapoints_; + SourceMap sourceMap_; +}; + +} // namespace torch::jit diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/serialized_shape_function_registry.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/serialized_shape_function_registry.h new file mode 100644 index 0000000000000000000000000000000000000000..e822f3f93e3d29d533f27e8565d7a0de787f33b5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/serialized_shape_function_registry.h @@ -0,0 +1,15 @@ +#pragma once + +#include +#include + +namespace torch::jit { + +TORCH_API const std::string& GetSerializedShapeFunctions(); + +TORCH_API const OperatorMap& GetShapeFunctionMappings(); + +TORCH_API const OperatorMap>& +GetBoundedShapeMappings(); + +} // namespace torch::jit diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/shape_function_registry.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/shape_function_registry.h new file mode 100644 index 0000000000000000000000000000000000000000..533b1f11020763e2d6d1d05734c6a4b09bcc44aa --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/shape_function_registry.h @@ -0,0 +1,12 @@ +#pragma once + +#include +#include + +namespace torch::jit { + +TORCH_API const std::string& GetSerializedFuncs(); + +TORCH_API const OperatorMap& GetFuncMapping(); + +} // namespace torch::jit diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/simple_graph_executor_impl.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/simple_graph_executor_impl.h new file mode 100644 index 0000000000000000000000000000000000000000..34272000f0d1a3e2e808ce2bbe27ec4ab299380e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/simple_graph_executor_impl.h @@ -0,0 +1,23 @@ +#pragma once +#include +#include +#include + +namespace torch::jit { + +struct TORCH_API SimpleGraphExecutorImpl : public GraphExecutorImplBase { + SimpleGraphExecutorImpl( + const std::shared_ptr& graph, + std::string function_name); + + const ExecutionPlan& getPlanFor( + Stack& stack, + c10::optional remaining_bailout_depth) override; + GraphExecutorState getDebugState() override; + ~SimpleGraphExecutorImpl() override = default; + + private: + c10::optional execution_plan_; +}; + +} // namespace torch::jit diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/slice_indices_adjust.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/slice_indices_adjust.h new file mode 100644 index 0000000000000000000000000000000000000000..720c8b69e5ecd55cbe9a00d13342fa9f5cbc98db --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/slice_indices_adjust.h @@ -0,0 +1,26 @@ +#pragma once + +#include +#include +#include + +namespace torch::jit { + +// Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, +// 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020 Python Software +// Foundation; All Rights Reserved +// +// Stolen (with appropriate modifications) by @agolynski +// (https://github.com/pytorch/pytorch/pull/33019) from cpython 
repo +// Objects/sliceobject.c with comment: this is harder to get right than you +// might think +// +// This adjusts indexes according to python list semantics and returns number +// of elements in the resulting list. +TORCH_API int64_t slice_indices_adjust( + int64_t length, + int64_t* start, + int64_t* stop, + int64_t step); + +} // namespace torch::jit diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/symbolic_script.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/symbolic_script.h new file mode 100644 index 0000000000000000000000000000000000000000..64e0d6661baebc3bb0c82831a8566dba3e0112f6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/symbolic_script.h @@ -0,0 +1,18 @@ +#pragma once +// This file is temporary until native_functions.yaml and derivatives.yaml are +// merged. Ideally this should all go into native_functions.yaml + +#include +#include +#include + +namespace torch::jit { +struct GradientPair { + std::shared_ptr forward; + std::shared_ptr backward; +}; + +TORCH_API c10::optional gradientInfoForSchema( + const FunctionSchema& schema); +TORCH_API bool hasGradientInfoForSchema(const FunctionSchema& schema); +} // namespace torch::jit diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/symbolic_shape_registry.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/symbolic_shape_registry.h new file mode 100644 index 0000000000000000000000000000000000000000..2d09eb27876b7674e6bb29651c7a5082d0f6b599 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/symbolic_shape_registry.h @@ -0,0 +1,69 @@ +#pragma once +// This file is temporary until native_functions.yaml and derivatives.yaml are +// merged. Ideally this should all go into native_functions.yaml + +#include +#include + +namespace torch::jit { + +/* +ADDING A NEW SHAPE GRAPH: +- For one node schema, there is one corresponding registered shape compute +graph. The schema of the graph should be the same except for Tensor arguments. +For every Tensor input in operator schema, there should be a List[int] +corresponding to that Tensor's shape. For example: "aten::linear(Tensor input, +Tensor weight, Tensor? bias=None) -> Tensor" ==> def linear(input: List[int], +weight: List[int], bias: Optional[List[int]]) + +Additionally, arguments which are unused at the end of the schema may be left +off. This allows sharing a single graph for multiple function schemas, such as +unary operators with different trailing arguments that do not affect the output +shape. + +The shape graph should return a new, unaliased List[int] (or tuple of lists for +multiple returns) and should not modify any input lists. This allows the shape +graphs to be composed and executed. + +The shape analysis (particularly for non-complete, or symbolic shapes) works by +partially evaluating the JIT IR. It may be possible for a Graph to be registered +that we cannot currently partially evaluate. If this happens, please file an +issue. There are lints registered to avoid particular known patterns (continue +or break or early return in a loop). Those may be improved in the future, please +file an issue if necessary. + +To debug (and write initially) the recommended flow is to define these functions +in python and iterate there. Functions should be added to +torch/jit/_shape_functions. 
+ +To test operators, the preferred flow is through OpInfos, with +`assert_jit_shape_analysis=True`. If this is not feasible, you can look at tests +in `test_symbolic_shape_analysis.py` such as `test_adaptive_avg_pool2d`. + +Operators which take in a list of tensors, such as concat, are not yet +supported. Concat has been special cased and could be generalized as needed. +Please file an issue. +*/ + +struct BoundedShapeGraphs { + std::shared_ptr lower_bound; + std::shared_ptr upper_bound; +}; + +TORCH_API void RegisterShapeComputeGraphForSchema( + const FunctionSchema& schema, + std::shared_ptr g); + +TORCH_API c10::optional> shapeComputeGraphForSchema( + const FunctionSchema& schema); + +TORCH_API c10::optional boundedGraphsForSchema( + const FunctionSchema& schema); + +TORCH_API std::vector RegisteredShapeComputeSchemas(); + +TORCH_API void LintShapeComputeGraph( + const FunctionSchema* schema, + const std::shared_ptr& graph); + +} // namespace torch::jit diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/symbolic_shape_registry_util.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/symbolic_shape_registry_util.h new file mode 100644 index 0000000000000000000000000000000000000000..e1280504e5c914f51809e300d0d46bc182ae9789 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/symbolic_shape_registry_util.h @@ -0,0 +1,12 @@ +#pragma once +// This file is temporary until native_functions.yaml and derivatives.yaml are +// merged. Ideally this should all go into native_functions.yaml + +#include +#include + +namespace torch::jit { + +TORCH_API const OperatorMap& get_tensorexpr_elementwise_set(); + +} // namespace torch::jit diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/vararg_functions.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/vararg_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..0be53d4ffeb28b910e1c3f9d3eb1115a7e527784 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/vararg_functions.h @@ -0,0 +1,41 @@ +#pragma once +#include +#include +#include +#include +#include + +namespace torch::jit { + +void tupleUnpack(Stack& stack); + +void format(Stack& stack, size_t num_inputs); + +void einsum(Stack& stack, size_t num_inputs); + +void percentFormat(Stack& stack, size_t num_inputs); + +void listUnpack(Stack& stack, size_t num_outputs); + +void tupleConstruct(Stack& stack, size_t num_inputs); + +void namedTupleConstruct(Stack& stack, c10::TypePtr type, size_t num_inputs); + +void listConstruct(Stack& stack, const c10::Type& list_type, size_t num_inputs); + +void dictConstruct(Stack& stack, const c10::Type& type, size_t num_inputs); + +// as weak_ref will create a Object with a non-owning CompilationUnit reference, +// for use as a constant in the Graph to avoid a reference cycle +void createObject( + Stack& stack, + const at::ClassTypePtr& type, + bool as_weak_ref = false); + +void isinstance(Stack& stack, at::ArrayRef types); + +void tupleSlice(Stack& stack, size_t begin, size_t end); + +void dequantize(Stack& stack); + +} // namespace torch::jit diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/variable_tensor_list.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/variable_tensor_list.h new file mode 100644 index 0000000000000000000000000000000000000000..e8dcd4f2c5b0b95f0f727e97794ddc321fe49fc7 --- 
/dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/variable_tensor_list.h
@@ -0,0 +1,17 @@
+#pragma once
+#include
+
+namespace torch::jit {
+
+// a wrapper to mark places where we expect all the at::Tensors to be
+// variables
+struct variable_tensor_list : public std::vector<at::Tensor> {
+  variable_tensor_list() = default;
+  template <class InputIt>
+  variable_tensor_list(InputIt first, InputIt last)
+      : std::vector<at::Tensor>(first, last) {}
+  explicit variable_tensor_list(std::vector<at::Tensor>&& tensor)
+      : std::vector<at::Tensor>(std::move(tensor)) {}
+};
+
+} // namespace torch::jit
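
A minimal usage sketch for the two runtime helpers declared just above, slice_indices_adjust and variable_tensor_list. The header paths are taken from this diff; the at::Tensor element type, the ATen include, the example slice values, and the build/link setup are assumptions for illustration only, not something the headers themselves prescribe.

// Illustrative sketch only -- not part of the changeset above.
// Assumes a libtorch build; <ATen/ATen.h> and the at::Tensor element type of
// variable_tensor_list are inferred from the header comment, not stated here.
#include <torch/csrc/jit/runtime/slice_indices_adjust.h>
#include <torch/csrc/jit/runtime/variable_tensor_list.h>
#include <ATen/ATen.h>
#include <cstdint>
#include <iostream>
#include <utility>
#include <vector>

int main() {
  // Clamp a Python-style slice [-3:100:1] against a sequence of length 10,
  // following the CPython slice semantics referenced in the header comment.
  int64_t start = -3, stop = 100, step = 1;
  const int64_t n =
      torch::jit::slice_indices_adjust(/*length=*/10, &start, &stop, step);
  std::cout << "slice covers " << n << " elements (start=" << start
            << ", stop=" << stop << ")\n";

  // variable_tensor_list is a thin std::vector<at::Tensor> wrapper used to
  // mark call sites where every tensor is expected to be a Variable.
  std::vector<at::Tensor> tensors{at::ones({2, 2}), at::zeros({3})};
  torch::jit::variable_tensor_list marked(std::move(tensors));
  std::cout << "wrapped " << marked.size() << " tensors\n";
  return 0;
}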
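
The script_profile.h comment earlier in this diff describes the enable()/disable()/dumpStats() flow only in outline. A hedged sketch of that flow for a loaded TorchScript module follows; the "model.pt" path, the example input, the c10::make_intrusive construction, and the way the aggregated stats are consumed are illustrative assumptions rather than an API contract stated in the header.

// Illustrative sketch of the ScriptProfile flow from script_profile.h -- not
// part of the changeset above. "model.pt", the input shape, and the use of
// c10::make_intrusive are assumptions for the sake of a runnable example.
#include <torch/csrc/jit/runtime/script_profile.h>
#include <torch/script.h>
#include <iostream>
#include <vector>

int main() {
  // Hypothetical model file; any scripted or traced module would do.
  torch::jit::Module module = torch::jit::load("model.pt");
  std::vector<torch::jit::IValue> inputs;
  inputs.push_back(torch::ones({1, 3}));

  auto profile = c10::make_intrusive<torch::jit::ScriptProfile>();
  profile->enable();        // begin the profiling section
  module.forward(inputs);   // run the scripted code being profiled
  profile->disable();       // end the profiling section

  // Per the header comment, dumpStats() should be called outside the
  // profiling section; stats are aggregated per source, then per line.
  const auto& stats = profile->dumpStats();
  std::cout << "collected stats for " << stats.size() << " source objects\n";
  return 0;
}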