diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_debug_handler.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_debug_handler.h
new file mode 100644
index 0000000000000000000000000000000000000000..d25ce2f8cb0416cc3afe1ecd031aa73d28201f98
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_debug_handler.h
@@ -0,0 +1,140 @@
+#pragma once
+#include
+
+#include
+#include
+#include
+
+#include
+
+namespace torch {
+namespace jit {
+
+/*
+ * BackendDebugHandleManager is responsible for issuing debug handles to
+ * backends. Debug handles are associated with nodes of a graph.
+ * BackendDebugHandleManager also maintains a map
+ * [debug-handle, DebugInfoTuple = {source range, inlined callstack ptr}]
+ * that helps generate a callstack for exceptions raised using debug handles.
+ * Effectively, a debug handle is something given to the backend; later, when
+ * an exception occurs in the backend, the backend can report, via the debug
+ * handle, where the exception occurred, and the runtime can then generate
+ * the callstack corresponding to the exception.
+ * There are two parts to BackendDebugHandleManager:
+ * 1. static std::atomic debug_handle
+ * 2. Map of [debug-handle, DebugInfoTuple]
+ *
+ * About 1:
+ * Why do debug handles have to be unique? By ensuring uniqueness of debug
+ * handles, we remove the burden of another layer of mapping where we would
+ * need to say which set of debug handles was generated for which lowered
+ * module or bytecode function. This simplifies the API for serialization,
+ * since debug handles uniquely identify a DebugInfoTuple, and it simplifies
+ * the runtime API for throwing exceptions: exception throwing only needs to
+ * know the debug_handle, not which module or method threw it.
+ * There are two issues to keep in mind, though, for the static std::atomic
+ * debug_handle:
+ * A. Performance implications of using an atomic variable. However, this is
+ * only used during compilation, so we assume we can absorb some of that
+ * penalty; if there is no contention, there is even less to worry about.
+ * B. If repeated compilation is part of a long-running process, we may
+ * overflow int64_t. We could detect and fail on this; for now this is not
+ * done.
+ *
+ * Now about 2:
+ * There are two use cases for [debug-handle, DebugInfoTuple]:
+ * A. During bytecode generation, the DebugInfoTuples corresponding to the
+ * nodes of the inlined graph being serialized are stored in this object and
+ * a unique debug handle is returned. This unique debug handle is stored in
+ * the mobile debug info for PyTorch Lite models. It will be used for raising
+ * exceptions as well as for profiling.
+ * B. During backend lowering, each backend's preprocess/compile method can
+ * compile a method's graph and serialize those methods. Once the method is
+ * lowered to the backend, the graph is essentially lost. Without access to
+ * the graph it is hard to generate model-level debug info, so the debug
+ * handles provide a way to map nodes of the graph to the model-level debug
+ * info.
+ *
+ * During bytecode model serialization, [debug-handle, DebugInfoTuple] is
+ * serialized. Now we know a. the debug handles and b. how to map debug
+ * handles to model source code. Thus we can either do eager symbolication by
+ * converting debug handles to the corresponding source code at runtime, or
+ * do lazy symbolication offline.
+ *
+ * Note that it is not necessary to serialize [debug-handle, DebugInfoTuple]
+ * corresponding to a lowered backend if the lowering process, that is
+ * preprocess/compile, and execution happen in the same session; then eager
+ * symbolication can be employed.
+ *
+ * Now how does BackendDebugHandleManager capture all of the above?
+ * By providing two APIs:
+ * 1. getNextDebugHandle, which given a Node* returns a unique debug handle
+ * that will uniquely identify a DebugInfoTuple, and
+ * 2. getCallStackPtrMap, which returns the map
+ * [debug-handle, DebugInfoTuple].
+ *
+ * 1 provides debug handles to backends and 2 provides the runtime a way to
+ * map debug handles to source-level debug info.
+ *
+ * So why does a debug handle map to DebugInfoTuple = {source range and
+ * inlined cs}, i.e. {debug_handle, source_range_tag, serialized_callstack}?
+ * Take this example:
+ *   class L(nn.Module):
+ *     def __init__(self):
+ *       ...
+ *     def forward(self, x):
+ *       return x * 5
+ *   class M(nn.Module):
+ *     def __init__(self):
+ *       ...
+ *     def forward(self, x):
+ *       return x - 2
+ *   class N(nn.Module):
+ *     def __init__(self):
+ *       self.m = M()
+ *     def forward(self, x):
+ *       return self.m(x) + 3
+ *   m = torch.jit.script(N())
+ * Once you inline m's forward method, m.forward.graph will look something
+ * like this:
+ *   graph(%self...):
+ *     %x = aten::mul(..)
+ *     %x = aten::sub(x, ..)
+ *     %y = aten::add(x, ..)
+ *     ..
+ * The inlined callstack ptrs for these nodes will look like:
+ *   aten::mul's inlined CS (callstack): [N.forward, source range] ->
+ *                                       [M.forward, source range]
+ *   aten::sub's inlined CS (callstack): [N.forward, source range]
+ *   aten::add's inlined CS: null
+ * The mul node's inlined CS contains only information about the callsites'
+ * source ranges. The information about the mul node's own source range
+ * ('return x * 5') is not available in its inlined CS; it is part of the
+ * node's source range instead. Thus, to get the full stack
+ * [N.forward, source range] -> [M.forward, source range] ->
+ * [aten::mul's source range], we need to track both mul's source range and
+ * its inlined CS.
+ */
+
+using BackendDebugInfoMapType =
+    std::unordered_map;
+
+/*
+ * This class is used to generate the debug info map.
+ * A backend's preprocess will call generate_debug_handles (see
+ * backend_detail.cpp), which uses the debug handle manager to generate debug
+ * handles. When the lowering process finishes, calling stopRecording will
+ * return the debug info map from the debug handle manager.
+ */
+class TORCH_API BackendDebugInfoRecorder {
+ public:
+  BackendDebugInfoRecorder() = default;
+  int64_t getNextDebugHandle(const Node* node);
+  // Reason this is not done as RAII is that the work done in stopRecording
+  // can throw; throwing from a dtor calls std::terminate and thus defeats
+  // any exception catching at a higher level.
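  // Editorial sketch (not part of this header): a hypothetical backend
  // preprocess might drive the recorder roughly as follows, assuming `g` is
  // the std::shared_ptr<Graph> of the method being lowered:
  //
  //   BackendDebugInfoRecorder recorder;
  //   std::unordered_map<Node*, int64_t> node_to_handle;
  //   for (Node* n : g->nodes()) {
  //     // Record each node's handle in the backend-specific artifact so the
  //     // backend can report it when an exception is raised at runtime.
  //     node_to_handle[n] = recorder.getNextDebugHandle(n);
  //   }
  //   // Once lowering is done, collect the [debug-handle, DebugInfoTuple]
  //   // map for serialization alongside the model.
  //   BackendDebugInfoMapType debug_info_map = recorder.stopRecording();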
+ BackendDebugInfoMapType stopRecording(); + NodeToDebugHandle generate_debug_handles(const std::shared_ptr& graph); + + private: + static std::atomic unique_debug_handle_; + BackendDebugInfoMapType handles_to_inlined_callstack_ptrs_; +}; + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/add_if_then_else.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/add_if_then_else.h new file mode 100644 index 0000000000000000000000000000000000000000..c6b3f9376d6b328f1d7eaf6cda14b007a6fc8e45 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/add_if_then_else.h @@ -0,0 +1,11 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +TORCH_API bool AddIfThenElseOp(std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/autocast.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/autocast.h new file mode 100644 index 0000000000000000000000000000000000000000..ca21f2c60d0315a356df82791a1ba60f9cfc0123 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/autocast.h @@ -0,0 +1,15 @@ + +#pragma once + +#include + +namespace torch { +namespace jit { + +TORCH_API void Autocast(const std::shared_ptr& graph); + +TORCH_API bool setAutocastMode(bool value); +TORCH_API bool autocastEnabled(); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/bailout_graph.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/bailout_graph.h new file mode 100644 index 0000000000000000000000000000000000000000..266f54023e1a153492f6f37160c04c7e013e588d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/bailout_graph.h @@ -0,0 +1,34 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include +#include + +namespace torch { +namespace jit { + +// Replaces prim::Guard nodes with prim::BailOut nodes and +// computes sets of inputs needed to resume execution at +// bailout points +TORCH_API void InsertBailOuts(std::shared_ptr graph); + +// Builds a bailout graph into `target` (which is an empty graph) +// for a given bailout point `bailout_index` +// from the original graph `orig` (the original unoptimized graph) +// BailOut graphs allow Interpreter to resume +// execution of the (un/de)optimized graph (i.e. +// a graph that doesn't rely on any assumptions derived from +// on profiling information) from a given BailOut point +// should any of the assumptions fail for an actual input. 
+TORCH_API std::shared_ptr BuildBailOutGraphFrom( + int64_t bailout_index, + const std::shared_ptr& orig, + const std::shared_ptr& target); +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/batch_mm.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/batch_mm.h new file mode 100644 index 0000000000000000000000000000000000000000..643134750cc483439685c4dde566ead0d5bc7a3a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/batch_mm.h @@ -0,0 +1,11 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +TORCH_API void BatchMM(std::shared_ptr& graph); + +} +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/common_subexpression_elimination.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/common_subexpression_elimination.h new file mode 100644 index 0000000000000000000000000000000000000000..9c8956029dace1988981fa98e8a05f9498c25f81 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/common_subexpression_elimination.h @@ -0,0 +1,11 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +TORCH_API bool EliminateCommonSubexpression( + const std::shared_ptr& graph); +} +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/constant_pooling.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/constant_pooling.h new file mode 100644 index 0000000000000000000000000000000000000000..6eb4ca16077eb41287c5009ebc52744eaf235a62 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/constant_pooling.h @@ -0,0 +1,11 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +TORCH_API void ConstantPooling(const std::shared_ptr& graph); + +} +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/dead_code_elimination.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/dead_code_elimination.h new file mode 100644 index 0000000000000000000000000000000000000000..780c11f95a9bb9dcaa4fd07aec92409d0f2cd527 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/dead_code_elimination.h @@ -0,0 +1,42 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// If given a top-level graph, DCE will construct do alias analysis that allows +// for "smarter" dead code elimination (we will eliminate mutable ops if we can +// prove the mutated values are not used). Otherwise, we will not allow DCE to +// eliminate mutable ops. +// +// So, prefer to use the graph version if you can. +enum class DCESideEffectPolicy : uint8_t { + // default behavior: dead code elimination will check if a node has side + // effects + // and not delete it if it does. + DONT_DELETE_NODES_WITH_SIDE_EFFECTS, + // with this flag, dead code elimination will not check if a node has side + // effects and treat nodes with side effects like any other node, + // i.e. delete them if their outputs aren't used anywhere. 
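// Editorial sketch (not part of this header): the two call styles declared
// below, assuming the caller already holds a std::shared_ptr<Graph> `graph`
// and a Block* `block`:
//
//   // Default policy: nodes with side effects are never deleted.
//   EliminateDeadCode(graph);
//
//   // Opt in to deleting side-effecting nodes whose outputs are unused.
//   EliminateDeadCode(
//       block,
//       /*recurse=*/true,
//       DCESideEffectPolicy::ALLOW_DELETING_NODES_WITH_SIDE_EFFECTS);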
+ ALLOW_DELETING_NODES_WITH_SIDE_EFFECTS +}; + +TORCH_API void EliminateDeadCode( + const std::shared_ptr& graph, + DCESideEffectPolicy sideEffectPolicy = + DCESideEffectPolicy::DONT_DELETE_NODES_WITH_SIDE_EFFECTS); +TORCH_API void EliminateDeadCode( + Block* block, + bool recurse = true, + DCESideEffectPolicy sideEffectPolicy = + DCESideEffectPolicy::DONT_DELETE_NODES_WITH_SIDE_EFFECTS); + +// Invoke the user-provided callback on all live values before deleting anything +TORCH_API void EliminateDeadCode( + Block* block, + std::function&)> cb, + DCESideEffectPolicy sideEffectPolicy = + DCESideEffectPolicy::DONT_DELETE_NODES_WITH_SIDE_EFFECTS); +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/decompose_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/decompose_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..400e5997d6368d08edfacc76c969c07828a2c17b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/decompose_ops.h @@ -0,0 +1,11 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +TORCH_API void DecomposeOps(std::shared_ptr& graph); + +} +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/dtype_analysis.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/dtype_analysis.h new file mode 100644 index 0000000000000000000000000000000000000000..b4bcdcdc7ae7ea32abf2bfb49a07be8ec48dd82d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/dtype_analysis.h @@ -0,0 +1,17 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace jit { +struct Graph; + +// Propagate tensor properties (e.g., dtype, device, is_contiguous, layout) +// propagation on all tensor objects. Currently, we only support dtype +// propagation +TORCH_API bool DtypePropagation(std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fixup_trace_scope_blocks.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fixup_trace_scope_blocks.h new file mode 100644 index 0000000000000000000000000000000000000000..472d95843a1c6dc921b072b59ea013fcbb6d57ed --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fixup_trace_scope_blocks.h @@ -0,0 +1,47 @@ +#pragma once + +#include +#include + +namespace torch { +namespace jit { + +// Directly after tracing, we have an ill-formed graph with blocks inserted. +// Example: +// +// graph(%self : ClassType, +// %input.1 : Float(3, 4)): +// %1 : ClassType = prim::GetAttr[name="relu1"](%self) +// %2 : ClassType = prim::GetAttr[name="relu2"](%self) +// %3 : ClassType = prim::GetAttr[name="rrr"](%2) +// = prim::TracedModuleForward[scope="__module.relu1"]() +// block0(): +// %input : Float(3, 4) = aten::relu(%input.1), +// -> () +// = prim::TracedModuleForward[scope="__module.relu2"](), +// block0(): +// = prim::TracedModuleForward[scope="__module.relu2.rrr"](), +// block0(): +// %6 : Float(3, 4) = aten::relu(%input), +// -> () +// -> () +// return (%6) +// +// In this pass, we: +// 1) Lift Value defs to as high of a scope as needed to ensure that +// they dominate all their uses. 
For example, `input` in the above +// graph needs to be lifted to the top-level block so that its use +// in the second `relu` operator is dominated. +// 2) Lambda lift the blocks. This ensures that all values used within +// each scope have their defs captured. +// 3) Convert the scope blocks into methods on their respective Modules, +// and convert TracedModuleForward nodes to CallMethod nodes into those +// methods. +// +// Then, we'll have a well-formed graph with proper method calls. +TORCH_API void FixupTraceScopeBlocks( + std::shared_ptr& graph, + Module* self); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fold_conv_bn.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fold_conv_bn.h new file mode 100644 index 0000000000000000000000000000000000000000..4032d22f2bc13f667cec5025148879cd7117cf83 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fold_conv_bn.h @@ -0,0 +1,37 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +/** \brief Fold Conv2d-BatchNorm2d into Conv2d in all methods of this + * module and all its submodules, forward is included by default. + * + * The weight and bias of the Conv2d are correspondingly updated. Should only be + * used on modules in eval mode. + */ +TORCH_API Module FoldConvBatchNorm(const Module& module); + +struct TORCH_API ConvBNParameters { + at::Tensor conv_w; + at::Tensor conv_b; + at::Tensor bn_rm; + at::Tensor bn_rv; + double bn_eps = 0.0; + at::Tensor bn_w; + at::Tensor bn_b; +}; + +/** + * Given the current weight and bias tensors of a Conv module and parameters + * of the BatchNorm module we're folding with, compute the updated values + * for the weight and bias. + * + * The function is basically copied from torch/nn/utils/fusion.py + */ +TORCH_API std::tuple computeUpdatedConvWeightAndBias( + const ConvBNParameters& p); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_graph_optimizations.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_graph_optimizations.h new file mode 100644 index 0000000000000000000000000000000000000000..87c610781f1126602123bb951f365bc365791f9d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_graph_optimizations.h @@ -0,0 +1,22 @@ +#pragma once + +#include + +/** \brief Runs a set of Optimizations that Optimize Frozen Graphs + * + * Currently this set of optimizations is: + * - FoldFrozenConvBatchnorm + * - FoldFrozenConvAddOrSub + * - FoldFrozenConvMulOrDiv + * - FoldFrozenLinearBatchnorm + */ + +namespace torch { +namespace jit { + +TORCH_API void OptimizeFrozenGraph( + std::shared_ptr& graph, + bool optimize_numerics = true); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_linear_transpose.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_linear_transpose.h new file mode 100644 index 0000000000000000000000000000000000000000..e952d1c43cef39020405de944bba8b3856398ed3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_linear_transpose.h @@ -0,0 +1,13 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Transposes the weight matrix for frozen linear modules. 
+// and converts it into a matmul +TORCH_API bool FrozenLinearTranspose(std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fuse_linear.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fuse_linear.h new file mode 100644 index 0000000000000000000000000000000000000000..56d37518e37866fa4ee14242215e5079c9c30f4b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fuse_linear.h @@ -0,0 +1,24 @@ +/** \brief Fusing linear patterns as single at::linear for easier pattern + * matching in later passes + */ +#pragma once + +#include + +namespace torch { +namespace jit { + +/** \brief Match the at::linear pattern and fuse it into a single at::linear + * This pass fuse the addmm or matmul + add generated by JIT back to linear + * This pass can be deleted once the JIT can emit the aten::linear in the future + */ +TORCH_API void FuseLinear(std::shared_ptr& graph); + +/** Swap functional linear CallFunctions to aten::linear + */ +TORCH_API void SwapFunctionalLinear(std::shared_ptr& graph); +/** Swap all functional linear CallFunctions in module + */ +TORCH_API void SwapFunctionalLinear(Module& module); +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/graph_rewrite_helper.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/graph_rewrite_helper.h new file mode 100644 index 0000000000000000000000000000000000000000..0920830babb8b326e994339e8c479593091d36cb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/graph_rewrite_helper.h @@ -0,0 +1,54 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch { +namespace jit { +namespace graph_rewrite_helper { + +std::string getFuncName(Value* func_value); +Value* getValue( + const std::string& name, + const std::unordered_map& match_vmap, + const std::unordered_map& vmap); +c10::optional getIValue( + const std::string& name, + const std::unordered_map& match_vmap, + const std::unordered_map& vmap); +TORCH_API void replaceConvolutionWithAtenConv(std::shared_ptr& graph); + +bool isClampFusable( + const Match& match, + const std::unordered_map& vmap); + +// This struct contains a compiled IR patterns slated for use in the +// findPatternMatches function. The struct encapsulates the common +// information from parseIR that is used in conjunction with the +// pattern matching facility. 
A const instance of this struct can +// also be stored away to cache the compiled IR pattern and reduce +// runtime cost +struct PatternInfo { + std::string pattern_string; + std::unique_ptr pattern_graph; + std::unordered_map vmap; + std::vector filters; + + static PatternInfo parse_from_str( + std::string pattern_string, + const std::vector& filters = {}) { + PatternInfo rv{ + std::move(pattern_string), + std::make_unique(), + decltype(vmap){}, + filters}; + parseIR(rv.pattern_string, rv.pattern_graph.get(), rv.vmap); + return rv; + } +}; + +} // namespace graph_rewrite_helper +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/guard_elimination.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/guard_elimination.h new file mode 100644 index 0000000000000000000000000000000000000000..03f13140e370df55903bd9ce00ea04b624bce795 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/guard_elimination.h @@ -0,0 +1,19 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include +#include + +namespace torch { +namespace jit { + +TORCH_API void EliminateRedundantGuards(std::shared_ptr graph); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/hoist_conv_packed_params.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/hoist_conv_packed_params.h new file mode 100644 index 0000000000000000000000000000000000000000..37bfa07a1267023bcd6d2227b03dac75d0f233b1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/hoist_conv_packed_params.h @@ -0,0 +1,12 @@ +#pragma once + +#include +#include + +namespace torch { +namespace jit { + +void HoistConvPackedParams(script::Module& m); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inline_fork_wait.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inline_fork_wait.h new file mode 100644 index 0000000000000000000000000000000000000000..c2dbacdc4ddab7a18f495cfc9f8dc34640f65902 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inline_fork_wait.h @@ -0,0 +1,16 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Inline Fork and Wait calls. This is used, for example, in ONNX export, where +// we do not support the explicit parallelism structures and would rather +// just have a flat graph. This inlines the forked section in the fork() +// callsite and replaces uses of the result of wait() calls with the values +// produced from the (now-inlined) forked section. 
+TORCH_API void InlineForkWait(const std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/insert_guards.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/insert_guards.h new file mode 100644 index 0000000000000000000000000000000000000000..28d9f168bf3d559ad434883954004797ae96e690 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/insert_guards.h @@ -0,0 +1,21 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include +#include + +namespace torch { +namespace jit { + +TORCH_API void InsertGuards(std::shared_ptr graph); + +TORCH_API void RemoveProfilingNodes(const std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lift_closures.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lift_closures.h new file mode 100644 index 0000000000000000000000000000000000000000..c7cee8417fa457d671da2efbc6a19493762fcffb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lift_closures.h @@ -0,0 +1,12 @@ +#pragma once + +#include +#include + +namespace torch { +namespace jit { + +TORCH_API void liftClosures(const std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/liveness.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/liveness.h new file mode 100644 index 0000000000000000000000000000000000000000..7b612dee9622304b5f9279215da7c798b5958b4b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/liveness.h @@ -0,0 +1,23 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +namespace torch { +namespace jit { + +using SparseBitVector = ::c10::SparseBitVector<256>; + +// BuildLivenessSets computes "bailout" liveness which is equivalent to +// "{LIVE_IN} or {GEN}" or "{LIVE_OUT} - {KILL}" +TORCH_API std::unordered_map> BuildLivenessSets( + std::shared_ptr graph); +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_graph.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_graph.h new file mode 100644 index 0000000000000000000000000000000000000000..6c9ea6666835a26e39ddf82830d1c43b7cd45748 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_graph.h @@ -0,0 +1,22 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +using ModulePtr = c10::intrusive_ptr; + +// Given a graph with of a method which first argument is %self, lower it to a +// graph where all attributes accesses are replaced with explicit inputs of the +// graph (rather than results of prim::GetAttr executed on %self). +// +// Returns a tuple (graph, parameters) where the last module.parameters.size() +// inputs to the graph are the trainable parameters used in this method. The +// remaining inputs are the true inputs to the function. 
+TORCH_API std::pair, std::vector> LowerGraph( + Graph& graph, + const ModulePtr& self); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/metal_rewrite.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/metal_rewrite.h new file mode 100644 index 0000000000000000000000000000000000000000..30e4825cedd17962a7f90dfa93e6a7b5dba319cf --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/metal_rewrite.h @@ -0,0 +1,17 @@ +#pragma once +#include +#include +#include +#include + +namespace torch { +namespace jit { +TORCH_API void metalInsertPrePackedOps(std::shared_ptr& graph); +TORCH_API void metalInsertPrePackedOps(script::Module& module); +TORCH_API void metalFusePrePackedConvWithClamp(script::Module& module); +TORCH_API void metalFoldPrePackingOps(script::Module& module); +TORCH_API script::Module metalOptimizeForMobile( + const script::Module& module, + const std::vector& preserved_methods); +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/mkldnn_rewrite.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/mkldnn_rewrite.h new file mode 100644 index 0000000000000000000000000000000000000000..30d02332429a81ec50c6dc3fae8ab7cddff88714 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/mkldnn_rewrite.h @@ -0,0 +1,34 @@ +#pragma once + +#include +#include +#include +#include + +#if AT_MKLDNN_ENABLED() + +#include + +#endif // AT_MKLDNN_ENABLED() + +namespace torch { +namespace jit { + +#if AT_MKLDNN_ENABLED() + +namespace mkldnn { + +const static std::map> + fusion_rewrite_map = { + {"none", {}}, + {"relu", {}}, +}; + +} // namespace mkldnn + +#endif // AT_MKLDNN_ENABLED() + +void FuseConvWithEltwise(std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/normalize_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/normalize_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..4d630392ca47df0d0a32ef1bf5d25bbb4a41c163 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/normalize_ops.h @@ -0,0 +1,18 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// This pass converts aten ops to a normalized form. It is +// run immediately after IR generation in both the tracer and compiler, +// so downstream consumers of the IR do not need handle ops in their +// pre-normalized form. +// Currently only handles normalization of op aliases. 
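// Editorial sketch (not part of this header): a hypothetical call site for
// the LowerGraph declaration below, assuming `m` is a torch::jit::Module:
//
//   auto lowered = LowerGraph(*m.get_method("forward").graph(), m._ivalue());
//   // lowered.first  : the graph with attribute accesses turned into inputs
//   // lowered.second : the parameter values appended as trailing inputs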
+TORCH_API void NormalizeOps(const std::shared_ptr& graph); + +const std::unordered_map& getOperatorAliasMap(); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/onednn_graph_fuser.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/onednn_graph_fuser.h new file mode 100644 index 0000000000000000000000000000000000000000..aeb79470b01ae60e38282b4d29b6942af4189ac5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/onednn_graph_fuser.h @@ -0,0 +1,64 @@ +#pragma once + +#include +#include + +#include + +namespace torch { +namespace jit { +namespace fuser { +namespace onednn { + +static std::atomic onednn_enabled{true}; + +static std::atomic& getLlgaEnabled() { + return onednn_enabled; +} + +TORCH_API void fuseGraph(std::shared_ptr& g); + +} // namespace onednn +} // namespace fuser + +struct C10_EXPORT RegisterLlgaFuseGraph + : public PassManager { + static bool setEnabled(bool enabled) { + TORCH_CHECK( + AT_MKLDNN_ENABLED(), + "Running oneDNN Graph fuser is only supported with MKLDNN builds."); + bool oldState = fuser::onednn::getLlgaEnabled(); + fuser::onednn::getLlgaEnabled() = enabled; + if (enabled) { + registerPass(fuser::onednn::fuseGraph); + } else { + clearPass(); + } + return oldState; + } + + static bool isEnabled() { + return fuser::onednn::getLlgaEnabled(); + } + + // override PassManager::registerPass to register pre-pass + static bool registerPass(GraphPass p) { + if (!isRegistered()) { + passID(registerPrePass(std::move(p)), true); + isRegistered(true); + return false; + } + return true; + } + + // override PassManager::clearPass to clear pre-pass + static void clearPass() { + if (isRegistered()) { + clearPrePass(passID()); + isRegistered(true); + } + } +}; + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_alias_sensitive.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_alias_sensitive.h new file mode 100644 index 0000000000000000000000000000000000000000..d61a0a4ec0d8be8e1c3e49e352b288a4767b9ed0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_alias_sensitive.h @@ -0,0 +1,17 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Peephole Optimizes alias sensitive peepholes +// Currently this is invoked as part of PeepholeOptimize +// return true if graph is modified +// Optimizes on TensorType if shape_peepholes is true +TORCH_API bool PeepholeOptimizeAliasSensitive( + const std::shared_ptr& graph, + bool shape_peepholes); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_dict_idioms.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_dict_idioms.h new file mode 100644 index 0000000000000000000000000000000000000000..283c313d9ee2ae8024ba286d1a5bd0ea5cf1fdd3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_dict_idioms.h @@ -0,0 +1,38 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Peephole Optimizes Dict Ops such as len() and __getitem__ +// 1. 
getitem optimizations +// Given a function like this: +// def foo(): +// d = {0 : 1} +// x = d[0] +// return x +// This pass produces (after dead code elimination): +// def foo(a, b): +// return 1 +// +// This optimization can only happen if the dict is not modified +// and the dict has constant, non overlapping keys. +// +// 2. len optimizations +// Given a function like this: +// def foo(): +// d = {0 : 1} +// return len(d) +// This pass produces (after dead code elimination): +// def foo(): +// return 1 +// +// This has the same requirements as the getitem optimizations. +// +// Currently this is invoked as part of PeepholeOptimize +// return true if graph is modified. +TORCH_API bool PeepholeOptimizeDictIdioms(const std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_non_tensor.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_non_tensor.h new file mode 100644 index 0000000000000000000000000000000000000000..1e4daebd060cc9365c8994219803a65891c69d4e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_non_tensor.h @@ -0,0 +1,14 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// return true if graph is modified +// Optimizing General Graph Patterns that +// are not covered in peephole.cpp and peephole_list_idioms +TORCH_API bool PeepholeOptimizeNonTensor(const std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/dedup_module_uses.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/dedup_module_uses.h new file mode 100644 index 0000000000000000000000000000000000000000..0204d5f73f04f3420f8d822c782739417b31b7e6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/dedup_module_uses.h @@ -0,0 +1,28 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +/** Recursively deduplicate multiple uses of the same module by + * creating an instance clone for each use of the module, which means + * the type will be the same as before and all the attributes will be + * copied, then we'll change the use of the original module to the use + * of cloned module in the Graph. + * + * This is done to ensure that modules can survive destructive passes + * without changing model behavior. For example, here: + * + * x = self.conv1(x) + * x = self.relu(x) + * x = self.conv2(x) + * x = self.relu(x) + * + * self.relu needs to be deduplicated for potential future destructive passes + * to work properly. + */ +TORCH_API void DedupModuleUses(Module& module); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/finalize.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/finalize.h new file mode 100644 index 0000000000000000000000000000000000000000..d73addbc387f6b4d55360480b3c20fc1e0b84d3c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/finalize.h @@ -0,0 +1,63 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace jit { + +/** \brief Backend specific pass to fuse dequantize - op - quantize calls + * as quantized_op calls. 
+ * + * Right now this is a fusion for fbgemm backend and only works for quantized + * conv op, we'll extend to more ops and more backends in the future. + * + * Currently supported fusion: + * q(conv2d(dq(a), dq(w), dq(b))) --> to_nchw(fbgemm_conv2d(prepack(to_nhwc(a)), + * prepack(to_nhwc(w)), + * prepack(to_nhwc(b)))) + * + * q(linear(dq(a), dq(w), dq(b))) --> to_nchw(fbgemm_linear(prepack(to_nhwc(a)), + * prepack(to_nhwc(w)), + * prepack(to_nhwc(b)))) + * + * \param graph the graph we want to apply fusion + */ +TORCH_API void QuantFusion( + std::shared_ptr& graph, + QuantType quant_type = QuantType::STATIC); + +/** \brief Insert prepack and unpack function in graph + * We want add pack/unpack functions for quantized weight because later we want + * to fold the packed weight as an attribute of the module, in order to reduce + * the cost of packing the weight on the fly in quantized models. + * + * Each quantized op has it's corresponding prepack/unpack function, + * right now, we only need to do prepack/unpack for quantized::linear + * and quantized::conv2d. + */ +TORCH_API void InsertPrepackUnpack(std::shared_ptr& graph); + +/** \brief Insert pack and unpack function in all graphs + * of module + * + * Go through graphs of all the methods of all child modules + * and call InsertPrepackUnpack on the graph. + */ +TORCH_API void InsertPrepackUnpack(Module& module); + +TORCH_API script::Module Finalize( + script::Module& module, + QuantType quant_type = QuantType::STATIC, + const std::vector& preserved_attrs = + std::vector()); + +TORCH_API void FoldQuantizedPrepackingOps(Module& module); + +TORCH_API Module FinalizeOnDevicePTQ( + Module& module, + QuantType quant_type, + const std::string& method_name); +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/fusion_passes.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/fusion_passes.h new file mode 100644 index 0000000000000000000000000000000000000000..b316fe2adab92911b91ae7c7cf2bad050fc2afc4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/fusion_passes.h @@ -0,0 +1,9 @@ +#pragma once + +#include + +namespace torch { +namespace jit { +TORCH_API void FuseQuantizedAddRelu(std::shared_ptr& graph); +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/helper.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/helper.h new file mode 100644 index 0000000000000000000000000000000000000000..b5a5adf40b65c6838b23d5f2999b8058d7e825cd --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/helper.h @@ -0,0 +1,216 @@ +#pragma once +#include +#include +#include +#include +#include + +#include +#include + +namespace torch { +namespace jit { + +using graph_rewrite_helper::getFuncName; + +// Vector of a module and the name of its method +using ModuleMethodVector = std::vector>; +// Map of quantization parameter name and value +// for example _scale, _zero_point, +// _scalar_type and _axis(for per channel quantization) +using QParamVector = std::vector>; + +// =========== helper functions for Value ========= +// Check if a value is weight, since we need to use weight observer +// for weight +TORCH_API bool isWeight(Value* v); + +// Check if a value is bias for conv and linear, 
which we do not +// quantize +TORCH_API bool isBiasOfConvOrLinear(Value* v); + +TORCH_API bool isEmbeddingBagNonInput(Value* v); + +// Get the use as scalar input of clamp ops for the input value +c10::optional getClampScalarInputUse(Value* v); + +// For a given value `v`, get the list of values that we need to check +// if they are observed/quantized or not, if so, we can say the +// `v` is also observed/quantized, since we can derive +// the quantization parameters for `v` given the list of values +TORCH_API std::vector getPassThroughInputs(Value* v); + +// Clones the method by the name of orig_method_name into new_method_name method +TORCH_API void cloneMethod( + Module& module, + const std::string& orig_method_name, + const std::string& new_method_name); + +// Check if a value in the graph is a Scalar value +TORCH_API bool isScalar(Value* v); + +// Check if value is the input of the graph +TORCH_API bool hitGraphInput(Value* value); + +// Converts a mangled name, such as +// __torch__.torch.ao.nn.quantized.modules.conv.___torch_mangle_7.Conv2d +// into an unmangled name, such as +// __torch__.torch.ao.nn.quantized.modules.conv.Conv2d +TORCH_API std::string removeTorchMangle(const std::string& orig_name); + +// Return the module name that corresponds to the value. +TORCH_API c10::optional getModuleName(Value* value); + +// =========== helper functions for Node ========= +TORCH_API bool isSingleInputGeneralShapeAtenFunction(Node* n); + +TORCH_API bool isSingleInputGeneralValueAtenFunction(Node* n); + +TORCH_API bool isSingleInputGeneralCallFunction(Node* n); + +TORCH_API bool isSingleInputGeneralAtenFunction(Node* n); + +TORCH_API bool isClamp(Node* n); + +// Check if the node will produce the same result regardless of whether +// the input tensor is quantized or not, example: aten::size +TORCH_API bool isTensorInfoNode(Node* n); + +// Check if this the propagate op that has single input, e.g. aten::cat +TORCH_API bool isPropagateQuantSingleInputOp(Node* n); + +// Check if this is the propagate op that has two inputs, e.g. aten::add +TORCH_API bool isPropagateQuantBinaryOp(Node* n); + +// Check if this is the node that we'll quantize or not quantize depending on +// whether the input of the node is quantized, example: aten::cat +TORCH_API bool isPropagateQuantOp(Node* n); + +// Check if the node is a binary op like aten::add and aten::mul and +// if the input 1 is a scalar, these ops will be quantized to +// quantized::{op}_scalar +TORCH_API bool isBinaryOpWithScalarInput(Node* n); + +TORCH_API c10::optional> getFixedQParams( + Node* n); + +// We don't want to analyze the graph for some `builtin` CallFunctions +// like `linear` because we want to preserve the op boundary +TORCH_API bool userDefinedCallFunction(Node* n); + +// Check if the node has scalar input +TORCH_API bool hasScalarInput(Node* n); + +// Check if a node is quantizable +TORCH_API bool nodeQuantizable( + Node* n, + QuantType quant_type = QuantType::STATIC); + +// Nodes which only require quantization of weight value, eg. 
embedding_bag +bool isWeightOnlyStaticQuantOp(Node* n); + +// Check if a use of the value is quantizable, this depends on +// both the use node and the offset +TORCH_API bool useQuantizable(const Use& use, QuantType quant_type); + +// Given a CallFunction node, extract the graph of the called function +TORCH_API std::shared_ptr getCallFunctionGraph(Node* n); + +// Check if `use` is a CallFunction of name `func_name` and if value +// `v` is the nth argument (if provided) of the function +bool matchCallFuncToUse( + const Use& use, + const std::string& func_name, + c10::optional nth_arg); + +// Check if `use` is a AtenFunction of name `func_name` and if value +// `v` is the nth argument (if provided) of the function +bool matchAtenFuncToUse( + const Use& use, + const std::string& func_name, + c10::optional nth_arg); + +// =========== helper functions for Block ========= +// checks if a block will always raise an Exception +TORCH_API bool alwaysRaisesException(Block* block); + +// =========== helper functions for Module ========== +// TODO: remove +TORCH_API std::vector getModuleAccessPath( + Value* instance, + Value* self); +// TODO: remove +TORCH_API Module +findChildModule(const Module& module, const std::vector& path); + +// Given an CallMethod node, get the module instance corresponding +// to the instance Value +// TODO: refactor all current uses of this function to the Opt one +TORCH_API Module getInvokedModule(Module& module, Node* n, Value* self); + +// Given an CallMethod node, get the module instance corresponding +// to the instance Value if the instance is a module, otherwise return +// c10::nullopt +c10::optional getInvokedModuleOpt( + const Module& module, + Node* n, + Value* self); + +// ==================== filter functions for matches ============== +// filter to check Value `vname` is a constant of int value `value` +bool is_int_constant( + const Match& match, + const std::unordered_map& vmap, + const std::string& vname, + int value); + +// filter to check if the %alpha argument of aten::add is constant 1 +bool aten_add_alpha_is_one( + const Match& match, + const std::unordered_map& vmap); + +// filter to check if the functional in CallFunction is relu +bool is_functional_relu( + const Match& match, + const std::unordered_map& vmap); + +// filter to check if the module is torch.nn.ReLU +bool is_relu_module( + const Match& match, + const std::unordered_map& vmap); + +bool is_linear_module( + const Match& match, + const std::unordered_map& vmap); + +// TODO: add a macro to declare the filters +bool is_conv1d_module( + const Match& match, + const std::unordered_map& vmap); + +bool is_conv2d_module( + const Match& match, + const std::unordered_map& vmap); + +bool is_conv3d_module( + const Match& match, + const std::unordered_map& vmap); + +bool is_conv_transpose1d_module( + const Match& match, + const std::unordered_map& vmap); + +bool is_conv_transpose2d_module( + const Match& match, + const std::unordered_map& vmap); + +bool is_batchnorm2d_module( + const Match& match, + const std::unordered_map& vmap); + +bool is_batchnorm3d_module( + const Match& match, + const std::unordered_map& vmap); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/insert_observers.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/insert_observers.h new file mode 100644 index 0000000000000000000000000000000000000000..6fa7fe04491122fc40ed5c2309c6c08cf01d57d7 --- 
/dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/insert_observers.h @@ -0,0 +1,68 @@ +#pragma once + +#include +#include + +namespace std { + +template <> +struct hash { + inline size_t operator()(const torch::jit::Module& arg) const { + return std::hash>()(arg._ivalue()); + } +}; + +} // namespace std + +namespace torch { +namespace jit { + +using QConfig = std::tuple; +using QConfigDict = std::unordered_map>; + +/** \brief Insert observer module and observer function call for + * the Tensors that needs to be observed. + * + * For each Tensor that needs to be observed in the method, insert observer + * module to the input module and add forward calls of observer to the specified + * method. + * + * \param module the input module + * \param method_name the method we want to insert observers for + * \param qconfig_dict the qconfig dictionary that specifies how + * each module is going to be quantized + * \param inplace whether we want to do inplace modification to the input module + * or clone the module + * \param is_dynamic whether the dynamic quantization script is being used. + */ +TORCH_API Module InsertObservers( + Module& module, + const std::string& method_name, + const QConfigDict& qconfig_dict, + bool inplace, + QuantType quant_type = QuantType::STATIC); + +/** \brief Insert observer module and observer method for + * the Tensors that needs to be observed. + * + * For each Tensor that needs to be observed in the method, insert observer + * module to the input module and observe_ methods to the module. + * This method is clone of mehtod_name with forward calls of observer added. + * + * \param module the input module + * \param method_name the method we want to insert observers for + * \param qconfig_dict the qconfig dictionary that specifies how + * each module is going to be quantized + * \param inplace whether we want to do inplace modification to the input module + * or clone the module + * \param is_dynamic whether the dynamic quantization script is being used. 
+ */ +TORCH_API Module InsertObserversForOnDevicePTQ( + Module& module, + const std::string& method_name, + const QConfigDict& qconfig_dict, + bool inplace, + QuantType quant_type = QuantType::STATIC); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/insert_quant_dequant.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/insert_quant_dequant.h new file mode 100644 index 0000000000000000000000000000000000000000..de2b31fdba7ca80223bad52847b78060868ba9ce --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/insert_quant_dequant.h @@ -0,0 +1,46 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace jit { + +/** Replicate quantize node for prim::If blocks, so that we can match + * quantization patterns in prim::If blocks + */ +TORCH_API void ReplicateQuant(std::shared_ptr& graph); + +/** Replicate dequantize node for each use, so that we can match + * quantization patterns + */ +TORCH_API void ReplicateDeQuant(std::shared_ptr& graph); + +/** \brief Insert quantize - dequantize calls to the Tensors + * that are observed in insert_observers pass + * + * For each Tensor that is observed, get the observer module and call + * calculate_qparam on the observer module to get quantization parameters + * and add quantize - int_repr - dequantize function calls using these + * parameters we also have special handling for quantizing "bias" right now. + * + * \param module the input module + * \param method_name the method we want to insert quantization calls for + */ +TORCH_API Module InsertQuantDeQuant( + Module& module, + const std::string& method_name, + bool inplace, + bool debug, + QuantType quant_type = QuantType::STATIC); + +TORCH_API Module InsertQuantDeQuantOnDevicePTQ( + Module& module, + const std::string& method_name, + bool inplace, + bool debug, + QuantType quant_type = QuantType::STATIC); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/quantization_patterns.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/quantization_patterns.h new file mode 100644 index 0000000000000000000000000000000000000000..851548862dfc4779bf03e40c6291847c0bbe1eed --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/quantization_patterns.h @@ -0,0 +1,1272 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { + +struct QuantFusionInfo { + std::string quantized_op_name; + std::string pattern; + std::string replacement; + std::vector filters = {}; +}; + +namespace { +std::string getExtraArgList(std::vector extra_args) { + return std::accumulate( + extra_args.begin(), + extra_args.end(), + std::string(), + [](std::string acc, const std::string& arg) { return acc + ", " + arg; }); +} + +// Get the pattern we want to replace the match with +std::string getAtenOpPattern( + const std::string& graph_header, + const std::string& op_name, + const std::vector& extra_op_args, + bool scalar_args = false) { + std::vector _extra_op_args = extra_op_args; + std::string aten_op_pattern = graph_header; + if (scalar_args) { + for (const auto& extra_arg : _extra_op_args) { + aten_op_pattern + .append(R"( + )") + .append(extra_arg) + 
.append("_scalar = aten::item(") + .append(extra_arg) + .append(")"); + } + + for (auto& _extra_op_arg : _extra_op_args) { + _extra_op_arg.append("_scalar"); + } + } + const auto& extra_op_arg_list = getExtraArgList(std::move(_extra_op_args)); + aten_op_pattern += R"( + %r = )"; + aten_op_pattern += op_name + "(" + "%a_quant" + extra_op_arg_list + ")"; + aten_op_pattern += R"( + return (%r) )"; + return aten_op_pattern; +} + +// generate ops for quantize pattern for a scalar value +std::string getQuantizeForScalar(const std::string& value) { + // 6 is `torch.float` ScalarType, we are creating a float scalar + // tensor from a scalar value + std::string quantize_pattern = R"( + )" + + value + "_float_scalar_type : int = prim::Constant[value=6]()"; + quantize_pattern += R"( + )" + + value + "_none : None = prim::Constant()"; + quantize_pattern += R"( + )" + + value + "_tensor : Tensor = aten::scalar_tensor(" + value + ", " + value + + "_float_scalar_type"; + for (const auto i : c10::irange(3)) { + (void)i; // Suppress unused variable warning + quantize_pattern += ", " + value + "_none"; + } + quantize_pattern += ")"; + quantize_pattern += + R"( + )" + + value + "_quant = aten::quantize_per_tensor(" + value + "_tensor" + + getExtraArgList( + {value + "_scale", value + "_zero_point", value + "_dtype"}) + + ")"; + return quantize_pattern; +} + +std::string getDequantize(const std::string& value) { + return R"( + )" + + value + "_dequant = aten::dequantize(" + value + "_quant)"; +} + +std::string getItem(const std::string& value) { + return R"( + )" + + value + "_scalar : float = aten::item(" + value + "_dequant)"; +} + +// Patterns for the ops that inherit parameters from input +std::string getInputTensorQParamOpPattern( + const std::string& op_name, + const std::vector& extra_op_args) { + const auto& extra_op_arg_list = getExtraArgList(extra_op_args); + std::string op_pattern = "graph(%a_quant" + extra_op_arg_list + "):" + R"( + %a_dequant = aten::dequantize(%a_quant) + %r = )" + + op_name + "(" + "%a_dequant" + extra_op_arg_list + ")" + R"( + %r_scale : float = aten::q_scale(%a_quant) + %r_zero_point : int = aten::q_zero_point(%a_quant) + %r_dtype : int = prim::dtype(%a_quant) + %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype) + return (%r_quant) )"; + return op_pattern; +} + +// QuantFusionInfo for the ops that inherit parameters from input +QuantFusionInfo getInputTensorQParamOpFusionInfo( + const std::string& op_name, + const std::vector& extra_op_args) { + std::string op_pattern = + getInputTensorQParamOpPattern(op_name, extra_op_args); + const auto& extra_op_arg_list = getExtraArgList(extra_op_args); + std::string graph_header = "graph(%a_quant" + extra_op_arg_list + "):"; + std::string op_replacement = + getAtenOpPattern(graph_header, op_name, extra_op_args); + + return {op_name, std::move(op_pattern), std::move(op_replacement)}; +} + +// quant fusion for ops like `quantized::add_scalar`, `quantized::mul_scalar` +QuantFusionInfo getBinaryOpScalarFusionInfo( + const std::string& op_name, + const std::vector& extra_op_args, + const std::string& quantized_op_name, + const std::vector& extra_quantized_op_args, + const std::vector& filters = {}) { + std::string op_pattern = + getInputTensorQParamOpPattern(op_name, extra_op_args); + + const auto& extra_op_arg_list = getExtraArgList(extra_op_args); + std::string graph_header = "graph(%a_quant" + extra_op_arg_list + "):"; + std::string op_replacement = getAtenOpPattern( + graph_header, quantized_op_name, 
extra_quantized_op_args); + + return {op_name, std::move(op_pattern), std::move(op_replacement), filters}; +} + +QuantFusionInfo getClampOpFusionInfo( + const std::string& op_name, + const std::vector& extra_op_args) { + std::vector header_args = extra_op_args; + std::vector input_qparams = {"_scale", "_zero_point", "_dtype"}; + for (const auto& arg : extra_op_args) { + for (const auto& qparam : input_qparams) { + header_args.push_back(arg + qparam); + } + } + for (const auto& qparam : input_qparams) { + header_args.push_back("%r" + qparam); + } + const auto& extra_header_arg_list = getExtraArgList(std::move(header_args)); + std::string graph_header = "graph(%a_quant" + extra_header_arg_list + "):"; + std::string op_pattern = graph_header; + for (const auto& arg : extra_op_args) { + op_pattern += getQuantizeForScalar(arg); + op_pattern += getDequantize(arg); + op_pattern += getItem(arg); + } + op_pattern += getDequantize("%a"); + op_pattern += R"( + %r = )"; + std::vector scalar_extra_args; + scalar_extra_args.reserve(extra_op_args.size()); + for (const auto& arg : extra_op_args) { + scalar_extra_args.push_back(arg + "_scalar"); + } + op_pattern += op_name + "(" + "%a_dequant" + + getExtraArgList(std::move(scalar_extra_args)) + ")"; + // IR pattern common to all ops that inherit qparam from input + op_pattern += R"( + %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype) + return (%r_quant) )"; + + std::string aten_op_pattern = + getAtenOpPattern(graph_header, op_name, extra_op_args); + + return {op_name, std::move(op_pattern), std::move(aten_op_pattern)}; +} + +// Patterns for the ops that has fixed quantization parameters +QuantFusionInfo getFixedQParamOpFusionInfo( + const std::string& op_name, + const std::vector& extra_op_args, + bool is_symmetric) { + const auto& extra_op_arg_list = getExtraArgList(extra_op_args); + std::string graph_header = "graph(%a_quant" + extra_op_arg_list + "):"; + std::string op_pattern = graph_header; + op_pattern += R"( + %a_dequant = aten::dequantize(%a_quant) + %r = )"; + op_pattern += op_name + "(" + "%a_dequant" + extra_op_arg_list + ")"; + // IR pattern common to all ops with fixed quantization parameters for + // asymetric quantization + std::string asym_fixed_qparam_op_suffix = R"( + %r_scale : float = prim::Constant[value=0.00390625]() + %r_zero_point : int = prim::Constant[value=0]() + %r_dtype : int = prim::Constant[value=13]() + %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype) + return (%r_quant) )"; + + std::string sym_fixed_qparam_op_suffix = R"( + %r_scale : float = prim::Constant[value=0.0078125]() + %r_zero_point : int = prim::Constant[value=128]() + %r_dtype : int = prim::Constant[value=13]() + %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype) + return (%r_quant) )"; + op_pattern += + is_symmetric ? 
sym_fixed_qparam_op_suffix : asym_fixed_qparam_op_suffix; + + std::string aten_op_pattern = + getAtenOpPattern(graph_header, op_name, extra_op_args); + + return {op_name, std::move(op_pattern), std::move(aten_op_pattern)}; +} + +// filter that checks %b_scalar is a scalar +bool input_b_is_scalar( + const Match& match, + const std::unordered_map& vmap) { + const auto& match_vmap = match.values_map; + auto b_scalar = match_vmap.at(vmap.at("b_scalar")); + return isScalar(b_scalar); +} + +// Patterns for ops that require observation for output quantization parameters +// Example: +// +// before fusion: +// +// graph(%a_quant, %r_scale, %r_zero_point, %r_dtype): +// %a_dequant = aten::dequantize(%a_quant) +// %r = {op_name}(%a_dequant, {extra_args}) +// %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, +// %r_dtype) return (%r_quant) +// +// after fusion: +// +// graph(%a_quant, %r_scale, %r_zero_point, %r_dtype): +// %r_quant = {quantized_op_name}(%a_quant, {extra_args}, %r_scale, +// %r_zero_point) return (%r_quant) +QuantFusionInfo getObservedQParamOpFusionInfo( + const std::string& fp_op_name, + const std::string& q_op_name, + const std::vector& fp_extra_args, + const std::vector& q_extra_args) { + const auto& fp_extra_arg_list = getExtraArgList(fp_extra_args); + const auto& q_extra_arg_list = getExtraArgList(q_extra_args); + + std::string op_pattern = "graph(%a_quant" + fp_extra_arg_list + + ", %r_scale, %r_zero_point, %r_dtype):" + R"( + %a_dequant = aten::dequantize(%a_quant) + %r = )" + + fp_op_name + "(" + "%a_dequant" + fp_extra_arg_list + ")" + R"( + %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype) + return (%r_quant) )"; + + std::string aten_op_pattern = "graph(%a_quant" + fp_extra_arg_list + + ", %r_scale, %r_zero_point, %r_dtype):" + R"( + %r_quant = )" + + q_op_name + "(%a_quant" + q_extra_arg_list + + ", %r_scale, %r_zero_point)" + R"( + return (%r_quant) )"; + + return {q_op_name, std::move(op_pattern), std::move(aten_op_pattern)}; +} + +} // namespace + +static std::vector quant_fusion_pattern_and_replacements() { + // aten::conv1d + std::string conv1d = R"( +graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups): + %a_dequant = aten::dequantize(%a_quant) + %w_quant : Tensor, %b : Tensor? = quantized::conv1d_unpack(%packed_params) + %w_dequant = aten::dequantize(%w_quant) + %r = aten::conv1d(%a_dequant, %w_dequant, %b, %stride, %padding, %dilation, %groups) + %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype) + return (%r_quant) )"; + + // aten::conv1d - aten::relu + std::string conv1d_relu = R"( +graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups): + %a_dequant = aten::dequantize(%a_quant) + %w_quant : Tensor, %b : Tensor? = quantized::conv1d_unpack(%packed_params) + %w_dequant = aten::dequantize(%w_quant) + %conv_out = aten::conv1d(%a_dequant, %w_dequant, %b, %stride, %padding, %dilation, %groups) + %r = aten::relu(%conv_out) + %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype) + return (%r_quant) )"; + + // aten::conv1d - aten::relu_ + std::string conv1d_inplace_relu = R"( +graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups): + %a_dequant = aten::dequantize(%a_quant) + %w_quant : Tensor, %b : Tensor? 
= quantized::conv1d_unpack(%packed_params) + %w_dequant = aten::dequantize(%w_quant) + %conv_out = aten::conv1d(%a_dequant, %w_dequant, %b, %stride, %padding, %dilation, %groups) + %r = aten::relu_(%conv_out) + %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype) + return (%r_quant) )"; + + // quantized::conv1d + std::string quantized_conv1d = R"( +graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups): + %r_quant = quantized::conv1d(%a_quant, %packed_params, %r_scale, %r_zero_point) + return (%r_quant) )"; + + // quantized::conv1d_relu + std::string quantized_conv1d_relu = R"( +graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups): + %r_quant = quantized::conv1d_relu(%a_quant, %packed_params, %r_scale, %r_zero_point) + return (%r_quant) )"; + + // aten::conv2d + std::string conv2d = R"( +graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups): + %a_dequant = aten::dequantize(%a_quant) + %w_quant : Tensor, %b : Tensor? = quantized::conv2d_unpack(%packed_params) + %w_dequant = aten::dequantize(%w_quant) + %r = aten::conv2d(%a_dequant, %w_dequant, %b, %stride, %padding, %dilation, %groups) + %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype) + return (%r_quant) )"; + + // aten::conv2d - aten::relu + std::string conv2d_relu = R"( +graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups): + %a_dequant = aten::dequantize(%a_quant) + %w_quant : Tensor, %b : Tensor? = quantized::conv2d_unpack(%packed_params) + %w_dequant = aten::dequantize(%w_quant) + %conv_out = aten::conv2d(%a_dequant, %w_dequant, %b, %stride, %padding, %dilation, %groups) + %r = aten::relu(%conv_out) + %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype) + return (%r_quant) )"; + + // aten::conv2d - aten::relu_ + std::string conv2d_inplace_relu = R"( +graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups): + %a_dequant = aten::dequantize(%a_quant) + %w_quant : Tensor, %b : Tensor? = quantized::conv2d_unpack(%packed_params) + %w_dequant = aten::dequantize(%w_quant) + %conv_out = aten::conv2d(%a_dequant, %w_dequant, %b, %stride, %padding, %dilation, %groups) + %r = aten::relu_(%conv_out) + %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype) + return (%r_quant) )"; + + // quantized::conv2d + std::string quantized_conv2d = R"( +graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups): + %r_quant = quantized::conv2d(%a_quant, %packed_params, %r_scale, %r_zero_point) + return (%r_quant) )"; + + // quantized::conv2d_relu + std::string quantized_conv2d_relu = R"( +graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups): + %r_quant = quantized::conv2d_relu(%a_quant, %packed_params, %r_scale, %r_zero_point) + return (%r_quant) )"; + + // aten::conv3d + std::string conv3d = R"( +graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups): + %a_dequant = aten::dequantize(%a_quant) + %w_quant : Tensor, %b : Tensor? 
= quantized::conv3d_unpack(%packed_params) + %w_dequant = aten::dequantize(%w_quant) + %r = aten::conv3d(%a_dequant, %w_dequant, %b, %stride, %padding, %dilation, %groups) + %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype) + return (%r_quant) )"; + + // aten::conv3d - aten::relu + std::string conv3d_relu = R"( +graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups): + %a_dequant = aten::dequantize(%a_quant) + %w_quant : Tensor, %b : Tensor? = quantized::conv3d_unpack(%packed_params) + %w_dequant = aten::dequantize(%w_quant) + %conv_out = aten::conv3d(%a_dequant, %w_dequant, %b, %stride, %padding, %dilation, %groups) + %r = aten::relu(%conv_out) + %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype) + return (%r_quant) )"; + + // aten::conv3d - aten::relu_ + std::string conv3d_inplace_relu = R"( +graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups): + %a_dequant = aten::dequantize(%a_quant) + %w_quant : Tensor, %b : Tensor? = quantized::conv3d_unpack(%packed_params) + %w_dequant = aten::dequantize(%w_quant) + %conv_out = aten::conv3d(%a_dequant, %w_dequant, %b, %stride, %padding, %dilation, %groups) + %r = aten::relu_(%conv_out) + %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype) + return (%r_quant) )"; + + // quantized::conv3d + std::string quantized_conv3d = R"( +graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups): + %r_quant = quantized::conv3d(%a_quant, %packed_params, %r_scale, %r_zero_point) + return (%r_quant) )"; + + // quantized::conv3d_relu + std::string quantized_conv3d_relu = R"( +graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups): + %r_quant = quantized::conv3d_relu(%a_quant, %packed_params, %r_scale, %r_zero_point) + return (%r_quant) )"; + + // aten::conv_transpose1d + std::string conv_transpose1d = R"( +graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %output_padding, %groups, %dilation): + %a_dequant = aten::dequantize(%a_quant) + %w_quant : Tensor, %b : Tensor? = quantized::conv_transpose1d_unpack(%packed_params) + %w_dequant = aten::dequantize(%w_quant) + %r = aten::conv_transpose1d(%a_dequant, %w_dequant, %b, %stride, %padding, %output_padding, %groups, %dilation) + %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype) + return (%r_quant) )"; + + // quantized::conv_transpose1d + std::string quantized_conv_transpose1d = R"( +graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %output_padding, %groups, %dilation): + %r_quant = quantized::conv_transpose1d(%a_quant, %packed_params, %r_scale, %r_zero_point) + return (%r_quant) )"; + + // aten::conv_transpose2d + std::string conv_transpose2d = R"( +graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %output_padding, %groups, %dilation): + %a_dequant = aten::dequantize(%a_quant) + %w_quant : Tensor, %b : Tensor? 
= quantized::conv_transpose2d_unpack(%packed_params) + %w_dequant = aten::dequantize(%w_quant) + %r = aten::conv_transpose2d(%a_dequant, %w_dequant, %b, %stride, %padding, %output_padding, %groups, %dilation) + %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype) + return (%r_quant) )"; + + // quantized::conv_transpose1d + std::string quantized_conv_transpose2d = R"( +graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %output_padding, %groups, %dilation): + %r_quant = quantized::conv_transpose2d(%a_quant, %packed_params, %r_scale, %r_zero_point) + return (%r_quant) )"; + + std::string add_relu = R"( +graph(%a_quant, %b_quant, %alpha, %scale, %zero_point, %dtype): + %a_dequant = aten::dequantize(%a_quant) + %b_dequant = aten::dequantize(%b_quant) + %r_add = aten::add(%a_dequant, %b_dequant, %alpha) + %r_relu = aten::relu(%r_add) + %r = aten::quantize_per_tensor(%r_relu, %scale, %zero_point, %dtype) + return (%r) )"; + + std::string add_inplace_relu = R"( +graph(%a_quant, %b_quant, %alpha, %scale, %zero_point, %dtype): + %a_dequant = aten::dequantize(%a_quant) + %b_dequant = aten::dequantize(%b_quant) + %r_add = aten::add(%a_dequant, %b_dequant, %alpha) + %r_relu = aten::relu_(%r_add) + %r = aten::quantize_per_tensor(%r_relu, %scale, %zero_point, %dtype) + return (%r) )"; + + std::string inplace_add_relu = R"( +graph(%a_quant, %b_quant, %alpha, %scale, %zero_point, %dtype): + %a_dequant = aten::dequantize(%a_quant) + %b_dequant = aten::dequantize(%b_quant) + %r_add = aten::add_(%a_dequant, %b_dequant, %alpha) + %r_relu = aten::relu(%r_add) + %r = aten::quantize_per_tensor(%r_relu, %scale, %zero_point, %dtype) + return (%r) )"; + + std::string inplace_add_inplace_relu = R"( +graph(%a_quant, %b_quant, %alpha, %scale, %zero_point, %dtype): + %a_dequant = aten::dequantize(%a_quant) + %b_dequant = aten::dequantize(%b_quant) + %r_add = aten::add_(%a_dequant, %b_dequant, %alpha) + %r_relu = aten::relu_(%r_add) + %r = aten::quantize_per_tensor(%r_relu, %scale, %zero_point, %dtype) + return (%r) )"; + + std::string quantized_add_relu = R"( +graph(%a_quant, %b_quant, %alpha, %scale, %zero_point, %dtype): + %r = quantized::add_relu(%a_quant, %b_quant, %scale, %zero_point) + return (%r) )"; + + // aten::linear + std::string linear = R"( +graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype): + %a_dequant = aten::dequantize(%a_quant) + %w_quant : Tensor, %b : Tensor? = quantized::linear_unpack(%packed_params) + %w_dequant = aten::dequantize(%w_quant) + %r = aten::linear(%a_dequant, %w_dequant, %b) + %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype) + return (%r_quant) )"; + + std::string linear_relu = R"( +graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype): + %a_dequant = aten::dequantize(%a_quant) + %w_quant : Tensor, %b : Tensor? = quantized::linear_unpack(%packed_params) + %w_dequant = aten::dequantize(%w_quant) + %linear_out = aten::linear(%a_dequant, %w_dequant, %b) + %r = aten::relu(%linear_out) + %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype) + return (%r_quant) )"; + + std::string linear_inplace_relu = R"( +graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype): + %a_dequant = aten::dequantize(%a_quant) + %w_quant : Tensor, %b : Tensor? 
= quantized::linear_unpack(%packed_params) + %w_dequant = aten::dequantize(%w_quant) + %linear_out = aten::linear(%a_dequant, %w_dequant, %b) + %r = aten::relu_(%linear_out) + %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype) + return (%r_quant) )"; + + // quantized::linear + std::string quantized_linear = R"( +graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype): + %r = quantized::linear(%a_quant, %packed_params, %r_scale, %r_zero_point) + return (%r) )"; + + std::string quantized_linear_relu = R"( +graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype): + %r = quantized::linear_relu(%a_quant, %packed_params, %r_scale, %r_zero_point) + return (%r) )"; + + std::string cat = R"( +graph(%input_quant, %dim, %r_scale, %r_zero_point, %r_dtype): + %input_dequant = aten::dequantize(%input_quant) + %r = aten::cat(%input_dequant, %dim) + %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype) + return (%r_quant) )"; + + std::string quantized_cat = R"( +graph(%input_quant, %dim, %r_scale, %r_zero_point, %r_dtype): + %r_quant = quantized::cat(%input_quant, %dim, %r_scale, %r_zero_point) + return (%r_quant) )"; + + // aten::add + std::string add = R"( +graph(%a_quant, %b_quant, %alpha, %scale, %zero_point, %dtype): + %a_dequant = aten::dequantize(%a_quant) + %b_dequant = aten::dequantize(%b_quant) + %r_add = aten::add(%a_dequant, %b_dequant, %alpha) + %r = aten::quantize_per_tensor(%r_add, %scale, %zero_point, %dtype) + return (%r) )"; + + // TODO: add %dtype after when https://github.com/pytorch/pytorch/issues/34351 + // is fixed + // quantized::add + std::string quantized_add = R"( +graph(%a_quant, %b_quant, %alpha, %scale, %zero_point, %dtype): + %r = quantized::add(%a_quant, %b_quant, %scale, %zero_point) + return (%r) )"; + + // aten::add_ + std::string inplace_add = R"( +graph(%a_quant, %b_quant, %alpha, %scale, %zero_point, %dtype): + %a_dequant = aten::dequantize(%a_quant) + %b_dequant = aten::dequantize(%b_quant) + %r_add = aten::add_(%a_dequant, %b_dequant, %alpha) + %r = aten::quantize_per_tensor(%r_add, %scale, %zero_point, %dtype) + return (%r) )"; + + auto add_scalar = getBinaryOpScalarFusionInfo( + "aten::add", + {"%b_scalar", "%alpha"}, + "quantized::add_scalar", + {"%b_scalar"}, + {aten_add_alpha_is_one, input_b_is_scalar}); + + auto add_scalar_out = getBinaryOpScalarFusionInfo( + "aten::add_", + {"%b_scalar", "%alpha"}, + "quantized::add_scalar_out", + {"%b_scalar", "%a_quant"}, + {aten_add_alpha_is_one, input_b_is_scalar}); + + // quantized::add_scalar_relu -- fusing quantized::add_scalar + // and aten::relu + auto quantized_add_scalar_relu_pattern = R"( +graph(%a_quant, %b_scalar): + %r_add = quantized::add_scalar(%a_quant, %b_scalar) + %r = aten::relu(%r_add) + return (%r) )"; + + auto quantized_add_scalar_inplace_relu_pattern = R"( +graph(%a_quant, %b_scalar): + %r_add = quantized::add_scalar(%a_quant, %b_scalar) + %r = aten::relu_(%r_add) + return (%r) )"; + + auto quantized_add_scalar_relu_replacement = R"( +graph(%a_quant, %b_scalar): + %r = quantized::add_scalar_relu(%a_quant, %b_scalar) + return (%r) )"; + + // quantized::add_scalar_relu_out -- fusing quantized::add_scalarOut + // and aten::relu + auto quantized_add_scalar_relu_out_pattern = R"( +graph(%a_quant, %b_scalar): + %r_add = quantized::add_scalar_out(%a_quant, %b_scalar, %a_quant) + %r = aten::relu(%r_add) + return (%r) )"; + + auto quantized_add_scalar_inplace_relu_out_pattern = R"( +graph(%a_quant, %b_scalar): + %r_add = 
quantized::add_scalar_out(%a_quant, %b_scalar, %a_quant) + %r = aten::relu_(%r_add) + return (%r) )"; + + auto quantized_add_scalar_relu_out_replacement = R"( +graph(%a_quant, %b_scalar): + %r = quantized::add_scalar_relu_out(%a_quant, %b_scalar, %a_quant) + return (%r) )"; + + // quantized::batch_norm + std::string batch_norm = R"( +graph(%a_quant, %weight, %bias, %mean, %var, %training, %eaf, %eps, %7, %scale, %zero_point, %scalar_type): + %a_dequant = aten::dequantize(%a_quant) + %r_bn = aten::batch_norm(%a_dequant, %weight, %bias, %mean, %var, %training, %eaf, %eps, %7) + %r = aten::quantize_per_tensor(%r_bn, %scale, %zero_point, %scalar_type) + return (%r) )"; + std::string quantized_batch_norm = R"( +graph(%a_quant, %weight, %bias, %mean, %var, %training, %eaf, %eps, %7, %scale, %zero_point, %scalar_type): + %r = quantized::batch_norm(%a_quant, %weight, %bias, %mean, %var, %eps, %scale, %zero_point) + return (%r) )"; + + std::string batch_norm_relu = R"( +graph(%a_quant, %weight, %bias, %mean, %var, %training, %eaf, %eps, %7, %scale, %zero_point, %scalar_type): + %a_dequant = aten::dequantize(%a_quant) + %bn_out = aten::batch_norm(%a_dequant, %weight, %bias, %mean, %var, %training, %eaf, %eps, %7) + %relu = aten::relu(%bn_out) + %r = aten::quantize_per_tensor(%relu, %scale, %zero_point, %scalar_type) + return (%r) )"; + std::string batch_norm_inplace_relu = R"( +graph(%a_quant, %weight, %bias, %mean, %var, %training, %eaf, %eps, %7, %scale, %zero_point, %scalar_type): + %a_dequant = aten::dequantize(%a_quant) + %bn_out = aten::batch_norm(%a_dequant, %weight, %bias, %mean, %var, %training, %eaf, %eps, %7) + %relu = aten::relu_(%bn_out) + %r = aten::quantize_per_tensor(%relu, %scale, %zero_point, %scalar_type) + return (%r) )"; + + std::string quantized_batch_norm_relu = R"( +graph(%a_quant, %weight, %bias, %mean, %var, %training, %eaf, %eps, %7, %scale, %zero_point, %scalar_type): + %r = quantized::batch_norm_relu(%a_quant, %weight, %bias, %mean, %var, %eps, %scale, %zero_point) + return (%r) )"; + + // aten::mul + std::string mul = R"( +graph(%a_quant, %b_quant, %scale, %zero_point, %dtype): + %a_dequant = aten::dequantize(%a_quant) + %b_dequant = aten::dequantize(%b_quant) + %r_mul = aten::mul(%a_dequant, %b_dequant) + %r = aten::quantize_per_tensor(%r_mul, %scale, %zero_point, %dtype) + return (%r) )"; + + // aten::mul_ + std::string inplace_mul = R"( +graph(%a_quant, %b_quant, %scale, %zero_point, %dtype): + %a_dequant = aten::dequantize(%a_quant) + %b_dequant = aten::dequantize(%b_quant) + %r_mul = aten::mul_(%a_dequant, %b_dequant) + %r = aten::quantize_per_tensor(%r_mul, %scale, %zero_point, %dtype) + return (%r) )"; + + // quantized::mul + std::string quantized_mul = R"( +graph(%a_quant, %b_quant, %scale, %zero_point, %dtype): + %r = quantized::mul(%a_quant, %b_quant, %scale, %zero_point) + return (%r) )"; + + auto mul_scalar = getBinaryOpScalarFusionInfo( + "aten::mul", + {"%b_scalar"}, + "quantized::mul_scalar", + {"%b_scalar"}, + {input_b_is_scalar}); + + auto mul_scalar_out = getBinaryOpScalarFusionInfo( + "aten::mul_", + {"%b_scalar"}, + "quantized::mul_scalar_out", + {"%b_scalar", "%a_quant"}, + {input_b_is_scalar}); + + // quantized::mul_relu + std::string mul_relu = R"( +graph(%a_quant, %b_quant, %scale, %zero_point, %dtype): + %a_dequant = aten::dequantize(%a_quant) + %b_dequant = aten::dequantize(%b_quant) + %r_mul = aten::mul(%a_dequant, %b_dequant) + %r_relu = aten::relu(%r_mul) + %r = aten::quantize_per_tensor(%r_relu, %scale, %zero_point, %dtype) + return (%r) 
)"; + + std::string mul_inplace_relu = R"( +graph(%a_quant, %b_quant, %scale, %zero_point, %dtype): + %a_dequant = aten::dequantize(%a_quant) + %b_dequant = aten::dequantize(%b_quant) + %r_mul = aten::mul(%a_dequant, %b_dequant) + %r_relu = aten::relu_(%r_mul) + %r = aten::quantize_per_tensor(%r_relu, %scale, %zero_point, %dtype) + return (%r) )"; + + std::string inplace_mul_relu = R"( +graph(%a_quant, %b_quant, %scale, %zero_point, %dtype): + %a_dequant = aten::dequantize(%a_quant) + %b_dequant = aten::dequantize(%b_quant) + %r_mul = aten::mul_(%a_dequant, %b_dequant) + %r_relu = aten::relu(%r_mul) + %r = aten::quantize_per_tensor(%r_relu, %scale, %zero_point, %dtype) + return (%r) )"; + + std::string inplace_mul_inplace_relu = R"( +graph(%a_quant, %b_quant, %scale, %zero_point, %dtype): + %a_dequant = aten::dequantize(%a_quant) + %b_dequant = aten::dequantize(%b_quant) + %r_mul = aten::mul_(%a_dequant, %b_dequant) + %r_relu = aten::relu_(%r_mul) + %r = aten::quantize_per_tensor(%r_relu, %scale, %zero_point, %dtype) + return (%r) )"; + + std::string quantized_mul_relu = R"( +graph(%a_quant, %b_quant, %scale, %zero_point, %dtype): + %r = quantized::mul_relu(%a_quant, %b_quant, %scale, %zero_point) + return (%r) )"; + + // quantized::mul_scalar_relu -- fusing quantized::mul_scalar + // and aten::relu + auto quantized_mul_scalar_relu_pattern = R"( +graph(%a_quant, %b_scalar): + %r_mul = quantized::mul_scalar(%a_quant, %b_scalar) + %r = aten::relu(%r_mul) + return (%r) )"; + + auto quantized_mul_scalar_inplace_relu_pattern = R"( +graph(%a_quant, %b_scalar): + %r_mul = quantized::mul_scalar(%a_quant, %b_scalar) + %r = aten::relu_(%r_mul) + return (%r) )"; + + auto quantized_mul_scalar_relu_replacement = R"( +graph(%a_quant, %b_scalar): + %r = quantized::mul_scalar_relu(%a_quant, %b_scalar) + return (%r) )"; + + // quantized::mul_scalar_relu_out -- fusing quantized::mul_scalarOut + // and aten::relu + auto quantized_mul_scalar_relu_out_pattern = R"( +graph(%a_quant, %b_scalar): + %r_mul = quantized::mul_scalar_out(%a_quant, %b_scalar, %a_quant) + %r = aten::relu(%r_mul) + return (%r) )"; + + auto quantized_mul_scalar_inplace_relu_out_pattern = R"( +graph(%a_quant, %b_scalar): + %r_mul = quantized::mul_scalar_out(%a_quant, %b_scalar, %a_quant) + %r = aten::relu_(%r_mul) + return (%r) )"; + + auto quantized_mul_scalar_relu_out_replacement = R"( +graph(%a_quant, %b_scalar): + %r = quantized::mul_scalar_relu_out(%a_quant, %b_scalar, %a_quant) + return (%r) )"; + + // quantized::elu + std::string elu = R"( +graph(%a_quant, %alpha, %scale, %input_scale, %r_scale, %r_zero_point, %r_dtype): + %a_dequant = aten::dequantize(%a_quant) + %r = aten::elu(%a_dequant, %alpha, %scale, %input_scale) + %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype) + return (%r_quant) )"; + + std::string quantized_elu = R"( +graph(%a_quant, %alpha, %scale, %input_scale, %r_scale, %r_zero_point, %r_dtype): + %r_quant = quantized::elu(%a_quant, %r_scale, %r_zero_point, %alpha, %scale, %input_scale) + return (%r_quant) )"; + + std::string elu_ = R"( +graph(%a_quant, %alpha, %scale, %input_scale, %r_scale, %r_zero_point, %r_dtype): + %a_dequant = aten::dequantize(%a_quant) + %r = aten::elu_(%a_dequant, %alpha, %scale, %input_scale) + %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype) + return (%r_quant) )"; + + // ============= General Ops that inherit quantization parameters from input + // tensor ============= + auto avg_pool1d = getInputTensorQParamOpFusionInfo( + 
"aten::avg_pool1d", + {"%kernel_size", + "%stride", + "%padding", + "%ceil_mode", + "%count_include_pad"}); + + auto avg_pool2d = getInputTensorQParamOpFusionInfo( + "aten::avg_pool2d", + {"%kernel_size", + "%stride", + "%padding", + "%ceil_mode", + "%count_include_pad", + "%divisor_override"}); + + std::string common_general_value_op = R"( + %r_scale : float = aten::q_scale(%a_quant) + %r_zero_point : int = aten::q_zero_point(%a_quant) + %r_dtype : int = prim::dtype(%a_quant) + %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype) + return (%r_quant) )"; + + auto avg_pool3d = getInputTensorQParamOpFusionInfo( + "aten::avg_pool3d", + {"%kernel_size", + "%stride", + "%padding", + "%ceil_mode", + "%count_include_pad", + "%divisor_override"}); + + auto adaptive_avg_pool1d = getInputTensorQParamOpFusionInfo( + "aten::adaptive_avg_pool1d", {"%output_size"}); + + auto adaptive_avg_pool2d = getInputTensorQParamOpFusionInfo( + "aten::adaptive_avg_pool2d", {"%output_size"}); + + auto adaptive_avg_pool3d = getInputTensorQParamOpFusionInfo( + "aten::adaptive_avg_pool3d", {"%output_size"}); + + auto mean1 = getInputTensorQParamOpFusionInfo("aten::mean", {"%dim"}); + + auto mean2 = getInputTensorQParamOpFusionInfo( + "aten::mean", {"%dim", "%keepdim", "%out"}); + + auto upsample_nearest1d_vec = getInputTensorQParamOpFusionInfo( + "aten::upsample_nearest1d", {"%output_size", "%scale_factors"}); + + auto upsample_nearest2d_vec = getInputTensorQParamOpFusionInfo( + "aten::upsample_nearest2d", {"%output_size", "%scale_factors"}); + + auto upsample_nearest3d_vec = getInputTensorQParamOpFusionInfo( + "aten::upsample_nearest3d", {"%output_size", "%scale_factors"}); + + auto upsample_linear1d_vec = getInputTensorQParamOpFusionInfo( + "aten::upsample_linear1d", + {"%output_size", "%align_corners", "%scale_factors"}); + + auto upsample_bilinear2d_vec = getInputTensorQParamOpFusionInfo( + "aten::upsample_bilinear2d", + {"%output_size", "%align_corners", "%scale_factors"}); + + auto upsample_trilinear3d_vec = getInputTensorQParamOpFusionInfo( + "aten::upsample_trilinear3d", + {"%output_size", "%align_corners", "%scale_factors"}); + + auto upsample_nearest1d = getInputTensorQParamOpFusionInfo( + "aten::upsample_nearest1d", {"%output_size", "%scales"}); + + auto upsample_nearest2d = getInputTensorQParamOpFusionInfo( + "aten::upsample_nearest2d", {"%output_size", "%scale_h", "%scale_w"}); + + auto upsample_nearest3d = getInputTensorQParamOpFusionInfo( + "aten::upsample_nearest3d", + {"%output_size", "%scale_d", "%scale_h", "%scale_w"}); + + auto upsample_linear1d = getInputTensorQParamOpFusionInfo( + "aten::upsample_linear1d", {"%output_size", "%align_corners", "%scales"}); + + auto upsample_bilinear2d = getInputTensorQParamOpFusionInfo( + "aten::upsample_bilinear2d", + {"%output_size", "%align_corners", "%scale_h", "%scale_w"}); + + auto upsample_trilinear3d = getInputTensorQParamOpFusionInfo( + "aten::upsample_trilinear3d", + {"%output_size", "%align_corners", "%scale_d", "%scale_h", "%scale_w"}); + + auto clamp = getClampOpFusionInfo("aten::clamp", {"%min", "%max"}); + + auto hardtanh = getClampOpFusionInfo("aten::hardtanh", {"%min", "%max"}); + + auto hardtanh_ = getClampOpFusionInfo("aten::hardtanh_", {"%min", "%max"}); + + auto leaky_relu = + getInputTensorQParamOpFusionInfo("aten::leaky_relu", {"%negative_slope"}); + + auto leaky_relu_ = getInputTensorQParamOpFusionInfo( + "aten::leaky_relu_", {"%negative_slope"}); + + // Ops with fixed quantization parameters + auto hardsigmoid = 
getFixedQParamOpFusionInfo("aten::hardsigmoid", {}, false); + + auto hardsigmoid_ = + getFixedQParamOpFusionInfo("aten::hardsigmoid_", {}, false); + + auto sigmoid = getFixedQParamOpFusionInfo("aten::sigmoid", {}, false); + + auto sigmoid_ = getFixedQParamOpFusionInfo("aten::sigmoid_", {}, false); + + auto tanh = getFixedQParamOpFusionInfo("aten::tanh", {}, true); + + auto tanh_ = getFixedQParamOpFusionInfo("aten::tanh_", {}, true); + + auto hardswish = getObservedQParamOpFusionInfo( + "aten::hardswish", "quantized::hardswish", {}, {}); + + auto hardswish_ = getObservedQParamOpFusionInfo( + "aten::hardswish_", "quantized::hardswish", {}, {}); + + auto layer_norm = getObservedQParamOpFusionInfo( + "aten::layer_norm", + "quantized::layer_norm", + {"%normalized_shape", "%weight", "%bias", "%eps", "%cudnn_enabled"}, + {"%normalized_shape", "%weight", "%bias", "%eps"}); + + auto group_norm = getObservedQParamOpFusionInfo( + "aten::group_norm", + "quantized::group_norm", + {"%num_groups", "%weight", "%bias", "%eps", "%cudnn_enabled"}, + {"%num_groups", "%weight", "%bias", "%eps"}); + + auto instance_norm = getObservedQParamOpFusionInfo( + "aten::instance_norm", + "quantized::instance_norm", + {"%weight", + "%bias", + "%running_mean", + "%running_var", + "%use_input_stats", + "%momentum", + "%eps", + "%cudnn_enabled"}, + {"%weight", "%bias", "%eps"}); + + return { + {"quantized::conv1d", std::move(conv1d), std::move(quantized_conv1d)}, + {"quantized::conv1d_relu", std::move(conv1d_relu), quantized_conv1d_relu}, + {"quantized::conv1d_relu", + std::move(conv1d_inplace_relu), + std::move(quantized_conv1d_relu)}, + {"quantized::conv2d", std::move(conv2d), std::move(quantized_conv2d)}, + {"quantized::conv2d_relu", std::move(conv2d_relu), quantized_conv2d_relu}, + {"quantized::conv2d_relu", + std::move(conv2d_inplace_relu), + std::move(quantized_conv2d_relu)}, + {"quantized::conv3d", std::move(conv3d), std::move(quantized_conv3d)}, + {"quantized::conv3d_relu", std::move(conv3d_relu), quantized_conv3d_relu}, + {"quantized::conv3d_relu", + std::move(conv3d_inplace_relu), + std::move(quantized_conv3d_relu)}, + {"quantized::conv_transpose1d", + std::move(conv_transpose1d), + std::move(quantized_conv_transpose1d)}, + {"quantized::conv_transpose2d", + std::move(conv_transpose2d), + std::move(quantized_conv_transpose2d)}, + {"quantized::linear", std::move(linear), std::move(quantized_linear)}, + {"quantized::linear_relu", std::move(linear_relu), quantized_linear_relu}, + {"quantized::linear_relu", + std::move(linear_inplace_relu), + std::move(quantized_linear_relu)}, + {"quantized::add_relu", + std::move(add_relu), + quantized_add_relu, + {aten_add_alpha_is_one}}, + {"quantized::add_relu", + std::move(add_inplace_relu), + quantized_add_relu, + {aten_add_alpha_is_one}}, + {"quantized::add_relu", + std::move(inplace_add_relu), + quantized_add_relu, + {aten_add_alpha_is_one}}, + {"quantized::add_relu", + std::move(inplace_add_inplace_relu), + std::move(quantized_add_relu), + {aten_add_alpha_is_one}}, + std::move(add_scalar), + std::move(add_scalar_out), + // note that these must come after quantized::add_scalar and + // quantized::add_scalar_out patterns + {"quantized::add_scalar_relu", + quantized_add_scalar_relu_pattern, + quantized_add_scalar_relu_replacement}, + {"quantized::add_scalar_relu", + quantized_add_scalar_inplace_relu_pattern, + quantized_add_scalar_relu_replacement}, + {"quantized::add_scalar_relu_out", + quantized_add_scalar_relu_out_pattern, + quantized_add_scalar_relu_out_replacement}, + 
{"quantized::add_scalar_relu_out", + quantized_add_scalar_inplace_relu_out_pattern, + quantized_add_scalar_relu_out_replacement}, + {"quantized::add", + std::move(add), + quantized_add, + {aten_add_alpha_is_one}}, + {"quantized::add", + std::move(inplace_add), + std::move(quantized_add), + {aten_add_alpha_is_one}}, + {"quantized::cat", std::move(cat), std::move(quantized_cat)}, + {"quantized::batch_norm", + std::move(batch_norm), + std::move(quantized_batch_norm)}, + {"quantized::batch_norm_relu", + std::move(batch_norm_relu), + quantized_batch_norm_relu}, + {"quantized::batch_norm_relu", + std::move(batch_norm_inplace_relu), + std::move(quantized_batch_norm_relu)}, + std::move(mul_scalar), + std::move(mul_scalar_out), + // note that these must come after quantized::mul_scalar and + // quantized::mul_scalar_out patterns + {"quantized::mul_scalar_relu", + quantized_mul_scalar_relu_pattern, + quantized_mul_scalar_relu_replacement}, + {"quantized::mul_scalar_relu", + quantized_mul_scalar_inplace_relu_pattern, + quantized_mul_scalar_relu_replacement}, + {"quantized::mul_scalar_relu_out", + quantized_mul_scalar_relu_out_pattern, + quantized_mul_scalar_relu_out_replacement}, + {"quantized::mul_scalar_relu_out", + quantized_mul_scalar_inplace_relu_out_pattern, + quantized_mul_scalar_relu_out_replacement}, + {"quantized::mul_relu", std::move(mul_relu), quantized_mul_relu}, + {"quantized::mul_relu", std::move(mul_inplace_relu), quantized_mul_relu}, + {"quantized::mul_relu", std::move(inplace_mul_relu), quantized_mul_relu}, + {"quantized::mul_relu", + std::move(inplace_mul_inplace_relu), + std::move(quantized_mul_relu)}, + {"quantized::mul", std::move(mul), quantized_mul}, + {"quantized::mul", std::move(inplace_mul), std::move(quantized_mul)}, + std::move(hardswish), + std::move(hardswish_), + std::move(layer_norm), + std::move(group_norm), + std::move(instance_norm), + {"quantized::elu", std::move(elu), quantized_elu}, + {"quantized::elu_", std::move(elu_), std::move(quantized_elu)}, + std::move(avg_pool1d), + std::move(avg_pool2d), + std::move(avg_pool3d), + std::move(adaptive_avg_pool1d), + std::move(adaptive_avg_pool2d), + std::move(adaptive_avg_pool3d), + std::move(mean1), + std::move(mean2), + std::move(upsample_nearest1d), + std::move(upsample_nearest2d), + std::move(upsample_nearest3d), + std::move(upsample_linear1d), + std::move(upsample_bilinear2d), + std::move(upsample_trilinear3d), + std::move(upsample_nearest1d_vec), + std::move(upsample_nearest2d_vec), + std::move(upsample_nearest3d_vec), + std::move(upsample_linear1d_vec), + std::move(upsample_bilinear2d_vec), + std::move(upsample_trilinear3d_vec), + std::move(clamp), + std::move(hardtanh), + std::move(hardtanh_), + std::move(leaky_relu), + std::move(leaky_relu_), + // fixed qparam ops + std::move(hardsigmoid), + std::move(hardsigmoid_), + std::move(sigmoid), + std::move(sigmoid_), + std::move(tanh), + std::move(tanh_), + }; +} + +inline std::vector +dynamic_quantized_linear_pattern_and_replacements() { + std::string linear_dynamic = R"( +graph(%packed_params, %a): + %w_quant : Tensor, %b : Tensor? = quantized::linear_unpack(%packed_params) + %w_dequant = aten::dequantize(%w_quant) + %r = aten::linear(%a, %w_dequant, %b) + return (%r) )"; + + // This pattern ignores reduce range + // Set the reduce range to default to true, since qnnpack backend ignores this + // argument. 
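+  // Editorial note, a minimal sketch of how QuantFusionInfo entries such as
+  // the pair below are typically consumed (assuming the SubgraphRewriter API
+  // from subgraph_rewrite.h; `info` and `graph` are hypothetical names):
+  //
+  //   SubgraphRewriter rewriter;
+  //   rewriter.RegisterRewritePattern(info.pattern, info.replacement);
+  //   rewriter.runOnGraph(graph, info.filters);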
+ std::string quantized_linear_dynamic = R"( +graph(%packed_params, %a): + %reduce_range : bool = prim::Constant[value=1]() + %r = quantized::linear_dynamic(%a, %packed_params, %reduce_range) + return (%r) )"; + + return { + {"quantized::linear_dynamic", + std::move(linear_dynamic), + std::move(quantized_linear_dynamic)}, + }; +} + +static std::vector +dynamic_quant_fusion_pattern_and_replacements() { + std::string linear_dynamic = R"( +graph(%packed_params, %a, %reduce_range, %a_dtype): + %a_scale : float, %a_zero_point : int = aten::_choose_qparams_per_tensor(%a, %reduce_range) + %a_quant = aten::quantize_per_tensor(%a, %a_scale, %a_zero_point, %a_dtype) + %a_dequant = aten::dequantize(%a_quant) + %w_quant : Tensor, %b : Tensor? = quantized::linear_unpack(%packed_params) + %w_dequant = aten::dequantize(%w_quant) + %r = aten::linear(%a_dequant, %w_dequant, %b) + return (%r) )"; + + std::string quantized_linear_dynamic = R"( +graph(%packed_params, %a, %reduce_range, %a_dtype): + %r = quantized::linear_dynamic(%a, %packed_params, %reduce_range) + return (%r) )"; + + std::string linear_dynamic_fp16 = R"( +graph(%packed_params, %a): + %w_unpacked : Tensor, %b : Tensor? = quantized::linear_unpack_fp16(%packed_params) + %r = aten::linear(%a, %w_unpacked, %b) + return (%r) )"; + + std::string quantized_linear_dynamic_fp16 = R"( +graph(%packed_params, %a): + %r = quantized::linear_dynamic_fp16(%a, %packed_params) + return (%r) )"; + + return { + {"quantized::linear_dynamic", + std::move(linear_dynamic), + std::move(quantized_linear_dynamic)}, + {"quantized::linear_dynamic_fp16", + std::move(linear_dynamic_fp16), + std::move(quantized_linear_dynamic_fp16)}, + }; +} + +static std::vector linear_prepack_unpack_patterns() { + std::string linear_with_quant = R"( +graph(%a_dequant, %w_quant, %b): + %w_dequant = aten::dequantize(%w_quant) + %r = aten::linear(%a_dequant, %w_dequant, %b) + return (%r) )"; + + std::string linear_with_quant_prepack = R"( +graph(%a_dequant, %w_quant, %b): + %packed_params = quantized::linear_prepack(%w_quant, %b) + %w_quant_unpacked : Tensor, %b_unpacked : Tensor? = quantized::linear_unpack(%packed_params) + %w_dequant = aten::dequantize(%w_quant_unpacked) + %r = aten::linear(%a_dequant, %w_dequant, %b_unpacked) + return (%r) )"; + std::string linear_fp16_with_cast = R"( +graph(%w, %a_dq, %b): + %fp16_tensor = aten::_saturate_weight_to_fp16(%w) + %r = aten::linear(%a_dq, %fp16_tensor, %b) + return (%r) )"; + std::string linear_fp16_with_prepack = R"( +graph(%w, %a_dq, %b): + %packed_params = quantized::linear_prepack_fp16(%w, %b) + %w_unpacked : Tensor, %b_unpacked : Tensor? 
= quantized::linear_unpack_fp16(%packed_params) + %r = aten::linear(%a_dq, %w_unpacked, %b_unpacked) + return (%r) )"; + + return { + {"linear_prepack_unpack", + std::move(linear_with_quant), + std::move(linear_with_quant_prepack)}, + {"linear_fp16_prepack_unpack", + std::move(linear_fp16_with_cast), + std::move(linear_fp16_with_prepack)}, + }; +} + +static std::vector conv_prepack_unpack_patterns() { + std::string conv1d_with_quant = R"( +graph(%a_dequant, %w_quant, %b, %stride, %padding, %dilation, %groups): + %w_dequant = aten::dequantize(%w_quant) + %r = aten::conv1d(%a_dequant, %w_dequant, %b, %stride, %padding, %dilation, %groups) + return (%r) )"; + + std::string conv1d_with_quant_prepack = R"( +graph(%a_dequant, %w_quant, %b, %stride, %padding, %dilation, %groups): + %packed_params : __torch__.torch.classes.quantized.Conv2dPackedParamsBase = quantized::conv1d_prepack(%w_quant, %b, %stride, %padding, %dilation, %groups) + %w_quant_unpacked : Tensor, %b_unpacked : Tensor? = quantized::conv1d_unpack(%packed_params) + %w_dequant = aten::dequantize(%w_quant_unpacked) + %r = aten::conv1d(%a_dequant, %w_dequant, %b_unpacked, %stride, %padding, %dilation, %groups) + return (%r) )"; + + std::string conv2d_with_quant = R"( +graph(%a_dequant, %w_quant, %b, %stride, %padding, %dilation, %groups): + %w_dequant = aten::dequantize(%w_quant) + %r = aten::conv2d(%a_dequant, %w_dequant, %b, %stride, %padding, %dilation, %groups) + return (%r) )"; + + std::string conv2d_with_quant_prepack = R"( +graph(%a_dequant, %w_quant, %b, %stride, %padding, %dilation, %groups): + %packed_params : __torch__.torch.classes.quantized.Conv2dPackedParamsBase = quantized::conv2d_prepack(%w_quant, %b, %stride, %padding, %dilation, %groups) + %w_quant_unpacked : Tensor, %b_unpacked : Tensor? = quantized::conv2d_unpack(%packed_params) + %w_dequant = aten::dequantize(%w_quant_unpacked) + %r = aten::conv2d(%a_dequant, %w_dequant, %b_unpacked, %stride, %padding, %dilation, %groups) + return (%r) )"; + + std::string conv3d_with_quant = R"( +graph(%a_dequant, %w_quant, %b, %stride, %padding, %dilation, %groups): + %w_dequant = aten::dequantize(%w_quant) + %r = aten::conv3d(%a_dequant, %w_dequant, %b, %stride, %padding, %dilation, %groups) + return (%r) )"; + + std::string conv3d_with_quant_prepack = R"( +graph(%a_dequant, %w_quant, %b, %stride, %padding, %dilation, %groups): + %packed_params : __torch__.torch.classes.quantized.Conv3dPackedParamsBase = quantized::conv3d_prepack(%w_quant, %b, %stride, %padding, %dilation, %groups) + %w_quant_unpacked : Tensor, %b_unpacked : Tensor? = quantized::conv3d_unpack(%packed_params) + %w_dequant = aten::dequantize(%w_quant_unpacked) + %r = aten::conv3d(%a_dequant, %w_dequant, %b_unpacked, %stride, %padding, %dilation, %groups) + return (%r) )"; + + std::string conv_transpose1d_with_quant = R"( +graph(%a_dequant, %w_quant, %b, %stride, %padding, %output_padding, %groups, %dilation): + %w_dequant = aten::dequantize(%w_quant) + %r = aten::conv_transpose1d(%a_dequant, %w_dequant, %b, %stride, %padding, %output_padding, %groups, %dilation) + return (%r) )"; + + std::string conv_transpose1d_with_quant_prepack = R"( +graph(%a_dequant, %w_quant, %b, %stride, %padding, %output_padding, %groups, %dilation): + %packed_params : __torch__.torch.classes.quantized.Conv2dPackedParamsBase = quantized::conv_transpose1d_prepack(%w_quant, %b, %stride, %padding, %output_padding, %dilation, %groups) + %w_quant_unpacked : Tensor, %b_unpacked : Tensor? 
= quantized::conv_transpose1d_unpack(%packed_params) + %w_dequant = aten::dequantize(%w_quant_unpacked) + %r = aten::conv_transpose1d(%a_dequant, %w_dequant, %b_unpacked, %stride, %padding, %output_padding, %groups, %dilation) + return (%r) )"; + + std::string conv_transpose2d_with_quant = R"( +graph(%a_dequant, %w_quant, %b, %stride, %padding, %output_padding, %groups, %dilation): + %w_dequant = aten::dequantize(%w_quant) + %r = aten::conv_transpose2d(%a_dequant, %w_dequant, %b, %stride, %padding, %output_padding, %groups, %dilation) + return (%r) )"; + + std::string conv_transpose2d_with_quant_prepack = R"( +graph(%a_dequant, %w_quant, %b, %stride, %padding, %output_padding, %groups, %dilation): + %packed_params : __torch__.torch.classes.quantized.Conv2dPackedParamsBase = quantized::conv_transpose2d_prepack(%w_quant, %b, %stride, %padding, %output_padding, %dilation, %groups) + %w_quant_unpacked : Tensor, %b_unpacked : Tensor? = quantized::conv_transpose2d_unpack(%packed_params) + %w_dequant = aten::dequantize(%w_quant_unpacked) + %r = aten::conv_transpose2d(%a_dequant, %w_dequant, %b_unpacked, %stride, %padding, %output_padding, %groups, %dilation) + return (%r) )"; + + return { + {"conv1d_prepack_unpack", + std::move(conv1d_with_quant), + std::move(conv1d_with_quant_prepack)}, + {"conv2d_prepack_unpack", + std::move(conv2d_with_quant), + std::move(conv2d_with_quant_prepack)}, + {"conv3d_prepack_unpack", + std::move(conv3d_with_quant), + std::move(conv3d_with_quant_prepack)}, + {"conv_transpose1d_prepack_unpack", + std::move(conv_transpose1d_with_quant), + std::move(conv_transpose1d_with_quant_prepack)}, + {"conv_transpose2d_prepack_unpack", + std::move(conv_transpose2d_with_quant), + std::move(conv_transpose2d_with_quant_prepack)}}; +} + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/quantization_type.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/quantization_type.h new file mode 100644 index 0000000000000000000000000000000000000000..ac4afe90ed9ea577a760becf2a2d760a6bd74d60 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/quantization_type.h @@ -0,0 +1,15 @@ +#pragma once +#include +#include + +namespace torch { +namespace jit { + +// Quantization type (dynamic quantization, static quantization). 
+// Should match the Python enum in quantize_jit.py +enum QuantType : std::uint8_t { DYNAMIC = 0, STATIC }; + +std::ostream& operator<<(std::ostream& os, QuantType t); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/register_packed_params.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/register_packed_params.h new file mode 100644 index 0000000000000000000000000000000000000000..c1cbf1b27bb32296d70cc7b2a943750665772daa --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/register_packed_params.h @@ -0,0 +1,20 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace jit { + +using PrePackParamFilterFn = std::function; + +TORCH_API std::unordered_set RegisterPrePackParams( + Module& m, + const std::string& method_name, + const PrePackParamFilterFn& is_packed_param, + const std::string& attr_prefix); + +TORCH_API std::string joinPaths(const std::vector& paths); +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/refine_tuple_types.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/refine_tuple_types.h new file mode 100644 index 0000000000000000000000000000000000000000..75f36313b3a1510dec9ec8107b1e5d1c2a781c49 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/refine_tuple_types.h @@ -0,0 +1,12 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// updates the types of tuples according to the type of their current inputs. +TORCH_API void RefineTupleTypes(std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_exceptions.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_exceptions.h new file mode 100644 index 0000000000000000000000000000000000000000..e029383379f5658208a1f5806710bba7d47ce6b1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_exceptions.h @@ -0,0 +1,23 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Considering prim::RaiseException nodes unreachable, simplify prim::If nodes +// when one of the branches contains prim::RaiseException. +// +// This pass is illegal in general case as the modified graph might not throw +// an exception that the original graph would throw. The purpose of the pass is +// to cleanup the graph in a "risky" way by removing pathways leading to +// RaiseExceptions nodes. In some sense, this pass could be considered as a +// "Release" mode, while the original graph was in a "Debug" mode. +// The pass should only be used when such transformation is guaranteed to be +// safe by some other mechanisms. For instance, when we know exact shapes of +// tensors flowing through the graph and tensors with such shapes never cause +// exceptions. 
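+//
+// Illustrative sketch (editorial addition, not from the original header):
+// given a fragment such as
+//
+//   %cond : bool = aten::gt(%n, %limit)
+//   prim::If(%cond)
+//     block0():
+//       prim::RaiseException(%msg)
+//       -> ()
+//     block1():
+//       -> ()
+//
+// the pass treats block0 as unreachable and folds the prim::If into the
+// contents of block1. Assumed call site:
+//
+//   EliminateExceptions(graph);  // graph: std::shared_ptr<Graph>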
+TORCH_API void EliminateExceptions(std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_expands.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_expands.h new file mode 100644 index 0000000000000000000000000000000000000000..8a484a839e552dc02a4a04e660d9d173e473f2c5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_expands.h @@ -0,0 +1,11 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +TORCH_API void RemoveExpands(const std::shared_ptr& graph); + +} +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_redundant_profiles.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_redundant_profiles.h new file mode 100644 index 0000000000000000000000000000000000000000..b574786c0bb1cf1269816edec2ef5d13980bd5e8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_redundant_profiles.h @@ -0,0 +1,11 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +TORCH_API void RemoveRedundantProfiles(std::shared_ptr& graph); +TORCH_API void RemoveRedundantProfiles(Block* block, AliasDb& db); +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/replacement_of_old_operators.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/replacement_of_old_operators.h new file mode 100644 index 0000000000000000000000000000000000000000..1f3fbf6cac88d082a1a55ae0b9b85667f8d83561 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/replacement_of_old_operators.h @@ -0,0 +1,16 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Find the valid upgrader graph for the upgrader and cache the result +// for later lookups. Will error out if there is no valid upgrader graph +// provided for the upgrader name. 
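+//
+// Editorial sketch (assumed usage, names are illustrative): the
+// ReplaceOldOperatorsWithUpgraders pass declared below is typically run on a
+// freshly loaded graph so that operators serialized under an old schema are
+// rewritten to call their upgrader graphs:
+//
+//   std::shared_ptr<Graph> graph = module.get_method("forward").graph();
+//   ReplaceOldOperatorsWithUpgraders(graph);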
+std::shared_ptr getUpgraderGraph(const std::string& upgrader_name); + +TORCH_API void ReplaceOldOperatorsWithUpgraders(std::shared_ptr graph); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/restore_mutation.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/restore_mutation.h new file mode 100644 index 0000000000000000000000000000000000000000..48ce9fdb9ed208441959974b015026bda98f7f06 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/restore_mutation.h @@ -0,0 +1,63 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { + +// A map which stores if an activation operator can perform type promotion +const std::unordered_map activation_type_promotion_mapping = { + {aten::sigmoid, true}, + {aten::tanh, true}, + {aten::celu, false}, + {aten::elu, false}, + {aten::gelu, false}, + {aten::glu, false}, + {aten::hardshrink, false}, + {aten::hardsigmoid, false}, + {aten::hardswish, false}, + {aten::hardtanh, false}, + {aten::leaky_relu, false}, + {aten::prelu, false}, + {aten::relu6, false}, + {aten::relu, false}, + {aten::rrelu, false}, + {aten::selu, false}, + {aten::silu, false}}; + +class FunctionalToInplaceRewriter { + public: + FunctionalToInplaceRewriter(std::shared_ptr graph); + + bool FunctionalToInplace(Block* block); + + private: + AliasDb* getOrCreateAliasDb() { + if (!aliasDb_) { + aliasDb_ = std::make_unique(graph_); + } + return aliasDb_.get(); + } + + bool CanBeInplace(Node* node); + + std::unique_ptr aliasDb_ = nullptr; + std::shared_ptr graph_; +}; + +// A common application scenario is to apply InplaceToFunctionalActivation +// before some JIT optimization passes, so that those passes are less +// constrained by in-place ops. After those passes are done, we can call +// FunctionalToInplaceActivation to recover in-place activation ops, +// so that we won't lose the performance benefit coming from memory reduction. + +// Replaces functional aten activation ops with their in-place equivalents +TORCH_API bool FunctionalToInplaceActivation( + const std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/specialize_autogradzero.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/specialize_autogradzero.h new file mode 100644 index 0000000000000000000000000000000000000000..83b5e657750b831bb4891569ef1b71cd87a95d1a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/specialize_autogradzero.h @@ -0,0 +1,21 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// propagate autograd zero information through a gradient graph and +// remove grad_of blocks if present. +// Note: this is a very limited pass. It only propagates autograd zeros for +// operations generated by the symbolic autodiff code and cleans up +// AutogradAdds when possible. Outputs of other nodes are conservatively +// marked Unknown and not optimized. 
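+//
+// Editorial sketch (assumed usage; `graph` is a hypothetical forward graph
+// and `differentiate` is the autodiff entry point from autodiff.h):
+//
+//   Gradient grad_spec = differentiate(graph);
+//   specializeAutogradZero(grad_spec.df);  // df holds the backward graph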
+TORCH_API void specializeAutogradZero(std::shared_ptr g); + +struct ProfilingRecord; + +TORCH_API void InsertProfileNodesForSpecializeAutogradZero(ProfilingRecord* pr); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/symbolic_shape_cache.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/symbolic_shape_cache.h new file mode 100644 index 0000000000000000000000000000000000000000..02e00acac08d2d1b625c02524eb51c68569515d4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/symbolic_shape_cache.h @@ -0,0 +1,57 @@ +#pragma once + +#include +#include + +namespace torch { +namespace jit { + +struct TORCH_API CanonicalizedSymbolicShape { + // TODO: Consider in the future if it is reasonable to + // merge code with SymbolicShape or VaryingShape while keeping + // the two not implicitly convertable (and cause bugs). + CanonicalizedSymbolicShape( + const c10::SymbolicShape& orig_shape, + std::unordered_map& ss_map) { + init(orig_shape, ss_map); + } + + CanonicalizedSymbolicShape(c10::SymbolicShape& orig_shape) { + std::unordered_map new_ssmap; + init(orig_shape, new_ssmap); + } + + size_t hash() const; + + c10::SymbolicShape toSymbolicShape( + std::unordered_map& inverse_ss_map) const; + + TORCH_API friend bool operator==( + const CanonicalizedSymbolicShape& a, + const CanonicalizedSymbolicShape& b); + + private: + c10::optional> values_; + + void init( + const c10::SymbolicShape& orig_shape, + std::unordered_map& ss_map); +}; + +// SHAPE CACHE API +TORCH_API c10::optional> +get_cached_shape_function( + const FunctionSchema* schema, + const std::vector& arg_vec); + +TORCH_API void cache_shape_function( + const FunctionSchema* schema, + const std::vector& arg_vec, + const std::vector& ret_vec); + +// For use in test code +TORCH_API void clear_shape_cache(); +TORCH_API size_t get_shape_cache_size(); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/utils/check_alias_annotation.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/utils/check_alias_annotation.h new file mode 100644 index 0000000000000000000000000000000000000000..df491c8ea3d5a85fd53cc9fe9edffce9d76f7910 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/utils/check_alias_annotation.h @@ -0,0 +1,22 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { + +// Verify that alias annotations are correct. See impl for definition of +// "correct". +// +// This function expects a graph with a single op with `unqualifiedOpName`, plus +// the inputs that you would otherwise have passed to the graph executor. 
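+//
+// Editorial sketch (assumed usage from test code; the IR and inputs are
+// illustrative only):
+//
+//   auto graph = std::make_shared<Graph>();
+//   parseIR(R"IR(
+//     graph(%x : Tensor, %y : Tensor):
+//       %one : int = prim::Constant[value=1]()
+//       %z : Tensor = aten::add_(%x, %y, %one)
+//       return (%z))IR", graph.get());
+//   checkAliasAnnotation(graph, {at::rand({2, 2}), at::rand({2, 2})}, "add_");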
+TORCH_API void checkAliasAnnotation( + const std::shared_ptr& graph, + std::vector pythonInputs, + const std::string& unqualifiedOpName); +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/utils/memory_dag.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/utils/memory_dag.h new file mode 100644 index 0000000000000000000000000000000000000000..c455d3413e70425da01a376aa835bacb3d185442 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/utils/memory_dag.h @@ -0,0 +1,175 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +// Uses a compressed index representation for faster comparisons +typedef c10::SparseBitVector<256> MemoryLocations; +namespace torch { +namespace jit { + +struct Element; +struct Value; +class MemoryDAG; + +using AliasTypeSet = std::vector; + +/** + * Helper to build up the points-to graph. + * + * We separate the "building" into a different class because it allows us to + * cache internally to MemoryDAG without worrying about how the DAG structure + * is mutated. + */ +class TORCH_API MemoryDAGBuilder { + public: + MemoryDAGBuilder() = default; + MemoryDAGBuilder(const MemoryDAGBuilder&) = delete; + MemoryDAGBuilder& operator=(const MemoryDAGBuilder&) = delete; + + // Make `from` point at `to`. + void makePointerTo(Element* from, Element* to); + + void addToContainedElements(Element* contained, Element* container); + + // Make a fresh Element (i.e. an Element that doesn't point to anything) and + // return it. + Element* makeFreshValue(const Value* v); + + friend MemoryDAG; + + private: + // `MemoryDAGBuilder` builds up `indexToElementMap_`, then uses + // the map to construct the `MemoryDAG` + std::vector> indexToElementMap_; +}; + +// class MemoryDAG +// +// This class tracks the "A points to B" graph for all values. It is used by +// AliasDb to provide a higher-level API. +// +// We maintain a DAG where: +// - Vertices (called "Elements") represent Values and +// other aliasing entities (e.g. the stuff inside a list) +// - Edges represent a "points-to" relationship. +// +// Leaves in this DAG are entities that don't point to anything, and thus +// correspond to unique "memory locations". +// +// So, by traversing the "points-to" graph to the leaves, you can determine +// which memory locations an element may point to. +class TORCH_API MemoryDAG { + public: + explicit MemoryDAG(std::unique_ptr builder) + : indexToElementMap_(std::move(builder->indexToElementMap_)) {} + // explicitly delete copy constructor because otherwise windows build is + // confused for an exported class see + // https://stackoverflow.com/a/51033485/105137 + MemoryDAG(const MemoryDAG&) = delete; + MemoryDAG& operator=(const MemoryDAG&) = delete; + + // Return the unique memory locations that `Element` might represent. + const MemoryLocations& getMemoryLocations(const Element* e) const; + + // Do `a` and `b` potentially share a memory location? + bool mayAlias(const Element* a, const Element* b) const; + + // Does `a` hold reference to any memory that is stored in `b`, or vice versa? 
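As a usage sketch for MemoryDAGBuilder and MemoryDAG, built from the APIs declared in this header; the helper function and the way the Values are obtained (from a throwaway Graph) are illustrative assumptions:

#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/passes/utils/memory_dag.h>

void pointsToSketch() {
  // Any Values will do; here they come from a scratch Graph.
  auto graph = std::make_shared<torch::jit::Graph>();
  torch::jit::Value* a = graph->addInput();
  torch::jit::Value* b = graph->addInput();

  auto builder = std::make_unique<torch::jit::MemoryDAGBuilder>();
  torch::jit::Element* ea = builder->makeFreshValue(a);
  torch::jit::Element* eb = builder->makeFreshValue(b);
  builder->makePointerTo(ea, eb); // `a` may point to the memory behind `b`

  // Freeze the builder into an immutable MemoryDAG and query it; the Element
  // pointers stay valid because their ownership moves into the DAG.
  torch::jit::MemoryDAG dag(std::move(builder));
  bool aliased = dag.mayAlias(ea, eb); // expected to be true given the edge above
  (void)aliased;
}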
+ bool mayContainAlias(const Element* a, const Element* b) const; + + bool mayContainAlias(const Element* a, const at::ArrayRef b) const; + + bool mayContainAlias( + const at::ArrayRef a, + const at::ArrayRef b) const; + + // Converts from the compressed index representation + const Element* fromIndex(unsigned x) const; + Element* fromIndex(unsigned x); + void collectAllContainedMemoryLocations( + const Element* elem, + MemoryLocations& cont) const; + + /** + * The following methods are special cases where we need to mutate the + * internals of MemoryDAG for efficiency reasons. Don't call them unless you + * know what you're doing! In particular, don't add new mutating methods + * without ensuring that you are maintaining cache consistency for memory + * locations. + */ + + // Adding wildcards can trigger extremely expensive cache invalidations. This + // method adds them in a more efficient cache-aware way. + void setWildcards( + const std::unordered_set& wildcards, + const ska::flat_hash_map& elementMap, + const std::function& getWildcardElement); + Element* unsafeMakeFreshValue(const Value* v); + + private: + const MemoryLocations& getAllContainedMemoryLocations( + const Element* elem) const; + void collectAllContainedMemoryLocationsImpl( + const Element* elem, + MemoryLocations& cont) const; + std::vector> indexToElementMap_; +}; + +// `Element` represents a vertex in the points-to graph. It represents +// anything that could have an aliasing relationship--mostly IR +// `Value`s, but also wildcards or the type inside a container (e.g. `T` +// in `List[T]`) +struct Element { + Element(const Value* value_, unsigned index_); + // wildcard constructor + explicit Element(unsigned index_); + + // Index into the owning DAG's bit vector that represents this element. + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + unsigned index; + + // All elements that this element *may* point to. It's possible to have + // multiple elements that you might point to due to control flow/complex ops + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + MemoryLocations pointsTo; + // Backreference for points-to. + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + MemoryLocations pointedFrom; + + // Elements can contain other elements (e.g. List[Tensor]) + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + MemoryLocations containedElements; + + // The values that this element corresponds to. May be empty if this element + // doesn't represent a first-class value. + // This is for debug information only. + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + std::unordered_set values; + + private: + // Make `from` point at `to`. + void makePointerTo(Element* from, Element* to); + + friend class MemoryDAG; + // We memoize the results of `getMemoryLocations` to speed up queries. + // A nullopt means that this cache is not yet populated. Since `MemoryDAG` is + // immutable, this cache should never need to be invalidated. 
+ mutable c10::optional cachedMemoryLocations_; + + mutable c10::optional cachedAllContainedMemoryLocations_; +}; + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/utils/op_registry.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/utils/op_registry.h new file mode 100644 index 0000000000000000000000000000000000000000..d68d1d6192d6c3b039181a2ebce3026cf9cee8d4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/utils/op_registry.h @@ -0,0 +1,31 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace jit { +// Moved from shape_analysis.cpp + +// Requirements: +// dims : preserved from the first argument +// scalar type : preserved from the first argument (doesn't have to +// match other arguments) +// device : always matching and preserved +// tensor inputs : * +// tensor outputs : 1 +// NB: those ops (with slight adjustments) are good candidates for restarts. +// Knowing the type and device of weights or biases is usually enough to +// infer the output type. +std::shared_ptr nn_ops_first_input_preserving(); + +// Requirements: +// dims : Changed from first argument +// scalar type : preserved from the first argument +// device : always matching and preserved +// tensor inputs : 1 +// tensor outputs : 1 +std::shared_ptr ops_one_tensor_in_shape_transform(); +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/utils/optimization_utils.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/utils/optimization_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..6018fbea6daa9e3b28c9aa45fdeeb25bc162cf37 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/utils/optimization_utils.h @@ -0,0 +1,14 @@ + +#pragma once + +#include + +namespace torch { +namespace jit { + +// Checks if the parameters, not including the +// first param are all constants. +bool nonConstantParameters(Node* n); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/utils/subgraph_utils.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/utils/subgraph_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..dd761409ca2d008260119e01ed731862d5b26658 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/utils/subgraph_utils.h @@ -0,0 +1,75 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace jit { + +// Utilities for dealing with nodes that contain subgraphs. +// +// They handle the complexity of editing inputs/outputs as you merge nodes in +// and out of subgraphs. +namespace SubgraphUtils { + +// Create a new subgraph node that contains only `n`. The new subgraph will have +// `subgraphKind` as its type. +// +// `n` is destroyed. +// +// Returns the new subgraph node. +TORCH_API Node* createSingletonSubgraph(Node* n, Symbol subgraphKind); + +// Creates a new subgraph that only contains `n`, amd updates the new outputs +// of the subgraph to have the aliasing properties of the original `n` outputs +TORCH_API Node* createSingletonSubgraphAndUpdateAliasing( + Node* to_merge, + Symbol subgraphKind, + AliasDb& db); + +// Merge a node into a subgraph node. 
If `toMerge` is also a subgraph, the +// subgraphs are merged. +// If `destroyNode` is true `toMerge` is destroyed. +// An optional argument 'vmap' could be used to retrieve value mappings. +// Values will be mapped to their new subgraph values +TORCH_API void mergeNodeIntoSubgraph( + Node* toMerge, + Node* subgraphNode, + bool destroyNode = true); + +// Merges a node into a subgraph node, and updates the new outputs of the +// subgraph to have the aliasing properties of the corresponding `to_merge` +// outputs +TORCH_API void mergeNodeIntoSubgraphAndUpdateAliasing( + Node* to_merge, + Node* subgraphNode, + AliasDb& db); + +TORCH_API std::vector unmergeAliasedOutputs( + Node* subgraphNode, + AliasDb& db); + +// Move nodes from a subgraph node to the outer graph. +// `subgraphNode` is destroyed. +TORCH_API void unmergeSubgraph(Node* subgraphNode); + +// Move `node_to_unmerge` and its descendants after `subgraphNode` +// promotes any dependencies of `node_to_unmerge` to subgraphNode outputs +TORCH_API void unmergeNode(Node* node_to_unmerge, Node* subgraphNode); + +TORCH_API bool unmergeOutputsAlisingInputs(Node* subgraphNode); + +TORCH_API bool unmergeAliasedOutputs(Node* subgraphNode); + +// Convenience function +std::shared_ptr getSubgraph(Node* n); + +TORCH_API std::string generateNameForGraph( + const std::shared_ptr& graph, + size_t maxlen = 40, + const std::string& prefix = "fused"); + +} // namespace SubgraphUtils +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/variadic_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/variadic_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..cdd89bbcc22fffffef677a570923d469a6f129bd --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/variadic_ops.h @@ -0,0 +1,31 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Try to replace an op that takes a list input with another op that takes a +// variadic number of arguments. +TORCH_API bool UseVariadicOp( + const std::shared_ptr& graph, + NodeKind op, + NodeKind variadic_op); + +TORCH_API bool RemoveListMutationAndUseVariadicOp( + const std::shared_ptr& graph, + NodeKind op, + NodeKind variadic_op); + +// Convenient functions for replacing aten::stack/aten::cat with their +// variadic versions. 
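For instance, the generic UseVariadicOp entry point above can be pointed at aten::cat and a variadic counterpart. The wrapper below and the symbol spelling via fromQualString are illustrative; UseVariadicCat, declared next, bundles the same choice:

#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/passes/variadic_ops.h>

// Assumes `graph` contains the usual pattern
//   %list : Tensor[] = prim::ListConstruct(%a, %b, %c)
//   %out  : Tensor   = aten::cat(%list, %dim)
// and tries to collapse it into a single variadic node such as
//   %out : Tensor = prim::VarConcat(%a, %b, %c, %dim)
// Returns true if any rewrite happened.
bool useVariadicCatSketch(const std::shared_ptr<torch::jit::Graph>& graph) {
  return torch::jit::UseVariadicOp(
      graph,
      c10::Symbol::fromQualString("aten::cat"),
      c10::Symbol::fromQualString("prim::VarConcat"));
}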
+TORCH_API bool UseVariadicCat(const std::shared_ptr& graph); +TORCH_API bool RemoveListMutationAndUseVariadicCat( + const std::shared_ptr& graph); + +TORCH_API bool UseVariadicStack(const std::shared_ptr& graph); +TORCH_API bool RemoveListMutationAndUseVariadicStack( + const std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/vulkan_rewrite.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/vulkan_rewrite.h new file mode 100644 index 0000000000000000000000000000000000000000..395d885e8e2c3c99f5e1a6d4279c9e0e26894d07 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/vulkan_rewrite.h @@ -0,0 +1,18 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace jit { +TORCH_API void vulkanInsertPrePackedOps(std::shared_ptr& graph); +TORCH_API void vulkanInsertPrePackedOps(script::Module& module); +TORCH_API void vulkanFusePrePackedConvWithClamp(script::Module& module); +TORCH_API void vulkanFoldPrePackingOps(script::Module& module); +TORCH_API script::Module vulkanOptimizeForMobile( + const script::Module& module, + const std::set& optimization_blocklist, + const std::vector& preserved_methods); +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/export.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/export.h new file mode 100644 index 0000000000000000000000000000000000000000..3a56cfc7788fb81a55ddbad966f44afd9362c73b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/export.h @@ -0,0 +1,280 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace ONNX_NAMESPACE { +class ModelProto; +} + +namespace torch { +namespace jit { + +// This map is used to keep track of parameters that should be exported +// externally. When `defer_weight_export` is true, the returned map contains +// kv pairs that map {external reference name} -> {at::Tensor to be exported}. +// It is the responsibility of the caller to export these appropriately. +// +// For example, when exporting to a zip archive, the caller may write out files +// for each entry in the export map, with the filename being the key and the +// file contents being the raw tensor data. +using RawDataExportMap = std::unordered_map; + +using SymbolDimMap = std::map; + +using NodeNameMap = std::unordered_map; + +// Used for modularized export settling function and node attributes. 
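Stepping back to vulkan_rewrite.h above, a minimal sketch of how its end-to-end entry point might be driven; the file names are placeholders, and passing empty braces assumes the optimization blocklist and preserved-method list may simply be left empty:

#include <torch/script.h>
#include <torch/csrc/jit/passes/vulkan_rewrite.h>

void prepareVulkanModule() {
  torch::jit::Module m = torch::jit::load("model.pt");
  // Insert prepacked ops, fuse them with clamps, and fold the prepacking;
  // an empty blocklist disables nothing, and no extra methods are requested
  // to be preserved beyond the defaults.
  torch::jit::Module optimized = torch::jit::vulkanOptimizeForMobile(
      m,
      /*optimization_blocklist=*/{},
      /*preserved_methods=*/{});
  optimized.save("model_vulkan.pt");
}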
+using NodeAttrNameMap = std:: + unordered_map>; + +TORCH_API std::tuple< + std::shared_ptr<::ONNX_NAMESPACE::ModelProto>, + RawDataExportMap, + SymbolDimMap, + bool, + NodeNameMap> +export_onnx( + const std::shared_ptr& graph, + const std::map& initializers, + int64_t onnx_opset_version, + const std::unordered_map< + std::string, + std::unordered_map>& dynamic_axes, + bool defer_weight_export = false, + ::torch::onnx::OperatorExportTypes operator_export_type = + ::torch::onnx::OperatorExportTypes::ONNX, + bool strip_doc_string = true, + bool keep_initializers_as_inputs = true, + const std::map& custom_opsets = {}, + bool add_node_names = true, + bool use_external_data_format = false, + const std::string& onnx_file_path = std::string(), + const NodeAttrNameMap& node_attr_to_name = {}); + +TORCH_API std::string serialize_model_proto_to_string( + const std::shared_ptr<::ONNX_NAMESPACE::ModelProto>& model_proto); + +TORCH_API void check_onnx_proto(const std::string& proto_string); + +// Serializer for both oldsyle and unified format TorchScript serialization +class TORCH_API ScriptModuleSerializer { + public: + explicit ScriptModuleSerializer( + caffe2::serialize::PyTorchStreamWriter& export_writer) + : writer_(export_writer), current_source_range_tag_(0) {} + + void writeFiles(const std::string& code_dir); + void serialize( + const Module& module, + const ExtraFilesMap& extra_files, + bool bytecode_format, + bool save_mobile_debug_info); + void serialize_unified_format(Module& module, uint64_t script_module_id); + SerializationStorageContext& storage_context(); + + ~ScriptModuleSerializer() = default; + + private: + void convertNamedType(const c10::NamedTypePtr& class_type); + void convertTypes(const at::NamedTypePtr& root_type); + void writeExtraFiles(const Module& module, const ExtraFilesMap& extra_files); + void writeByteCode(const Module& module, bool save_mobile_debug_info); + void writeArchive( + const IValue& value, + const std::string& archive_name, + const std::string& archive_dir, + const std::string& tensor_dir, + bool use_storage_context = false, + bool skip_tensor_data = false); + void updateSourceRangeTags(const SourceRangeRecords& ranges); + + caffe2::serialize::PyTorchStreamWriter& writer_; + std::vector constant_table_; + + std::unordered_set converted_types_; + PrintDepsTable class_deps_; + TypeNameUniquer type_name_uniquer_; + // qualifier, e.g. '__torch__.Bar' -> PythonPrint for the file that will be + // created + OrderedDict file_streams_; + // Used to keep references of storages around during serialization to solve + // for ABA memory reuse problem hit when storages are created/destroyed + // during serialization process. Also used to coordinate sharing of storages + // between Script and eager modules in torch.package. + SerializationStorageContext storage_context_; + + // Uniquely identifies a SourceRange in a model. + // SourceRanges are associated with Nodes of Graphs. + // However for mobile deployment we dont intend to ship + // full JIT with capabilities of reading code and constructing + // graphs. + // Instead we serialize the Code generated from graph of the methods. + // Code is serialized in bytecode format that contains instructions + // corresponding to the nodes of the graph. Since original graph is gone, the + // question is how do we identify where the ops, in serialized bytecode, come + // from in original model code. We do this in two parts. + // 1. Associate a unique tag to SourceRange. + // 2. Serialize this unique_tag. 
+ // 2.1 Meaning save instead of + // + // 3. During serializing model for mobile, i.e. bytecode generation, + // save unique tag of SourceRange corresponding to the Node. + // 4. During deserialization, read all the debug_pkl, to construct a map + // of and use tag saved with OPs in bytecode + // to lookup the source range. + // Strictly speaking we will serialize InlinedCallStack directly, which + // contains SourceRange. This way we have access to entire callstack and not + // just source information about where the node is, since bytecode inlines the + // graph before saving it. + SourceRangeTagMap source_range_tags_; + int64_t current_source_range_tag_; +}; + +// For testing purposes +TORCH_API std::string pretty_print_onnx( + const std::shared_ptr& graph, + const std::map& initializers, + int64_t onnx_opset_version, + bool defer_weight_export, + ::torch::onnx::OperatorExportTypes operator_export_type = + ::torch::onnx::OperatorExportTypes::ONNX, + bool google_printer = false, + bool keep_initializers_as_inputs = true, + const std::map& custom_opsets = {}, + bool add_node_names = true); + +TORCH_API void ExportModule( + const Module& module, + std::ostream& out, + const ExtraFilesMap& metadata = ExtraFilesMap(), + bool bytecode_format = false, + bool save_mobile_debug_info = false, + bool use_flatbuffer = false); + +TORCH_API void ExportModule( + const Module& module, + const std::string& filename, + const ExtraFilesMap& metadata = ExtraFilesMap(), + bool bytecode_format = false, + bool save_mobile_debug_info = false, + bool use_flatbuffer = false); + +TORCH_API void ExportModule( + const Module& module, + const std::function& writer_func, + const ExtraFilesMap& metadata = ExtraFilesMap(), + bool bytecode_format = false, + bool save_mobile_debug_info = false, + bool use_flatbuffer = false); + +// Write the bytes of a pickle archive and the tensors referenced inside that +// archive +TORCH_API void writeArchiveAndTensors( + const std::string& archive_name, + const char* pickle_bytes, + size_t size, + const std::vector& tensors, + caffe2::serialize::PyTorchStreamWriter& out); + +// Surrounding system can install an additional hook to produce extra files +// with metadata based on environment every time a module is serialized. +using ExportModuleExtraFilesHook = std::function; +TORCH_API void SetExportModuleExtraFilesHook(ExportModuleExtraFilesHook hook); + +/** + * Generates new bytecode for a Script module and returns what the op list + * would be for a LiteScriptModule based off the current code base. If you + * have a LiteScriptModule and want to get the currently present + * list of ops call _export_operator_list instead. + */ +TORCH_API std::vector export_opnames(const Module& m); + +struct TORCH_API BytecodeEmitMode { + static bool is_default_value_for_unspecified_arg_enabled(); + static void set_default_value_for_unspecified_arg_enabled(bool enabled); + + static bool is_default_args_before_out_args_enabled(); + static void set_default_args_before_out_args_enabled(bool enabled); + + static bool is_emit_promoted_ops_enabled(); + static void set_default_emit_promoted_ops_enabled(bool enabled); +}; + +// RAII guard to switch the way JIT emits the bytecode for inputs. +// default_value_for_unspecified_arg: +// true: instruction of default argument values (like LOADC) is emitted. +// false: instruction of default argument values are not emitted. Instead +// they are fetched from operator schema. 
+// default_args_before_out_args (to forward compatibile support +// operators allowing out arguments and default arguments): +// true: the number of specified arguments will deserialized to (#all_args - +// #default_args). false: the number of specified arguments will deserialized to +// (#all_args). +struct TORCH_API BytecodeEmitModeGuard { + BytecodeEmitModeGuard( + bool enable_default_value_for_unspecified_arg, + bool enable_default_args_before_out_args, + bool enable_emit_promoted_ops) + : prev_default_value_for_unspecified_arg_mode( + BytecodeEmitMode::is_default_value_for_unspecified_arg_enabled()), + prev_default_args_before_out_args( + BytecodeEmitMode::is_default_args_before_out_args_enabled()), + prev_default_emit_promoted_ops( + BytecodeEmitMode::is_emit_promoted_ops_enabled()) { + BytecodeEmitMode::set_default_value_for_unspecified_arg_enabled( + enable_default_value_for_unspecified_arg); + BytecodeEmitMode::set_default_args_before_out_args_enabled( + enable_default_args_before_out_args); + BytecodeEmitMode::set_default_emit_promoted_ops_enabled( + enable_emit_promoted_ops); + } + ~BytecodeEmitModeGuard() { + BytecodeEmitMode::set_default_value_for_unspecified_arg_enabled( + prev_default_value_for_unspecified_arg_mode); + BytecodeEmitMode::set_default_args_before_out_args_enabled( + prev_default_args_before_out_args); + BytecodeEmitMode::set_default_emit_promoted_ops_enabled( + prev_default_emit_promoted_ops); + } + bool prev_default_value_for_unspecified_arg_mode; + bool prev_default_args_before_out_args; + bool prev_default_emit_promoted_ops; +}; + +TORCH_API IValue to_tuple(std::vector ivalues); +TORCH_API IValue +Table(const std::vector>& entries); + +// TODO remove these switches once interface call is rolled out. +TORCH_API void enableMobileInterfaceCallExport(); +bool getMobileInterfaceCallExport(); + +TORCH_API CompilationOptions getOptionsFromGlobal(); + +TORCH_API void save_jit_module( + const Module& module, + const std::string& filename, + const ExtraFilesMap& extra_files = ExtraFilesMap()); + +TORCH_API DetachedBuffer::UniqueDetachedBuffer save_jit_module_to_bytes( + const Module& module, + const ExtraFilesMap& extra_files = ExtraFilesMap()); + +TORCH_API void save_jit_module_to_write_func( + const Module& module, + const ExtraFilesMap& extra_files, + bool save_mobile_debug_info, + const std::function& writer_func); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/flatbuffer_serializer_jit.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/flatbuffer_serializer_jit.h new file mode 100644 index 0000000000000000000000000000000000000000..b43ab831f17735ae262b36b724bed5215c492f7c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/flatbuffer_serializer_jit.h @@ -0,0 +1,11 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +TORCH_API bool register_flatbuffer_all(); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/import.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/import.h new file mode 100644 index 0000000000000000000000000000000000000000..c8379f38810f70990969ca6b58afdb58748db87e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/import.h @@ -0,0 +1,157 @@ +#pragma once + +#include 
+#include +#include +#include + +#include + +namespace caffe2 { +namespace serialize { +class ReadAdapterInterface; +} // namespace serialize +} // namespace caffe2 + +namespace torch { +namespace jit { + +class DeserializationStorageContext; + +TORCH_API Module import_ir_module( + std::shared_ptr cu, + const std::string& filename, + c10::optional device = c10::nullopt, + bool load_debug_files = true); + +TORCH_API Module import_ir_module( + std::shared_ptr cu, + std::istream& in, + c10::optional device = c10::nullopt, + bool load_debug_files = true); + +TORCH_API Module import_ir_module( + std::shared_ptr cu, + std::unique_ptr rai, + c10::optional device = c10::nullopt, + bool load_debug_files = true); + +TORCH_API Module import_ir_module( + std::shared_ptr cu, + const std::string& filename, + c10::optional device, + ExtraFilesMap& extra_files, + bool load_debug_files = true, + bool restore_shapes = false); + +// For reading unified serialization format from torch.Package +TORCH_API Module import_ir_module( + std::shared_ptr cu, + std::shared_ptr reader, + std::shared_ptr storage_context, + c10::optional device, + std::string ts_id /* torchscript identifier inside package */); + +TORCH_API Module import_ir_module( + std::shared_ptr cu, + std::istream& in, + c10::optional device, + ExtraFilesMap& extra_files, + bool load_debug_files = true, + bool restore_shapes = false); + +TORCH_API Module import_ir_module( + std::shared_ptr cu, + std::unique_ptr rai, + c10::optional device, + ExtraFilesMap& extra_files, + bool load_debug_files = true); + +TORCH_API Module import_ir_module( + std::shared_ptr cu, + std::shared_ptr rai, + c10::optional device, + ExtraFilesMap& extra_files, + bool load_debug_files = true); + +/// Loads a serialized `Module` from the given `istream`. +/// +/// The istream must contain a serialized `Module`, exported via +/// `torch::jit::ExportModule` in C++. +TORCH_API Module load( + std::istream& in, + c10::optional device = c10::nullopt, + bool load_debug_files = true); + +TORCH_API Module load( + std::istream& in, + c10::optional device, + ExtraFilesMap& extra_files, + bool load_debug_files = true); + +/// Loads a serialized `Module` from the given `filename`. +/// +/// The file stored at the location given in `filename` must contain a +/// serialized `Module`, exported either via `ScriptModule.save()` in +/// Python or `torch::jit::ExportModule` in C++. +TORCH_API Module load( + const std::string& filename, + c10::optional device = c10::nullopt, + bool load_debug_files = true); + +TORCH_API Module load( + const std::string& filename, + c10::optional device, + ExtraFilesMap& extra_files, + bool load_debug_files = true); + +/// Loads a serialized `Module` from the given shared_ptr `rai`. +/// +/// The reader adapter, which is for customized input stream, must contain a +/// serialized `Module`, exported either via `ScriptModule.save()` in +/// Python or `torch::jit::ExportModule` in C++. 
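A brief sketch of the file-based load overloads declared above; the file name, tensor shape, and the requested extra-file key are illustrative:

#include <torch/script.h>

void loadAndRun() {
  // Deserialize a module saved via ScriptModule.save() in Python or
  // torch::jit::ExportModule in C++.
  torch::jit::Module module = torch::jit::load("model.pt");

  // Variant that also pulls out extra files stored next to the model; the
  // loader fills in the values for the requested keys.
  torch::jit::ExtraFilesMap extra_files{{"metadata.json", ""}};
  torch::jit::Module with_extras =
      torch::jit::load("model.pt", c10::nullopt, extra_files);
  (void)with_extras;

  std::vector<c10::IValue> inputs;
  inputs.push_back(torch::ones({1, 3}));
  c10::IValue out = module.forward(inputs);
  (void)out;
}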
+TORCH_API Module load( + std::shared_ptr rai, + c10::optional device = c10::nullopt, + bool load_debug_files = true); + +TORCH_API Module load( + std::shared_ptr rai, + c10::optional device, + ExtraFilesMap& extra_files, + bool load_debug_files = true); + +TORCH_API Module jitModuleFromSourceAndConstants( + const IValue& ivalue, + const ExtraFilesMap& source, + const std::vector& constants, + int32_t version); + +TORCH_API Module parse_and_initialize_jit_module( + std::shared_ptr data, + size_t size, + ExtraFilesMap& extra_files, + c10::optional device = c10::nullopt); + +TORCH_API Module load_jit_module_from_file( + const std::string& filename, + ExtraFilesMap& extra_files, + c10::optional device = c10::nullopt); + +TORCH_API Module load_jit_module_from_stream( + std::istream& in, + ExtraFilesMap& extra_files, + c10::optional device = c10::nullopt); + +TORCH_API Module parse_and_initialize_jit_module( + std::shared_ptr data, + size_t size, + ExtraFilesMap& extra_files, + c10::optional device); + +TORCH_API c10::intrusive_ptr ObjLoaderFunc( + const at::StrongTypePtr& type, + IValue input); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/import_export_functions.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/import_export_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..0de156bfb753c4aa78c7a0f62d9a167acb205ab9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/import_export_functions.h @@ -0,0 +1,16 @@ +#pragma once +#include + +// Functions that are used in both import and export processes +namespace torch { +namespace jit { +using c10::IValue; +IValue expect_field( + c10::ivalue::TupleElements& elements, + const std::string& expected_name, + size_t entry); +std::string operator_str( + const std::string& name, + const std::string& overloadname); +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/import_source.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/import_source.h new file mode 100644 index 0000000000000000000000000000000000000000..9a720a81bcbb2085a4f3f21ff5e73d45d9afaf1b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/import_source.h @@ -0,0 +1,103 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { + +using SourceLoader = std::function(const std::string&)>; + +struct SourceImporterImpl : public Resolver, + std::enable_shared_from_this { + SourceImporterImpl( + std::shared_ptr cu, + const std::vector* constant_table, + SourceLoader source_loader, + size_t version); + TypePtr findNamedType(const QualifiedName& name); + Function* findFunction(const QualifiedName& name); + void parseSourceIfNeeded(const std::string& qualifier); + void LEGACY_import_methods( + const Module& mod, + const std::shared_ptr& src); + + std::shared_ptr resolveValue( + const std::string& name, + GraphFunction& m, + const SourceRange& loc) override; + TypePtr resolveType(const std::string& name, const SourceRange& loc) override; + + private: + void importFunction(const std::string& qualifier, const Def& def); + void importNamedType(const std::string& 
qualifier, const ClassDef& class_def); + c10::optional attributeAssignmentSpecialHandlingHack( + const QualifiedName& qualified_classname, + const Assign& assign); + void importClass( + const QualifiedName& qualified_classname, + const ClassDef& class_def, + bool is_module); + void importEnum( + const QualifiedName& qualified_name, + const ClassDef& enum_def); + void importNamedTuple( + const QualifiedName& qualified_name, + const ClassDef& named_tuple_def); + + void parsePossibleVersionNumber(Lexer& L); + + void parseImports(Lexer& L); + + std::shared_ptr cu_; + std::unordered_map> env_; + SourceLoader source_loader_; + c10::optional version_ = c10::nullopt; + std::unordered_set loaded_sources_; + // named types and functions loaded from a file but not yet defined because + // their type has not been requested yet. + std::unordered_map to_be_defined_; +}; + +// Given a directory of serialized TorchScript sources, +// This class allows the loading of individual named types in source. +// Resolves the dependencies between source files and parses +// the source files as necessary. + +struct TORCH_API SourceImporter { + SourceImporter( + // The compilation unit that will own the imported source + std::shared_ptr cu, + const std::vector* constant_table, + SourceLoader loader, + size_t version); + + TypePtr loadType(const QualifiedName& name) const; + + // Add the methods defined in `src` to the module `mod`, using SourceImporter + // to resolve any classes via loadType + void LEGACY_import_methods( + const Module& mod, + const std::shared_ptr& src); + ~SourceImporter(); + + private: + std::shared_ptr pImpl; +}; + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/mobile_bytecode_generated.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/mobile_bytecode_generated.h new file mode 100644 index 0000000000000000000000000000000000000000..cffe8bc7a636457ea3452fba9c040a71cb71fa5e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/mobile_bytecode_generated.h @@ -0,0 +1,2599 @@ +// automatically generated by the FlatBuffers compiler, do not modify + + +#ifndef FLATBUFFERS_GENERATED_MOBILEBYTECODE_TORCH_JIT_MOBILE_SERIALIZATION_H_ +#define FLATBUFFERS_GENERATED_MOBILEBYTECODE_TORCH_JIT_MOBILE_SERIALIZATION_H_ + +#include "flatbuffers/flatbuffers.h" + +// Ensure the included flatbuffers.h is the same version as when this file was +// generated, otherwise it may not be compatible. 
+static_assert(FLATBUFFERS_VERSION_MAJOR == 23 && + FLATBUFFERS_VERSION_MINOR == 3 && + FLATBUFFERS_VERSION_REVISION == 3, + "Non-compatible flatbuffers version included"); + +namespace torch { +namespace jit { +namespace mobile { +namespace serialization { + +struct Int; + +struct Bool; + +struct Double; + +struct PerTensorAffineSchema; + +struct QuantizedSchema; +struct QuantizedSchemaBuilder; + +struct TensorMetadata; +struct TensorMetadataBuilder; + +struct String; +struct StringBuilder; + +struct Device; +struct DeviceBuilder; + +struct List; +struct ListBuilder; + +struct IntList; +struct IntListBuilder; + +struct DoubleList; +struct DoubleListBuilder; + +struct BoolList; +struct BoolListBuilder; + +struct Tuple; +struct TupleBuilder; + +struct Dict; +struct DictBuilder; + +struct ObjectType; +struct ObjectTypeBuilder; + +struct Object; +struct ObjectBuilder; + +struct ComplexDouble; + +struct EnumValue; +struct EnumValueBuilder; + +struct Instruction; + +struct Operator; +struct OperatorBuilder; + +struct Arg; +struct ArgBuilder; + +struct Schema; +struct SchemaBuilder; + +struct DebugInfo; +struct DebugInfoBuilder; + +struct Function; +struct FunctionBuilder; + +struct StorageData; +struct StorageDataBuilder; + +struct IValue; +struct IValueBuilder; + +struct ExtraFile; +struct ExtraFileBuilder; + +struct Module; +struct ModuleBuilder; + +enum class TypeType : uint8_t { + UNSET = 0, + CLASS_WITH_FIELD = 1, + CUSTOM_CLASS = 2, + CLASS_WITH_SETSTATE = 3, + NON_OBJ = 4, + MIN = UNSET, + MAX = NON_OBJ +}; + +inline const TypeType (&EnumValuesTypeType())[5] { + static const TypeType values[] = { + TypeType::UNSET, + TypeType::CLASS_WITH_FIELD, + TypeType::CUSTOM_CLASS, + TypeType::CLASS_WITH_SETSTATE, + TypeType::NON_OBJ + }; + return values; +} + +inline const char * const *EnumNamesTypeType() { + static const char * const names[6] = { + "UNSET", + "CLASS_WITH_FIELD", + "CUSTOM_CLASS", + "CLASS_WITH_SETSTATE", + "NON_OBJ", + nullptr + }; + return names; +} + +inline const char *EnumNameTypeType(TypeType e) { + if (::flatbuffers::IsOutRange(e, TypeType::UNSET, TypeType::NON_OBJ)) return ""; + const size_t index = static_cast(e); + return EnumNamesTypeType()[index]; +} + +enum class IValueUnion : uint8_t { + NONE = 0, + Int = 1, + Bool = 2, + Double = 3, + ComplexDouble = 4, + TensorMetadata = 5, + String = 6, + List = 7, + Tuple = 8, + Dict = 9, + Object = 10, + IntList = 11, + DoubleList = 12, + BoolList = 13, + Device = 14, + EnumValue = 15, + Function = 16, + MIN = NONE, + MAX = Function +}; + +inline const IValueUnion (&EnumValuesIValueUnion())[17] { + static const IValueUnion values[] = { + IValueUnion::NONE, + IValueUnion::Int, + IValueUnion::Bool, + IValueUnion::Double, + IValueUnion::ComplexDouble, + IValueUnion::TensorMetadata, + IValueUnion::String, + IValueUnion::List, + IValueUnion::Tuple, + IValueUnion::Dict, + IValueUnion::Object, + IValueUnion::IntList, + IValueUnion::DoubleList, + IValueUnion::BoolList, + IValueUnion::Device, + IValueUnion::EnumValue, + IValueUnion::Function + }; + return values; +} + +inline const char * const *EnumNamesIValueUnion() { + static const char * const names[18] = { + "NONE", + "Int", + "Bool", + "Double", + "ComplexDouble", + "TensorMetadata", + "String", + "List", + "Tuple", + "Dict", + "Object", + "IntList", + "DoubleList", + "BoolList", + "Device", + "EnumValue", + "Function", + nullptr + }; + return names; +} + +inline const char *EnumNameIValueUnion(IValueUnion e) { + if (::flatbuffers::IsOutRange(e, IValueUnion::NONE, 
IValueUnion::Function)) return ""; + const size_t index = static_cast(e); + return EnumNamesIValueUnion()[index]; +} + +template struct IValueUnionTraits { + static const IValueUnion enum_value = IValueUnion::NONE; +}; + +template<> struct IValueUnionTraits { + static const IValueUnion enum_value = IValueUnion::Int; +}; + +template<> struct IValueUnionTraits { + static const IValueUnion enum_value = IValueUnion::Bool; +}; + +template<> struct IValueUnionTraits { + static const IValueUnion enum_value = IValueUnion::Double; +}; + +template<> struct IValueUnionTraits { + static const IValueUnion enum_value = IValueUnion::ComplexDouble; +}; + +template<> struct IValueUnionTraits { + static const IValueUnion enum_value = IValueUnion::TensorMetadata; +}; + +template<> struct IValueUnionTraits { + static const IValueUnion enum_value = IValueUnion::String; +}; + +template<> struct IValueUnionTraits { + static const IValueUnion enum_value = IValueUnion::List; +}; + +template<> struct IValueUnionTraits { + static const IValueUnion enum_value = IValueUnion::Tuple; +}; + +template<> struct IValueUnionTraits { + static const IValueUnion enum_value = IValueUnion::Dict; +}; + +template<> struct IValueUnionTraits { + static const IValueUnion enum_value = IValueUnion::Object; +}; + +template<> struct IValueUnionTraits { + static const IValueUnion enum_value = IValueUnion::IntList; +}; + +template<> struct IValueUnionTraits { + static const IValueUnion enum_value = IValueUnion::DoubleList; +}; + +template<> struct IValueUnionTraits { + static const IValueUnion enum_value = IValueUnion::BoolList; +}; + +template<> struct IValueUnionTraits { + static const IValueUnion enum_value = IValueUnion::Device; +}; + +template<> struct IValueUnionTraits { + static const IValueUnion enum_value = IValueUnion::EnumValue; +}; + +template<> struct IValueUnionTraits { + static const IValueUnion enum_value = IValueUnion::Function; +}; + +bool VerifyIValueUnion(::flatbuffers::Verifier &verifier, const void *obj, IValueUnion type); +bool VerifyIValueUnionVector(::flatbuffers::Verifier &verifier, const ::flatbuffers::Vector<::flatbuffers::Offset> *values, const ::flatbuffers::Vector *types); + +FLATBUFFERS_MANUALLY_ALIGNED_STRUCT(8) Int FLATBUFFERS_FINAL_CLASS { + private: + int64_t int_val_; + + public: + Int() + : int_val_(0) { + } + Int(int64_t _int_val) + : int_val_(::flatbuffers::EndianScalar(_int_val)) { + } + int64_t int_val() const { + return ::flatbuffers::EndianScalar(int_val_); + } + void mutate_int_val(int64_t _int_val) { + ::flatbuffers::WriteScalar(&int_val_, _int_val); + } +}; +FLATBUFFERS_STRUCT_END(Int, 8); + +FLATBUFFERS_MANUALLY_ALIGNED_STRUCT(1) Bool FLATBUFFERS_FINAL_CLASS { + private: + uint8_t bool_val_; + + public: + Bool() + : bool_val_(0) { + } + Bool(bool _bool_val) + : bool_val_(::flatbuffers::EndianScalar(static_cast(_bool_val))) { + } + bool bool_val() const { + return ::flatbuffers::EndianScalar(bool_val_) != 0; + } + void mutate_bool_val(bool _bool_val) { + ::flatbuffers::WriteScalar(&bool_val_, static_cast(_bool_val)); + } +}; +FLATBUFFERS_STRUCT_END(Bool, 1); + +FLATBUFFERS_MANUALLY_ALIGNED_STRUCT(8) Double FLATBUFFERS_FINAL_CLASS { + private: + double double_val_; + + public: + Double() + : double_val_(0) { + } + Double(double _double_val) + : double_val_(::flatbuffers::EndianScalar(_double_val)) { + } + double double_val() const { + return ::flatbuffers::EndianScalar(double_val_); + } + void mutate_double_val(double _double_val) { + ::flatbuffers::WriteScalar(&double_val_, _double_val); + } 
+}; +FLATBUFFERS_STRUCT_END(Double, 8); + +FLATBUFFERS_MANUALLY_ALIGNED_STRUCT(8) PerTensorAffineSchema FLATBUFFERS_FINAL_CLASS { + private: + double q_scale_; + int32_t q_zero_point_; + int32_t padding0__; + + public: + PerTensorAffineSchema() + : q_scale_(0), + q_zero_point_(0), + padding0__(0) { + (void)padding0__; + } + PerTensorAffineSchema(double _q_scale, int32_t _q_zero_point) + : q_scale_(::flatbuffers::EndianScalar(_q_scale)), + q_zero_point_(::flatbuffers::EndianScalar(_q_zero_point)), + padding0__(0) { + (void)padding0__; + } + double q_scale() const { + return ::flatbuffers::EndianScalar(q_scale_); + } + void mutate_q_scale(double _q_scale) { + ::flatbuffers::WriteScalar(&q_scale_, _q_scale); + } + int32_t q_zero_point() const { + return ::flatbuffers::EndianScalar(q_zero_point_); + } + void mutate_q_zero_point(int32_t _q_zero_point) { + ::flatbuffers::WriteScalar(&q_zero_point_, _q_zero_point); + } +}; +FLATBUFFERS_STRUCT_END(PerTensorAffineSchema, 16); + +FLATBUFFERS_MANUALLY_ALIGNED_STRUCT(8) ComplexDouble FLATBUFFERS_FINAL_CLASS { + private: + double real_; + double imag_; + + public: + ComplexDouble() + : real_(0), + imag_(0) { + } + ComplexDouble(double _real, double _imag) + : real_(::flatbuffers::EndianScalar(_real)), + imag_(::flatbuffers::EndianScalar(_imag)) { + } + double real() const { + return ::flatbuffers::EndianScalar(real_); + } + void mutate_real(double _real) { + ::flatbuffers::WriteScalar(&real_, _real); + } + double imag() const { + return ::flatbuffers::EndianScalar(imag_); + } + void mutate_imag(double _imag) { + ::flatbuffers::WriteScalar(&imag_, _imag); + } +}; +FLATBUFFERS_STRUCT_END(ComplexDouble, 16); + +FLATBUFFERS_MANUALLY_ALIGNED_STRUCT(4) Instruction FLATBUFFERS_FINAL_CLASS { + private: + int8_t op_; + int8_t padding0__; + uint16_t n_; + int32_t x_; + + public: + Instruction() + : op_(0), + padding0__(0), + n_(0), + x_(0) { + (void)padding0__; + } + Instruction(int8_t _op, uint16_t _n, int32_t _x) + : op_(::flatbuffers::EndianScalar(_op)), + padding0__(0), + n_(::flatbuffers::EndianScalar(_n)), + x_(::flatbuffers::EndianScalar(_x)) { + (void)padding0__; + } + int8_t op() const { + return ::flatbuffers::EndianScalar(op_); + } + void mutate_op(int8_t _op) { + ::flatbuffers::WriteScalar(&op_, _op); + } + uint16_t n() const { + return ::flatbuffers::EndianScalar(n_); + } + void mutate_n(uint16_t _n) { + ::flatbuffers::WriteScalar(&n_, _n); + } + int32_t x() const { + return ::flatbuffers::EndianScalar(x_); + } + void mutate_x(int32_t _x) { + ::flatbuffers::WriteScalar(&x_, _x); + } +}; +FLATBUFFERS_STRUCT_END(Instruction, 8); + +struct QuantizedSchema FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef QuantizedSchemaBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_QSCHEME = 4, + VT_SCALE = 6, + VT_ZERO_POINT = 8, + VT_SCALES = 10, + VT_ZERO_POINTS = 12, + VT_AXIS = 14 + }; + int8_t qscheme() const { + return GetField(VT_QSCHEME, 0); + } + bool mutate_qscheme(int8_t _qscheme = 0) { + return SetField(VT_QSCHEME, _qscheme, 0); + } + double scale() const { + return GetField(VT_SCALE, 0.0); + } + bool mutate_scale(double _scale = 0.0) { + return SetField(VT_SCALE, _scale, 0.0); + } + int32_t zero_point() const { + return GetField(VT_ZERO_POINT, 0); + } + bool mutate_zero_point(int32_t _zero_point = 0) { + return SetField(VT_ZERO_POINT, _zero_point, 0); + } + const torch::jit::mobile::serialization::TensorMetadata *scales() const { + return GetPointer(VT_SCALES); + } + 
torch::jit::mobile::serialization::TensorMetadata *mutable_scales() { + return GetPointer(VT_SCALES); + } + const torch::jit::mobile::serialization::TensorMetadata *zero_points() const { + return GetPointer(VT_ZERO_POINTS); + } + torch::jit::mobile::serialization::TensorMetadata *mutable_zero_points() { + return GetPointer(VT_ZERO_POINTS); + } + int32_t axis() const { + return GetField(VT_AXIS, 0); + } + bool mutate_axis(int32_t _axis = 0) { + return SetField(VT_AXIS, _axis, 0); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_QSCHEME, 1) && + VerifyField(verifier, VT_SCALE, 8) && + VerifyField(verifier, VT_ZERO_POINT, 4) && + VerifyOffset(verifier, VT_SCALES) && + verifier.VerifyTable(scales()) && + VerifyOffset(verifier, VT_ZERO_POINTS) && + verifier.VerifyTable(zero_points()) && + VerifyField(verifier, VT_AXIS, 4) && + verifier.EndTable(); + } +}; + +struct QuantizedSchemaBuilder { + typedef QuantizedSchema Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_qscheme(int8_t qscheme) { + fbb_.AddElement(QuantizedSchema::VT_QSCHEME, qscheme, 0); + } + void add_scale(double scale) { + fbb_.AddElement(QuantizedSchema::VT_SCALE, scale, 0.0); + } + void add_zero_point(int32_t zero_point) { + fbb_.AddElement(QuantizedSchema::VT_ZERO_POINT, zero_point, 0); + } + void add_scales(::flatbuffers::Offset scales) { + fbb_.AddOffset(QuantizedSchema::VT_SCALES, scales); + } + void add_zero_points(::flatbuffers::Offset zero_points) { + fbb_.AddOffset(QuantizedSchema::VT_ZERO_POINTS, zero_points); + } + void add_axis(int32_t axis) { + fbb_.AddElement(QuantizedSchema::VT_AXIS, axis, 0); + } + explicit QuantizedSchemaBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateQuantizedSchema( + ::flatbuffers::FlatBufferBuilder &_fbb, + int8_t qscheme = 0, + double scale = 0.0, + int32_t zero_point = 0, + ::flatbuffers::Offset scales = 0, + ::flatbuffers::Offset zero_points = 0, + int32_t axis = 0) { + QuantizedSchemaBuilder builder_(_fbb); + builder_.add_scale(scale); + builder_.add_axis(axis); + builder_.add_zero_points(zero_points); + builder_.add_scales(scales); + builder_.add_zero_point(zero_point); + builder_.add_qscheme(qscheme); + return builder_.Finish(); +} + +struct TensorMetadata FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef TensorMetadataBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_STORAGE_LOCATION_INDEX = 4, + VT_SCALAR_TYPE = 6, + VT_STORAGE_OFFSET = 8, + VT_SIZES = 10, + VT_STRIDES = 12, + VT_REQUIRES_GRAD = 14, + VT_QUANTIZED_SCHEMA = 16 + }; + uint32_t storage_location_index() const { + return GetField(VT_STORAGE_LOCATION_INDEX, 0); + } + bool mutate_storage_location_index(uint32_t _storage_location_index = 0) { + return SetField(VT_STORAGE_LOCATION_INDEX, _storage_location_index, 0); + } + int8_t scalar_type() const { + return GetField(VT_SCALAR_TYPE, 0); + } + bool mutate_scalar_type(int8_t _scalar_type = 0) { + return SetField(VT_SCALAR_TYPE, _scalar_type, 0); + } + int32_t storage_offset() const { + return GetField(VT_STORAGE_OFFSET, 0); + } + bool mutate_storage_offset(int32_t _storage_offset = 0) { + return SetField(VT_STORAGE_OFFSET, _storage_offset, 0); + } + const ::flatbuffers::Vector 
*sizes() const { + return GetPointer *>(VT_SIZES); + } + ::flatbuffers::Vector *mutable_sizes() { + return GetPointer<::flatbuffers::Vector *>(VT_SIZES); + } + const ::flatbuffers::Vector *strides() const { + return GetPointer *>(VT_STRIDES); + } + ::flatbuffers::Vector *mutable_strides() { + return GetPointer<::flatbuffers::Vector *>(VT_STRIDES); + } + bool requires_grad() const { + return GetField(VT_REQUIRES_GRAD, 0) != 0; + } + bool mutate_requires_grad(bool _requires_grad = 0) { + return SetField(VT_REQUIRES_GRAD, static_cast(_requires_grad), 0); + } + const torch::jit::mobile::serialization::QuantizedSchema *quantized_schema() const { + return GetPointer(VT_QUANTIZED_SCHEMA); + } + torch::jit::mobile::serialization::QuantizedSchema *mutable_quantized_schema() { + return GetPointer(VT_QUANTIZED_SCHEMA); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_STORAGE_LOCATION_INDEX, 4) && + VerifyField(verifier, VT_SCALAR_TYPE, 1) && + VerifyField(verifier, VT_STORAGE_OFFSET, 4) && + VerifyOffset(verifier, VT_SIZES) && + verifier.VerifyVector(sizes()) && + VerifyOffset(verifier, VT_STRIDES) && + verifier.VerifyVector(strides()) && + VerifyField(verifier, VT_REQUIRES_GRAD, 1) && + VerifyOffset(verifier, VT_QUANTIZED_SCHEMA) && + verifier.VerifyTable(quantized_schema()) && + verifier.EndTable(); + } +}; + +struct TensorMetadataBuilder { + typedef TensorMetadata Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_storage_location_index(uint32_t storage_location_index) { + fbb_.AddElement(TensorMetadata::VT_STORAGE_LOCATION_INDEX, storage_location_index, 0); + } + void add_scalar_type(int8_t scalar_type) { + fbb_.AddElement(TensorMetadata::VT_SCALAR_TYPE, scalar_type, 0); + } + void add_storage_offset(int32_t storage_offset) { + fbb_.AddElement(TensorMetadata::VT_STORAGE_OFFSET, storage_offset, 0); + } + void add_sizes(::flatbuffers::Offset<::flatbuffers::Vector> sizes) { + fbb_.AddOffset(TensorMetadata::VT_SIZES, sizes); + } + void add_strides(::flatbuffers::Offset<::flatbuffers::Vector> strides) { + fbb_.AddOffset(TensorMetadata::VT_STRIDES, strides); + } + void add_requires_grad(bool requires_grad) { + fbb_.AddElement(TensorMetadata::VT_REQUIRES_GRAD, static_cast(requires_grad), 0); + } + void add_quantized_schema(::flatbuffers::Offset quantized_schema) { + fbb_.AddOffset(TensorMetadata::VT_QUANTIZED_SCHEMA, quantized_schema); + } + explicit TensorMetadataBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateTensorMetadata( + ::flatbuffers::FlatBufferBuilder &_fbb, + uint32_t storage_location_index = 0, + int8_t scalar_type = 0, + int32_t storage_offset = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> sizes = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> strides = 0, + bool requires_grad = false, + ::flatbuffers::Offset quantized_schema = 0) { + TensorMetadataBuilder builder_(_fbb); + builder_.add_quantized_schema(quantized_schema); + builder_.add_strides(strides); + builder_.add_sizes(sizes); + builder_.add_storage_offset(storage_offset); + builder_.add_storage_location_index(storage_location_index); + builder_.add_requires_grad(requires_grad); + builder_.add_scalar_type(scalar_type); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset 
CreateTensorMetadataDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + uint32_t storage_location_index = 0, + int8_t scalar_type = 0, + int32_t storage_offset = 0, + const std::vector *sizes = nullptr, + const std::vector *strides = nullptr, + bool requires_grad = false, + ::flatbuffers::Offset quantized_schema = 0) { + auto sizes__ = sizes ? _fbb.CreateVector(*sizes) : 0; + auto strides__ = strides ? _fbb.CreateVector(*strides) : 0; + return torch::jit::mobile::serialization::CreateTensorMetadata( + _fbb, + storage_location_index, + scalar_type, + storage_offset, + sizes__, + strides__, + requires_grad, + quantized_schema); +} + +struct String FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef StringBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_DATA = 4 + }; + const ::flatbuffers::String *data() const { + return GetPointer(VT_DATA); + } + ::flatbuffers::String *mutable_data() { + return GetPointer<::flatbuffers::String *>(VT_DATA); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_DATA) && + verifier.VerifyString(data()) && + verifier.EndTable(); + } +}; + +struct StringBuilder { + typedef String Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_data(::flatbuffers::Offset<::flatbuffers::String> data) { + fbb_.AddOffset(String::VT_DATA, data); + } + explicit StringBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateString( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::String> data = 0) { + StringBuilder builder_(_fbb); + builder_.add_data(data); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateStringDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const char *data = nullptr) { + auto data__ = data ? 
_fbb.CreateString(data) : 0; + return torch::jit::mobile::serialization::CreateString( + _fbb, + data__); +} + +struct Device FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef DeviceBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_STR = 4 + }; + const ::flatbuffers::String *str() const { + return GetPointer(VT_STR); + } + ::flatbuffers::String *mutable_str() { + return GetPointer<::flatbuffers::String *>(VT_STR); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_STR) && + verifier.VerifyString(str()) && + verifier.EndTable(); + } +}; + +struct DeviceBuilder { + typedef Device Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_str(::flatbuffers::Offset<::flatbuffers::String> str) { + fbb_.AddOffset(Device::VT_STR, str); + } + explicit DeviceBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateDevice( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::String> str = 0) { + DeviceBuilder builder_(_fbb); + builder_.add_str(str); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateDeviceDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const char *str = nullptr) { + auto str__ = str ? _fbb.CreateString(str) : 0; + return torch::jit::mobile::serialization::CreateDevice( + _fbb, + str__); +} + +struct List FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef ListBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_ITEMS = 4, + VT_ANNOTATION_STR = 6 + }; + const ::flatbuffers::Vector *items() const { + return GetPointer *>(VT_ITEMS); + } + ::flatbuffers::Vector *mutable_items() { + return GetPointer<::flatbuffers::Vector *>(VT_ITEMS); + } + const ::flatbuffers::String *annotation_str() const { + return GetPointer(VT_ANNOTATION_STR); + } + ::flatbuffers::String *mutable_annotation_str() { + return GetPointer<::flatbuffers::String *>(VT_ANNOTATION_STR); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_ITEMS) && + verifier.VerifyVector(items()) && + VerifyOffset(verifier, VT_ANNOTATION_STR) && + verifier.VerifyString(annotation_str()) && + verifier.EndTable(); + } +}; + +struct ListBuilder { + typedef List Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_items(::flatbuffers::Offset<::flatbuffers::Vector> items) { + fbb_.AddOffset(List::VT_ITEMS, items); + } + void add_annotation_str(::flatbuffers::Offset<::flatbuffers::String> annotation_str) { + fbb_.AddOffset(List::VT_ANNOTATION_STR, annotation_str); + } + explicit ListBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateList( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::Vector> items = 0, + ::flatbuffers::Offset<::flatbuffers::String> annotation_str = 0) { + ListBuilder builder_(_fbb); + builder_.add_annotation_str(annotation_str); + builder_.add_items(items); + return 
builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateListDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *items = nullptr, + const char *annotation_str = nullptr) { + auto items__ = items ? _fbb.CreateVector(*items) : 0; + auto annotation_str__ = annotation_str ? _fbb.CreateString(annotation_str) : 0; + return torch::jit::mobile::serialization::CreateList( + _fbb, + items__, + annotation_str__); +} + +struct IntList FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef IntListBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_ITEMS = 4 + }; + const ::flatbuffers::Vector *items() const { + return GetPointer *>(VT_ITEMS); + } + ::flatbuffers::Vector *mutable_items() { + return GetPointer<::flatbuffers::Vector *>(VT_ITEMS); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_ITEMS) && + verifier.VerifyVector(items()) && + verifier.EndTable(); + } +}; + +struct IntListBuilder { + typedef IntList Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_items(::flatbuffers::Offset<::flatbuffers::Vector> items) { + fbb_.AddOffset(IntList::VT_ITEMS, items); + } + explicit IntListBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateIntList( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::Vector> items = 0) { + IntListBuilder builder_(_fbb); + builder_.add_items(items); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateIntListDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *items = nullptr) { + auto items__ = items ? _fbb.CreateVector(*items) : 0; + return torch::jit::mobile::serialization::CreateIntList( + _fbb, + items__); +} + +struct DoubleList FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef DoubleListBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_ITEMS = 4 + }; + const ::flatbuffers::Vector *items() const { + return GetPointer *>(VT_ITEMS); + } + ::flatbuffers::Vector *mutable_items() { + return GetPointer<::flatbuffers::Vector *>(VT_ITEMS); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_ITEMS) && + verifier.VerifyVector(items()) && + verifier.EndTable(); + } +}; + +struct DoubleListBuilder { + typedef DoubleList Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_items(::flatbuffers::Offset<::flatbuffers::Vector> items) { + fbb_.AddOffset(DoubleList::VT_ITEMS, items); + } + explicit DoubleListBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateDoubleList( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::Vector> items = 0) { + DoubleListBuilder builder_(_fbb); + builder_.add_items(items); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateDoubleListDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *items = nullptr) { + auto items__ = items ? 
_fbb.CreateVector(*items) : 0; + return torch::jit::mobile::serialization::CreateDoubleList( + _fbb, + items__); +} + +struct BoolList FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef BoolListBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_ITEMS = 4 + }; + const ::flatbuffers::Vector *items() const { + return GetPointer *>(VT_ITEMS); + } + ::flatbuffers::Vector *mutable_items() { + return GetPointer<::flatbuffers::Vector *>(VT_ITEMS); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_ITEMS) && + verifier.VerifyVector(items()) && + verifier.EndTable(); + } +}; + +struct BoolListBuilder { + typedef BoolList Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_items(::flatbuffers::Offset<::flatbuffers::Vector> items) { + fbb_.AddOffset(BoolList::VT_ITEMS, items); + } + explicit BoolListBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateBoolList( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::Vector> items = 0) { + BoolListBuilder builder_(_fbb); + builder_.add_items(items); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateBoolListDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *items = nullptr) { + auto items__ = items ? _fbb.CreateVector(*items) : 0; + return torch::jit::mobile::serialization::CreateBoolList( + _fbb, + items__); +} + +struct Tuple FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef TupleBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_ITEMS = 4 + }; + const ::flatbuffers::Vector *items() const { + return GetPointer *>(VT_ITEMS); + } + ::flatbuffers::Vector *mutable_items() { + return GetPointer<::flatbuffers::Vector *>(VT_ITEMS); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_ITEMS) && + verifier.VerifyVector(items()) && + verifier.EndTable(); + } +}; + +struct TupleBuilder { + typedef Tuple Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_items(::flatbuffers::Offset<::flatbuffers::Vector> items) { + fbb_.AddOffset(Tuple::VT_ITEMS, items); + } + explicit TupleBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateTuple( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::Vector> items = 0) { + TupleBuilder builder_(_fbb); + builder_.add_items(items); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateTupleDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *items = nullptr) { + auto items__ = items ? 
_fbb.CreateVector(*items) : 0; + return torch::jit::mobile::serialization::CreateTuple( + _fbb, + items__); +} + +struct Dict FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef DictBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_KEYS = 4, + VT_VALUES = 6, + VT_ANNOTATION_STR = 8 + }; + const ::flatbuffers::Vector *keys() const { + return GetPointer *>(VT_KEYS); + } + ::flatbuffers::Vector *mutable_keys() { + return GetPointer<::flatbuffers::Vector *>(VT_KEYS); + } + const ::flatbuffers::Vector *values() const { + return GetPointer *>(VT_VALUES); + } + ::flatbuffers::Vector *mutable_values() { + return GetPointer<::flatbuffers::Vector *>(VT_VALUES); + } + const ::flatbuffers::String *annotation_str() const { + return GetPointer(VT_ANNOTATION_STR); + } + ::flatbuffers::String *mutable_annotation_str() { + return GetPointer<::flatbuffers::String *>(VT_ANNOTATION_STR); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_KEYS) && + verifier.VerifyVector(keys()) && + VerifyOffset(verifier, VT_VALUES) && + verifier.VerifyVector(values()) && + VerifyOffset(verifier, VT_ANNOTATION_STR) && + verifier.VerifyString(annotation_str()) && + verifier.EndTable(); + } +}; + +struct DictBuilder { + typedef Dict Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_keys(::flatbuffers::Offset<::flatbuffers::Vector> keys) { + fbb_.AddOffset(Dict::VT_KEYS, keys); + } + void add_values(::flatbuffers::Offset<::flatbuffers::Vector> values) { + fbb_.AddOffset(Dict::VT_VALUES, values); + } + void add_annotation_str(::flatbuffers::Offset<::flatbuffers::String> annotation_str) { + fbb_.AddOffset(Dict::VT_ANNOTATION_STR, annotation_str); + } + explicit DictBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateDict( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::Vector> keys = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> values = 0, + ::flatbuffers::Offset<::flatbuffers::String> annotation_str = 0) { + DictBuilder builder_(_fbb); + builder_.add_annotation_str(annotation_str); + builder_.add_values(values); + builder_.add_keys(keys); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateDictDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *keys = nullptr, + const std::vector *values = nullptr, + const char *annotation_str = nullptr) { + auto keys__ = keys ? _fbb.CreateVector(*keys) : 0; + auto values__ = values ? _fbb.CreateVector(*values) : 0; + auto annotation_str__ = annotation_str ? 
_fbb.CreateString(annotation_str) : 0; + return torch::jit::mobile::serialization::CreateDict( + _fbb, + keys__, + values__, + annotation_str__); +} + +struct ObjectType FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef ObjectTypeBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_TYPE_NAME = 4, + VT_TYPE = 6, + VT_ATTR_NAMES = 8 + }; + const ::flatbuffers::String *type_name() const { + return GetPointer(VT_TYPE_NAME); + } + ::flatbuffers::String *mutable_type_name() { + return GetPointer<::flatbuffers::String *>(VT_TYPE_NAME); + } + torch::jit::mobile::serialization::TypeType type() const { + return static_cast(GetField(VT_TYPE, 0)); + } + bool mutate_type(torch::jit::mobile::serialization::TypeType _type = static_cast(0)) { + return SetField(VT_TYPE, static_cast(_type), 0); + } + const ::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>> *attr_names() const { + return GetPointer> *>(VT_ATTR_NAMES); + } + ::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>> *mutable_attr_names() { + return GetPointer<::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>> *>(VT_ATTR_NAMES); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_TYPE_NAME) && + verifier.VerifyString(type_name()) && + VerifyField(verifier, VT_TYPE, 1) && + VerifyOffset(verifier, VT_ATTR_NAMES) && + verifier.VerifyVector(attr_names()) && + verifier.VerifyVectorOfStrings(attr_names()) && + verifier.EndTable(); + } +}; + +struct ObjectTypeBuilder { + typedef ObjectType Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_type_name(::flatbuffers::Offset<::flatbuffers::String> type_name) { + fbb_.AddOffset(ObjectType::VT_TYPE_NAME, type_name); + } + void add_type(torch::jit::mobile::serialization::TypeType type) { + fbb_.AddElement(ObjectType::VT_TYPE, static_cast(type), 0); + } + void add_attr_names(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>>> attr_names) { + fbb_.AddOffset(ObjectType::VT_ATTR_NAMES, attr_names); + } + explicit ObjectTypeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateObjectType( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::String> type_name = 0, + torch::jit::mobile::serialization::TypeType type = torch::jit::mobile::serialization::TypeType::UNSET, + ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>>> attr_names = 0) { + ObjectTypeBuilder builder_(_fbb); + builder_.add_attr_names(attr_names); + builder_.add_type_name(type_name); + builder_.add_type(type); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateObjectTypeDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const char *type_name = nullptr, + torch::jit::mobile::serialization::TypeType type = torch::jit::mobile::serialization::TypeType::UNSET, + const std::vector<::flatbuffers::Offset<::flatbuffers::String>> *attr_names = nullptr) { + auto type_name__ = type_name ? _fbb.CreateString(type_name) : 0; + auto attr_names__ = attr_names ? 
_fbb.CreateVector<::flatbuffers::Offset<::flatbuffers::String>>(*attr_names) : 0; + return torch::jit::mobile::serialization::CreateObjectType( + _fbb, + type_name__, + type, + attr_names__); +} + +struct Object FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef ObjectBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_TYPE_INDEX = 4, + VT_STATE = 6, + VT_ATTRS = 8, + VT_SETSTATE_FUNC = 10 + }; + uint32_t type_index() const { + return GetField(VT_TYPE_INDEX, 0); + } + bool mutate_type_index(uint32_t _type_index = 0) { + return SetField(VT_TYPE_INDEX, _type_index, 0); + } + uint32_t state() const { + return GetField(VT_STATE, 0); + } + bool mutate_state(uint32_t _state = 0) { + return SetField(VT_STATE, _state, 0); + } + const ::flatbuffers::Vector *attrs() const { + return GetPointer *>(VT_ATTRS); + } + ::flatbuffers::Vector *mutable_attrs() { + return GetPointer<::flatbuffers::Vector *>(VT_ATTRS); + } + uint32_t setstate_func() const { + return GetField(VT_SETSTATE_FUNC, 0); + } + bool mutate_setstate_func(uint32_t _setstate_func = 0) { + return SetField(VT_SETSTATE_FUNC, _setstate_func, 0); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_TYPE_INDEX, 4) && + VerifyField(verifier, VT_STATE, 4) && + VerifyOffset(verifier, VT_ATTRS) && + verifier.VerifyVector(attrs()) && + VerifyField(verifier, VT_SETSTATE_FUNC, 4) && + verifier.EndTable(); + } +}; + +struct ObjectBuilder { + typedef Object Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_type_index(uint32_t type_index) { + fbb_.AddElement(Object::VT_TYPE_INDEX, type_index, 0); + } + void add_state(uint32_t state) { + fbb_.AddElement(Object::VT_STATE, state, 0); + } + void add_attrs(::flatbuffers::Offset<::flatbuffers::Vector> attrs) { + fbb_.AddOffset(Object::VT_ATTRS, attrs); + } + void add_setstate_func(uint32_t setstate_func) { + fbb_.AddElement(Object::VT_SETSTATE_FUNC, setstate_func, 0); + } + explicit ObjectBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateObject( + ::flatbuffers::FlatBufferBuilder &_fbb, + uint32_t type_index = 0, + uint32_t state = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> attrs = 0, + uint32_t setstate_func = 0) { + ObjectBuilder builder_(_fbb); + builder_.add_setstate_func(setstate_func); + builder_.add_attrs(attrs); + builder_.add_state(state); + builder_.add_type_index(type_index); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateObjectDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + uint32_t type_index = 0, + uint32_t state = 0, + const std::vector *attrs = nullptr, + uint32_t setstate_func = 0) { + auto attrs__ = attrs ? 
_fbb.CreateVector(*attrs) : 0; + return torch::jit::mobile::serialization::CreateObject( + _fbb, + type_index, + state, + attrs__, + setstate_func); +} + +struct EnumValue FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef EnumValueBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_TYPE_NAME = 4, + VT_VALUE = 6 + }; + const ::flatbuffers::String *type_name() const { + return GetPointer(VT_TYPE_NAME); + } + ::flatbuffers::String *mutable_type_name() { + return GetPointer<::flatbuffers::String *>(VT_TYPE_NAME); + } + uint32_t value() const { + return GetField(VT_VALUE, 0); + } + bool mutate_value(uint32_t _value = 0) { + return SetField(VT_VALUE, _value, 0); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_TYPE_NAME) && + verifier.VerifyString(type_name()) && + VerifyField(verifier, VT_VALUE, 4) && + verifier.EndTable(); + } +}; + +struct EnumValueBuilder { + typedef EnumValue Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_type_name(::flatbuffers::Offset<::flatbuffers::String> type_name) { + fbb_.AddOffset(EnumValue::VT_TYPE_NAME, type_name); + } + void add_value(uint32_t value) { + fbb_.AddElement(EnumValue::VT_VALUE, value, 0); + } + explicit EnumValueBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateEnumValue( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::String> type_name = 0, + uint32_t value = 0) { + EnumValueBuilder builder_(_fbb); + builder_.add_value(value); + builder_.add_type_name(type_name); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateEnumValueDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const char *type_name = nullptr, + uint32_t value = 0) { + auto type_name__ = type_name ? 
_fbb.CreateString(type_name) : 0; + return torch::jit::mobile::serialization::CreateEnumValue( + _fbb, + type_name__, + value); +} + +struct Operator FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef OperatorBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_NAME = 4, + VT_OVERLOAD_NAME = 6, + VT_NUM_ARGS_SERIALIZED = 8 + }; + const ::flatbuffers::String *name() const { + return GetPointer(VT_NAME); + } + ::flatbuffers::String *mutable_name() { + return GetPointer<::flatbuffers::String *>(VT_NAME); + } + const ::flatbuffers::String *overload_name() const { + return GetPointer(VT_OVERLOAD_NAME); + } + ::flatbuffers::String *mutable_overload_name() { + return GetPointer<::flatbuffers::String *>(VT_OVERLOAD_NAME); + } + int32_t num_args_serialized() const { + return GetField(VT_NUM_ARGS_SERIALIZED, -1); + } + bool mutate_num_args_serialized(int32_t _num_args_serialized = -1) { + return SetField(VT_NUM_ARGS_SERIALIZED, _num_args_serialized, -1); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_NAME) && + verifier.VerifyString(name()) && + VerifyOffset(verifier, VT_OVERLOAD_NAME) && + verifier.VerifyString(overload_name()) && + VerifyField(verifier, VT_NUM_ARGS_SERIALIZED, 4) && + verifier.EndTable(); + } +}; + +struct OperatorBuilder { + typedef Operator Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_name(::flatbuffers::Offset<::flatbuffers::String> name) { + fbb_.AddOffset(Operator::VT_NAME, name); + } + void add_overload_name(::flatbuffers::Offset<::flatbuffers::String> overload_name) { + fbb_.AddOffset(Operator::VT_OVERLOAD_NAME, overload_name); + } + void add_num_args_serialized(int32_t num_args_serialized) { + fbb_.AddElement(Operator::VT_NUM_ARGS_SERIALIZED, num_args_serialized, -1); + } + explicit OperatorBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateOperator( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::String> name = 0, + ::flatbuffers::Offset<::flatbuffers::String> overload_name = 0, + int32_t num_args_serialized = -1) { + OperatorBuilder builder_(_fbb); + builder_.add_num_args_serialized(num_args_serialized); + builder_.add_overload_name(overload_name); + builder_.add_name(name); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateOperatorDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const char *name = nullptr, + const char *overload_name = nullptr, + int32_t num_args_serialized = -1) { + auto name__ = name ? _fbb.CreateString(name) : 0; + auto overload_name__ = overload_name ? 
_fbb.CreateString(overload_name) : 0; + return torch::jit::mobile::serialization::CreateOperator( + _fbb, + name__, + overload_name__, + num_args_serialized); +} + +struct Arg FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef ArgBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_NAME = 4, + VT_TYPE = 6, + VT_DEFAULT_VALUE = 8 + }; + const ::flatbuffers::String *name() const { + return GetPointer(VT_NAME); + } + ::flatbuffers::String *mutable_name() { + return GetPointer<::flatbuffers::String *>(VT_NAME); + } + const ::flatbuffers::String *type() const { + return GetPointer(VT_TYPE); + } + ::flatbuffers::String *mutable_type() { + return GetPointer<::flatbuffers::String *>(VT_TYPE); + } + uint32_t default_value() const { + return GetField(VT_DEFAULT_VALUE, 0); + } + bool mutate_default_value(uint32_t _default_value = 0) { + return SetField(VT_DEFAULT_VALUE, _default_value, 0); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_NAME) && + verifier.VerifyString(name()) && + VerifyOffset(verifier, VT_TYPE) && + verifier.VerifyString(type()) && + VerifyField(verifier, VT_DEFAULT_VALUE, 4) && + verifier.EndTable(); + } +}; + +struct ArgBuilder { + typedef Arg Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_name(::flatbuffers::Offset<::flatbuffers::String> name) { + fbb_.AddOffset(Arg::VT_NAME, name); + } + void add_type(::flatbuffers::Offset<::flatbuffers::String> type) { + fbb_.AddOffset(Arg::VT_TYPE, type); + } + void add_default_value(uint32_t default_value) { + fbb_.AddElement(Arg::VT_DEFAULT_VALUE, default_value, 0); + } + explicit ArgBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateArg( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::String> name = 0, + ::flatbuffers::Offset<::flatbuffers::String> type = 0, + uint32_t default_value = 0) { + ArgBuilder builder_(_fbb); + builder_.add_default_value(default_value); + builder_.add_type(type); + builder_.add_name(name); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateArgDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const char *name = nullptr, + const char *type = nullptr, + uint32_t default_value = 0) { + auto name__ = name ? _fbb.CreateString(name) : 0; + auto type__ = type ? 
_fbb.CreateString(type) : 0; + return torch::jit::mobile::serialization::CreateArg( + _fbb, + name__, + type__, + default_value); +} + +struct Schema FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef SchemaBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_ARGUMENTS = 4, + VT_RETURNS = 6 + }; + const ::flatbuffers::Vector<::flatbuffers::Offset> *arguments() const { + return GetPointer> *>(VT_ARGUMENTS); + } + ::flatbuffers::Vector<::flatbuffers::Offset> *mutable_arguments() { + return GetPointer<::flatbuffers::Vector<::flatbuffers::Offset> *>(VT_ARGUMENTS); + } + const ::flatbuffers::Vector<::flatbuffers::Offset> *returns() const { + return GetPointer> *>(VT_RETURNS); + } + ::flatbuffers::Vector<::flatbuffers::Offset> *mutable_returns() { + return GetPointer<::flatbuffers::Vector<::flatbuffers::Offset> *>(VT_RETURNS); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_ARGUMENTS) && + verifier.VerifyVector(arguments()) && + verifier.VerifyVectorOfTables(arguments()) && + VerifyOffset(verifier, VT_RETURNS) && + verifier.VerifyVector(returns()) && + verifier.VerifyVectorOfTables(returns()) && + verifier.EndTable(); + } +}; + +struct SchemaBuilder { + typedef Schema Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_arguments(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> arguments) { + fbb_.AddOffset(Schema::VT_ARGUMENTS, arguments); + } + void add_returns(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> returns) { + fbb_.AddOffset(Schema::VT_RETURNS, returns); + } + explicit SchemaBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateSchema( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> arguments = 0, + ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> returns = 0) { + SchemaBuilder builder_(_fbb); + builder_.add_returns(returns); + builder_.add_arguments(arguments); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateSchemaDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const std::vector<::flatbuffers::Offset> *arguments = nullptr, + const std::vector<::flatbuffers::Offset> *returns = nullptr) { + auto arguments__ = arguments ? _fbb.CreateVector<::flatbuffers::Offset>(*arguments) : 0; + auto returns__ = returns ? 
_fbb.CreateVector<::flatbuffers::Offset>(*returns) : 0; + return torch::jit::mobile::serialization::CreateSchema( + _fbb, + arguments__, + returns__); +} + +struct DebugInfo FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef DebugInfoBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_DEBUG_HANDLE = 4 + }; + const ::flatbuffers::Vector *debug_handle() const { + return GetPointer *>(VT_DEBUG_HANDLE); + } + ::flatbuffers::Vector *mutable_debug_handle() { + return GetPointer<::flatbuffers::Vector *>(VT_DEBUG_HANDLE); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_DEBUG_HANDLE) && + verifier.VerifyVector(debug_handle()) && + verifier.EndTable(); + } +}; + +struct DebugInfoBuilder { + typedef DebugInfo Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_debug_handle(::flatbuffers::Offset<::flatbuffers::Vector> debug_handle) { + fbb_.AddOffset(DebugInfo::VT_DEBUG_HANDLE, debug_handle); + } + explicit DebugInfoBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateDebugInfo( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::Vector> debug_handle = 0) { + DebugInfoBuilder builder_(_fbb); + builder_.add_debug_handle(debug_handle); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateDebugInfoDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *debug_handle = nullptr) { + auto debug_handle__ = debug_handle ? 
_fbb.CreateVector(*debug_handle) : 0; + return torch::jit::mobile::serialization::CreateDebugInfo( + _fbb, + debug_handle__); +} + +struct Function FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef FunctionBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_QN = 4, + VT_INSTRUCTIONS = 6, + VT_OPERATORS = 8, + VT_CONSTANTS = 10, + VT_TYPE_ANNOTATIONS = 12, + VT_REGISTER_SIZE = 14, + VT_SCHEMA = 16, + VT_DEBUG_INFO = 18, + VT_CLASS_TYPE = 20 + }; + const ::flatbuffers::String *qn() const { + return GetPointer(VT_QN); + } + ::flatbuffers::String *mutable_qn() { + return GetPointer<::flatbuffers::String *>(VT_QN); + } + const ::flatbuffers::Vector *instructions() const { + return GetPointer *>(VT_INSTRUCTIONS); + } + ::flatbuffers::Vector *mutable_instructions() { + return GetPointer<::flatbuffers::Vector *>(VT_INSTRUCTIONS); + } + const ::flatbuffers::Vector<::flatbuffers::Offset> *operators() const { + return GetPointer> *>(VT_OPERATORS); + } + ::flatbuffers::Vector<::flatbuffers::Offset> *mutable_operators() { + return GetPointer<::flatbuffers::Vector<::flatbuffers::Offset> *>(VT_OPERATORS); + } + const ::flatbuffers::Vector *constants() const { + return GetPointer *>(VT_CONSTANTS); + } + ::flatbuffers::Vector *mutable_constants() { + return GetPointer<::flatbuffers::Vector *>(VT_CONSTANTS); + } + const ::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>> *type_annotations() const { + return GetPointer> *>(VT_TYPE_ANNOTATIONS); + } + ::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>> *mutable_type_annotations() { + return GetPointer<::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>> *>(VT_TYPE_ANNOTATIONS); + } + int32_t register_size() const { + return GetField(VT_REGISTER_SIZE, 0); + } + bool mutate_register_size(int32_t _register_size = 0) { + return SetField(VT_REGISTER_SIZE, _register_size, 0); + } + const torch::jit::mobile::serialization::Schema *schema() const { + return GetPointer(VT_SCHEMA); + } + torch::jit::mobile::serialization::Schema *mutable_schema() { + return GetPointer(VT_SCHEMA); + } + const torch::jit::mobile::serialization::DebugInfo *debug_info() const { + return GetPointer(VT_DEBUG_INFO); + } + torch::jit::mobile::serialization::DebugInfo *mutable_debug_info() { + return GetPointer(VT_DEBUG_INFO); + } + uint32_t class_type() const { + return GetField(VT_CLASS_TYPE, 0); + } + bool mutate_class_type(uint32_t _class_type = 0) { + return SetField(VT_CLASS_TYPE, _class_type, 0); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_QN) && + verifier.VerifyString(qn()) && + VerifyOffset(verifier, VT_INSTRUCTIONS) && + verifier.VerifyVector(instructions()) && + VerifyOffset(verifier, VT_OPERATORS) && + verifier.VerifyVector(operators()) && + verifier.VerifyVectorOfTables(operators()) && + VerifyOffset(verifier, VT_CONSTANTS) && + verifier.VerifyVector(constants()) && + VerifyOffset(verifier, VT_TYPE_ANNOTATIONS) && + verifier.VerifyVector(type_annotations()) && + verifier.VerifyVectorOfStrings(type_annotations()) && + VerifyField(verifier, VT_REGISTER_SIZE, 4) && + VerifyOffset(verifier, VT_SCHEMA) && + verifier.VerifyTable(schema()) && + VerifyOffset(verifier, VT_DEBUG_INFO) && + verifier.VerifyTable(debug_info()) && + VerifyField(verifier, VT_CLASS_TYPE, 4) && + verifier.EndTable(); + } +}; + +struct FunctionBuilder { + typedef Function Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + 
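+  // fbb_ is the FlatBufferBuilder this Function table is written into, and
+  // start_ (declared just below) records the offset returned by StartTable(),
+  // which Finish() later hands back to EndTable(). Callers normally go
+  // through the CreateFunction()/CreateFunctionDirect() helpers defined after
+  // this builder rather than adding fields one by one.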
::flatbuffers::uoffset_t start_; + void add_qn(::flatbuffers::Offset<::flatbuffers::String> qn) { + fbb_.AddOffset(Function::VT_QN, qn); + } + void add_instructions(::flatbuffers::Offset<::flatbuffers::Vector> instructions) { + fbb_.AddOffset(Function::VT_INSTRUCTIONS, instructions); + } + void add_operators(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> operators) { + fbb_.AddOffset(Function::VT_OPERATORS, operators); + } + void add_constants(::flatbuffers::Offset<::flatbuffers::Vector> constants) { + fbb_.AddOffset(Function::VT_CONSTANTS, constants); + } + void add_type_annotations(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>>> type_annotations) { + fbb_.AddOffset(Function::VT_TYPE_ANNOTATIONS, type_annotations); + } + void add_register_size(int32_t register_size) { + fbb_.AddElement(Function::VT_REGISTER_SIZE, register_size, 0); + } + void add_schema(::flatbuffers::Offset schema) { + fbb_.AddOffset(Function::VT_SCHEMA, schema); + } + void add_debug_info(::flatbuffers::Offset debug_info) { + fbb_.AddOffset(Function::VT_DEBUG_INFO, debug_info); + } + void add_class_type(uint32_t class_type) { + fbb_.AddElement(Function::VT_CLASS_TYPE, class_type, 0); + } + explicit FunctionBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateFunction( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::String> qn = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> instructions = 0, + ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> operators = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> constants = 0, + ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>>> type_annotations = 0, + int32_t register_size = 0, + ::flatbuffers::Offset schema = 0, + ::flatbuffers::Offset debug_info = 0, + uint32_t class_type = 0) { + FunctionBuilder builder_(_fbb); + builder_.add_class_type(class_type); + builder_.add_debug_info(debug_info); + builder_.add_schema(schema); + builder_.add_register_size(register_size); + builder_.add_type_annotations(type_annotations); + builder_.add_constants(constants); + builder_.add_operators(operators); + builder_.add_instructions(instructions); + builder_.add_qn(qn); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateFunctionDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const char *qn = nullptr, + const std::vector *instructions = nullptr, + const std::vector<::flatbuffers::Offset> *operators = nullptr, + const std::vector *constants = nullptr, + const std::vector<::flatbuffers::Offset<::flatbuffers::String>> *type_annotations = nullptr, + int32_t register_size = 0, + ::flatbuffers::Offset schema = 0, + ::flatbuffers::Offset debug_info = 0, + uint32_t class_type = 0) { + auto qn__ = qn ? _fbb.CreateString(qn) : 0; + auto instructions__ = instructions ? _fbb.CreateVectorOfStructs(*instructions) : 0; + auto operators__ = operators ? _fbb.CreateVector<::flatbuffers::Offset>(*operators) : 0; + auto constants__ = constants ? _fbb.CreateVector(*constants) : 0; + auto type_annotations__ = type_annotations ? 
_fbb.CreateVector<::flatbuffers::Offset<::flatbuffers::String>>(*type_annotations) : 0; + return torch::jit::mobile::serialization::CreateFunction( + _fbb, + qn__, + instructions__, + operators__, + constants__, + type_annotations__, + register_size, + schema, + debug_info, + class_type); +} + +struct StorageData FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef StorageDataBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_DATA = 4 + }; + const ::flatbuffers::Vector *data() const { + return GetPointer *>(VT_DATA); + } + ::flatbuffers::Vector *mutable_data() { + return GetPointer<::flatbuffers::Vector *>(VT_DATA); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_DATA) && + verifier.VerifyVector(data()) && + verifier.EndTable(); + } +}; + +struct StorageDataBuilder { + typedef StorageData Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_data(::flatbuffers::Offset<::flatbuffers::Vector> data) { + fbb_.AddOffset(StorageData::VT_DATA, data); + } + explicit StorageDataBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateStorageData( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::Vector> data = 0) { + StorageDataBuilder builder_(_fbb); + builder_.add_data(data); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateStorageDataDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *data = nullptr) { + if (data) { _fbb.ForceVectorAlignment(data->size(), sizeof(uint8_t), 16); } + auto data__ = data ? _fbb.CreateVector(*data) : 0; + return torch::jit::mobile::serialization::CreateStorageData( + _fbb, + data__); +} + +struct IValue FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef IValueBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_VAL_TYPE = 4, + VT_VAL = 6 + }; + torch::jit::mobile::serialization::IValueUnion val_type() const { + return static_cast(GetField(VT_VAL_TYPE, 0)); + } + const void *val() const { + return GetPointer(VT_VAL); + } + template const T *val_as() const; + const torch::jit::mobile::serialization::Int *val_as_Int() const { + return val_type() == torch::jit::mobile::serialization::IValueUnion::Int ? static_cast(val()) : nullptr; + } + const torch::jit::mobile::serialization::Bool *val_as_Bool() const { + return val_type() == torch::jit::mobile::serialization::IValueUnion::Bool ? static_cast(val()) : nullptr; + } + const torch::jit::mobile::serialization::Double *val_as_Double() const { + return val_type() == torch::jit::mobile::serialization::IValueUnion::Double ? static_cast(val()) : nullptr; + } + const torch::jit::mobile::serialization::ComplexDouble *val_as_ComplexDouble() const { + return val_type() == torch::jit::mobile::serialization::IValueUnion::ComplexDouble ? static_cast(val()) : nullptr; + } + const torch::jit::mobile::serialization::TensorMetadata *val_as_TensorMetadata() const { + return val_type() == torch::jit::mobile::serialization::IValueUnion::TensorMetadata ? static_cast(val()) : nullptr; + } + const torch::jit::mobile::serialization::String *val_as_String() const { + return val_type() == torch::jit::mobile::serialization::IValueUnion::String ? 
static_cast(val()) : nullptr; + } + const torch::jit::mobile::serialization::List *val_as_List() const { + return val_type() == torch::jit::mobile::serialization::IValueUnion::List ? static_cast(val()) : nullptr; + } + const torch::jit::mobile::serialization::Tuple *val_as_Tuple() const { + return val_type() == torch::jit::mobile::serialization::IValueUnion::Tuple ? static_cast(val()) : nullptr; + } + const torch::jit::mobile::serialization::Dict *val_as_Dict() const { + return val_type() == torch::jit::mobile::serialization::IValueUnion::Dict ? static_cast(val()) : nullptr; + } + const torch::jit::mobile::serialization::Object *val_as_Object() const { + return val_type() == torch::jit::mobile::serialization::IValueUnion::Object ? static_cast(val()) : nullptr; + } + const torch::jit::mobile::serialization::IntList *val_as_IntList() const { + return val_type() == torch::jit::mobile::serialization::IValueUnion::IntList ? static_cast(val()) : nullptr; + } + const torch::jit::mobile::serialization::DoubleList *val_as_DoubleList() const { + return val_type() == torch::jit::mobile::serialization::IValueUnion::DoubleList ? static_cast(val()) : nullptr; + } + const torch::jit::mobile::serialization::BoolList *val_as_BoolList() const { + return val_type() == torch::jit::mobile::serialization::IValueUnion::BoolList ? static_cast(val()) : nullptr; + } + const torch::jit::mobile::serialization::Device *val_as_Device() const { + return val_type() == torch::jit::mobile::serialization::IValueUnion::Device ? static_cast(val()) : nullptr; + } + const torch::jit::mobile::serialization::EnumValue *val_as_EnumValue() const { + return val_type() == torch::jit::mobile::serialization::IValueUnion::EnumValue ? static_cast(val()) : nullptr; + } + const torch::jit::mobile::serialization::Function *val_as_Function() const { + return val_type() == torch::jit::mobile::serialization::IValueUnion::Function ? 
static_cast(val()) : nullptr; + } + void *mutable_val() { + return GetPointer(VT_VAL); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_VAL_TYPE, 1) && + VerifyOffset(verifier, VT_VAL) && + VerifyIValueUnion(verifier, val(), val_type()) && + verifier.EndTable(); + } +}; + +template<> inline const torch::jit::mobile::serialization::Int *IValue::val_as() const { + return val_as_Int(); +} + +template<> inline const torch::jit::mobile::serialization::Bool *IValue::val_as() const { + return val_as_Bool(); +} + +template<> inline const torch::jit::mobile::serialization::Double *IValue::val_as() const { + return val_as_Double(); +} + +template<> inline const torch::jit::mobile::serialization::ComplexDouble *IValue::val_as() const { + return val_as_ComplexDouble(); +} + +template<> inline const torch::jit::mobile::serialization::TensorMetadata *IValue::val_as() const { + return val_as_TensorMetadata(); +} + +template<> inline const torch::jit::mobile::serialization::String *IValue::val_as() const { + return val_as_String(); +} + +template<> inline const torch::jit::mobile::serialization::List *IValue::val_as() const { + return val_as_List(); +} + +template<> inline const torch::jit::mobile::serialization::Tuple *IValue::val_as() const { + return val_as_Tuple(); +} + +template<> inline const torch::jit::mobile::serialization::Dict *IValue::val_as() const { + return val_as_Dict(); +} + +template<> inline const torch::jit::mobile::serialization::Object *IValue::val_as() const { + return val_as_Object(); +} + +template<> inline const torch::jit::mobile::serialization::IntList *IValue::val_as() const { + return val_as_IntList(); +} + +template<> inline const torch::jit::mobile::serialization::DoubleList *IValue::val_as() const { + return val_as_DoubleList(); +} + +template<> inline const torch::jit::mobile::serialization::BoolList *IValue::val_as() const { + return val_as_BoolList(); +} + +template<> inline const torch::jit::mobile::serialization::Device *IValue::val_as() const { + return val_as_Device(); +} + +template<> inline const torch::jit::mobile::serialization::EnumValue *IValue::val_as() const { + return val_as_EnumValue(); +} + +template<> inline const torch::jit::mobile::serialization::Function *IValue::val_as() const { + return val_as_Function(); +} + +struct IValueBuilder { + typedef IValue Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_val_type(torch::jit::mobile::serialization::IValueUnion val_type) { + fbb_.AddElement(IValue::VT_VAL_TYPE, static_cast(val_type), 0); + } + void add_val(::flatbuffers::Offset val) { + fbb_.AddOffset(IValue::VT_VAL, val); + } + explicit IValueBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateIValue( + ::flatbuffers::FlatBufferBuilder &_fbb, + torch::jit::mobile::serialization::IValueUnion val_type = torch::jit::mobile::serialization::IValueUnion::NONE, + ::flatbuffers::Offset val = 0) { + IValueBuilder builder_(_fbb); + builder_.add_val(val); + builder_.add_val_type(val_type); + return builder_.Finish(); +} + +struct ExtraFile FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef ExtraFileBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_NAME = 4, + VT_CONTENT = 6 
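+    // The VT_ enumerators are FlatBuffers vtable byte offsets for this
+    // table's fields: 4 is the first field (name), 6 the second (content).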
+ }; + const ::flatbuffers::String *name() const { + return GetPointer(VT_NAME); + } + ::flatbuffers::String *mutable_name() { + return GetPointer<::flatbuffers::String *>(VT_NAME); + } + const ::flatbuffers::String *content() const { + return GetPointer(VT_CONTENT); + } + ::flatbuffers::String *mutable_content() { + return GetPointer<::flatbuffers::String *>(VT_CONTENT); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_NAME) && + verifier.VerifyString(name()) && + VerifyOffset(verifier, VT_CONTENT) && + verifier.VerifyString(content()) && + verifier.EndTable(); + } +}; + +struct ExtraFileBuilder { + typedef ExtraFile Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_name(::flatbuffers::Offset<::flatbuffers::String> name) { + fbb_.AddOffset(ExtraFile::VT_NAME, name); + } + void add_content(::flatbuffers::Offset<::flatbuffers::String> content) { + fbb_.AddOffset(ExtraFile::VT_CONTENT, content); + } + explicit ExtraFileBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateExtraFile( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::String> name = 0, + ::flatbuffers::Offset<::flatbuffers::String> content = 0) { + ExtraFileBuilder builder_(_fbb); + builder_.add_content(content); + builder_.add_name(name); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateExtraFileDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const char *name = nullptr, + const char *content = nullptr) { + auto name__ = name ? _fbb.CreateString(name) : 0; + auto content__ = content ? 
_fbb.CreateString(content) : 0; + return torch::jit::mobile::serialization::CreateExtraFile( + _fbb, + name__, + content__); +} + +struct Module FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef ModuleBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_BYTECODE_VERSION = 4, + VT_EXTRA_FILES = 6, + VT_METHODS = 8, + VT_STATE_OBJ = 10, + VT_IVALUES = 12, + VT_STORAGE_DATA_SIZE = 14, + VT_STORAGE_DATA = 16, + VT_OBJECT_TYPES = 18, + VT_JIT_SOURCES = 20, + VT_JIT_CONSTANTS = 22, + VT_OPERATOR_VERSION = 24, + VT_MOBILE_IVALUE_SIZE = 26 + }; + uint32_t bytecode_version() const { + return GetField(VT_BYTECODE_VERSION, 0); + } + bool mutate_bytecode_version(uint32_t _bytecode_version = 0) { + return SetField(VT_BYTECODE_VERSION, _bytecode_version, 0); + } + const ::flatbuffers::Vector<::flatbuffers::Offset> *extra_files() const { + return GetPointer> *>(VT_EXTRA_FILES); + } + ::flatbuffers::Vector<::flatbuffers::Offset> *mutable_extra_files() { + return GetPointer<::flatbuffers::Vector<::flatbuffers::Offset> *>(VT_EXTRA_FILES); + } + const ::flatbuffers::Vector *methods() const { + return GetPointer *>(VT_METHODS); + } + ::flatbuffers::Vector *mutable_methods() { + return GetPointer<::flatbuffers::Vector *>(VT_METHODS); + } + uint32_t state_obj() const { + return GetField(VT_STATE_OBJ, 0); + } + bool mutate_state_obj(uint32_t _state_obj = 0) { + return SetField(VT_STATE_OBJ, _state_obj, 0); + } + const ::flatbuffers::Vector<::flatbuffers::Offset> *ivalues() const { + return GetPointer> *>(VT_IVALUES); + } + ::flatbuffers::Vector<::flatbuffers::Offset> *mutable_ivalues() { + return GetPointer<::flatbuffers::Vector<::flatbuffers::Offset> *>(VT_IVALUES); + } + int32_t storage_data_size() const { + return GetField(VT_STORAGE_DATA_SIZE, 0); + } + bool mutate_storage_data_size(int32_t _storage_data_size = 0) { + return SetField(VT_STORAGE_DATA_SIZE, _storage_data_size, 0); + } + const ::flatbuffers::Vector<::flatbuffers::Offset> *storage_data() const { + return GetPointer> *>(VT_STORAGE_DATA); + } + ::flatbuffers::Vector<::flatbuffers::Offset> *mutable_storage_data() { + return GetPointer<::flatbuffers::Vector<::flatbuffers::Offset> *>(VT_STORAGE_DATA); + } + const ::flatbuffers::Vector<::flatbuffers::Offset> *object_types() const { + return GetPointer> *>(VT_OBJECT_TYPES); + } + ::flatbuffers::Vector<::flatbuffers::Offset> *mutable_object_types() { + return GetPointer<::flatbuffers::Vector<::flatbuffers::Offset> *>(VT_OBJECT_TYPES); + } + const ::flatbuffers::Vector<::flatbuffers::Offset> *jit_sources() const { + return GetPointer> *>(VT_JIT_SOURCES); + } + ::flatbuffers::Vector<::flatbuffers::Offset> *mutable_jit_sources() { + return GetPointer<::flatbuffers::Vector<::flatbuffers::Offset> *>(VT_JIT_SOURCES); + } + const ::flatbuffers::Vector *jit_constants() const { + return GetPointer *>(VT_JIT_CONSTANTS); + } + ::flatbuffers::Vector *mutable_jit_constants() { + return GetPointer<::flatbuffers::Vector *>(VT_JIT_CONSTANTS); + } + uint32_t operator_version() const { + return GetField(VT_OPERATOR_VERSION, 0); + } + bool mutate_operator_version(uint32_t _operator_version = 0) { + return SetField(VT_OPERATOR_VERSION, _operator_version, 0); + } + uint32_t mobile_ivalue_size() const { + return GetField(VT_MOBILE_IVALUE_SIZE, 0); + } + bool mutate_mobile_ivalue_size(uint32_t _mobile_ivalue_size = 0) { + return SetField(VT_MOBILE_IVALUE_SIZE, _mobile_ivalue_size, 0); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return 
VerifyTableStart(verifier) && + VerifyField(verifier, VT_BYTECODE_VERSION, 4) && + VerifyOffset(verifier, VT_EXTRA_FILES) && + verifier.VerifyVector(extra_files()) && + verifier.VerifyVectorOfTables(extra_files()) && + VerifyOffset(verifier, VT_METHODS) && + verifier.VerifyVector(methods()) && + VerifyField(verifier, VT_STATE_OBJ, 4) && + VerifyOffset(verifier, VT_IVALUES) && + verifier.VerifyVector(ivalues()) && + verifier.VerifyVectorOfTables(ivalues()) && + VerifyField(verifier, VT_STORAGE_DATA_SIZE, 4) && + VerifyOffset(verifier, VT_STORAGE_DATA) && + verifier.VerifyVector(storage_data()) && + verifier.VerifyVectorOfTables(storage_data()) && + VerifyOffset(verifier, VT_OBJECT_TYPES) && + verifier.VerifyVector(object_types()) && + verifier.VerifyVectorOfTables(object_types()) && + VerifyOffset(verifier, VT_JIT_SOURCES) && + verifier.VerifyVector(jit_sources()) && + verifier.VerifyVectorOfTables(jit_sources()) && + VerifyOffset(verifier, VT_JIT_CONSTANTS) && + verifier.VerifyVector(jit_constants()) && + VerifyField(verifier, VT_OPERATOR_VERSION, 4) && + VerifyField(verifier, VT_MOBILE_IVALUE_SIZE, 4) && + verifier.EndTable(); + } +}; + +struct ModuleBuilder { + typedef Module Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_bytecode_version(uint32_t bytecode_version) { + fbb_.AddElement(Module::VT_BYTECODE_VERSION, bytecode_version, 0); + } + void add_extra_files(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> extra_files) { + fbb_.AddOffset(Module::VT_EXTRA_FILES, extra_files); + } + void add_methods(::flatbuffers::Offset<::flatbuffers::Vector> methods) { + fbb_.AddOffset(Module::VT_METHODS, methods); + } + void add_state_obj(uint32_t state_obj) { + fbb_.AddElement(Module::VT_STATE_OBJ, state_obj, 0); + } + void add_ivalues(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> ivalues) { + fbb_.AddOffset(Module::VT_IVALUES, ivalues); + } + void add_storage_data_size(int32_t storage_data_size) { + fbb_.AddElement(Module::VT_STORAGE_DATA_SIZE, storage_data_size, 0); + } + void add_storage_data(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> storage_data) { + fbb_.AddOffset(Module::VT_STORAGE_DATA, storage_data); + } + void add_object_types(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> object_types) { + fbb_.AddOffset(Module::VT_OBJECT_TYPES, object_types); + } + void add_jit_sources(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> jit_sources) { + fbb_.AddOffset(Module::VT_JIT_SOURCES, jit_sources); + } + void add_jit_constants(::flatbuffers::Offset<::flatbuffers::Vector> jit_constants) { + fbb_.AddOffset(Module::VT_JIT_CONSTANTS, jit_constants); + } + void add_operator_version(uint32_t operator_version) { + fbb_.AddElement(Module::VT_OPERATOR_VERSION, operator_version, 0); + } + void add_mobile_ivalue_size(uint32_t mobile_ivalue_size) { + fbb_.AddElement(Module::VT_MOBILE_IVALUE_SIZE, mobile_ivalue_size, 0); + } + explicit ModuleBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateModule( + ::flatbuffers::FlatBufferBuilder &_fbb, + uint32_t bytecode_version = 0, + ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> extra_files = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> methods = 0, + uint32_t state_obj = 0, + 
::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> ivalues = 0, + int32_t storage_data_size = 0, + ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> storage_data = 0, + ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> object_types = 0, + ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> jit_sources = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> jit_constants = 0, + uint32_t operator_version = 0, + uint32_t mobile_ivalue_size = 0) { + ModuleBuilder builder_(_fbb); + builder_.add_mobile_ivalue_size(mobile_ivalue_size); + builder_.add_operator_version(operator_version); + builder_.add_jit_constants(jit_constants); + builder_.add_jit_sources(jit_sources); + builder_.add_object_types(object_types); + builder_.add_storage_data(storage_data); + builder_.add_storage_data_size(storage_data_size); + builder_.add_ivalues(ivalues); + builder_.add_state_obj(state_obj); + builder_.add_methods(methods); + builder_.add_extra_files(extra_files); + builder_.add_bytecode_version(bytecode_version); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateModuleDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + uint32_t bytecode_version = 0, + const std::vector<::flatbuffers::Offset> *extra_files = nullptr, + const std::vector *methods = nullptr, + uint32_t state_obj = 0, + const std::vector<::flatbuffers::Offset> *ivalues = nullptr, + int32_t storage_data_size = 0, + const std::vector<::flatbuffers::Offset> *storage_data = nullptr, + const std::vector<::flatbuffers::Offset> *object_types = nullptr, + const std::vector<::flatbuffers::Offset> *jit_sources = nullptr, + const std::vector *jit_constants = nullptr, + uint32_t operator_version = 0, + uint32_t mobile_ivalue_size = 0) { + auto extra_files__ = extra_files ? _fbb.CreateVector<::flatbuffers::Offset>(*extra_files) : 0; + auto methods__ = methods ? _fbb.CreateVector(*methods) : 0; + auto ivalues__ = ivalues ? _fbb.CreateVector<::flatbuffers::Offset>(*ivalues) : 0; + auto storage_data__ = storage_data ? _fbb.CreateVector<::flatbuffers::Offset>(*storage_data) : 0; + auto object_types__ = object_types ? _fbb.CreateVector<::flatbuffers::Offset>(*object_types) : 0; + auto jit_sources__ = jit_sources ? _fbb.CreateVector<::flatbuffers::Offset>(*jit_sources) : 0; + auto jit_constants__ = jit_constants ? 
_fbb.CreateVector(*jit_constants) : 0; + return torch::jit::mobile::serialization::CreateModule( + _fbb, + bytecode_version, + extra_files__, + methods__, + state_obj, + ivalues__, + storage_data_size, + storage_data__, + object_types__, + jit_sources__, + jit_constants__, + operator_version, + mobile_ivalue_size); +} + +inline bool VerifyIValueUnion(::flatbuffers::Verifier &verifier, const void *obj, IValueUnion type) { + switch (type) { + case IValueUnion::NONE: { + return true; + } + case IValueUnion::Int: { + return verifier.VerifyField(static_cast(obj), 0, 8); + } + case IValueUnion::Bool: { + return verifier.VerifyField(static_cast(obj), 0, 1); + } + case IValueUnion::Double: { + return verifier.VerifyField(static_cast(obj), 0, 8); + } + case IValueUnion::ComplexDouble: { + return verifier.VerifyField(static_cast(obj), 0, 8); + } + case IValueUnion::TensorMetadata: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case IValueUnion::String: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case IValueUnion::List: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case IValueUnion::Tuple: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case IValueUnion::Dict: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case IValueUnion::Object: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case IValueUnion::IntList: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case IValueUnion::DoubleList: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case IValueUnion::BoolList: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case IValueUnion::Device: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case IValueUnion::EnumValue: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case IValueUnion::Function: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + default: return true; + } +} + +inline bool VerifyIValueUnionVector(::flatbuffers::Verifier &verifier, const ::flatbuffers::Vector<::flatbuffers::Offset> *values, const ::flatbuffers::Vector *types) { + if (!values || !types) return !values && !types; + if (values->size() != types->size()) return false; + for (::flatbuffers::uoffset_t i = 0; i < values->size(); ++i) { + if (!VerifyIValueUnion( + verifier, values->Get(i), types->GetEnum(i))) { + return false; + } + } + return true; +} + +inline const torch::jit::mobile::serialization::Module *GetModule(const void *buf) { + return ::flatbuffers::GetRoot(buf); +} + +inline const torch::jit::mobile::serialization::Module *GetSizePrefixedModule(const void *buf) { + return ::flatbuffers::GetSizePrefixedRoot(buf); +} + +inline Module *GetMutableModule(void *buf) { + return ::flatbuffers::GetMutableRoot(buf); +} + +inline torch::jit::mobile::serialization::Module *GetMutableSizePrefixedModule(void *buf) { + return ::flatbuffers::GetMutableSizePrefixedRoot(buf); +} + +inline const char *ModuleIdentifier() { + return "PTMF"; +} + +inline bool ModuleBufferHasIdentifier(const void *buf) { + return ::flatbuffers::BufferHasIdentifier( + buf, ModuleIdentifier()); +} + +inline bool SizePrefixedModuleBufferHasIdentifier(const void *buf) { + return ::flatbuffers::BufferHasIdentifier( + buf, ModuleIdentifier(), true); +} + +inline bool VerifyModuleBuffer( + 
::flatbuffers::Verifier &verifier) { + return verifier.VerifyBuffer(ModuleIdentifier()); +} + +inline bool VerifySizePrefixedModuleBuffer( + ::flatbuffers::Verifier &verifier) { + return verifier.VerifySizePrefixedBuffer(ModuleIdentifier()); +} + +inline void FinishModuleBuffer( + ::flatbuffers::FlatBufferBuilder &fbb, + ::flatbuffers::Offset root) { + fbb.Finish(root, ModuleIdentifier()); +} + +inline void FinishSizePrefixedModuleBuffer( + ::flatbuffers::FlatBufferBuilder &fbb, + ::flatbuffers::Offset root) { + fbb.FinishSizePrefixed(root, ModuleIdentifier()); +} + +} // namespace serialization +} // namespace mobile +} // namespace jit +} // namespace torch + +#endif // FLATBUFFERS_GENERATED_MOBILEBYTECODE_TORCH_JIT_MOBILE_SERIALIZATION_H_ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/onnx.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/onnx.h new file mode 100644 index 0000000000000000000000000000000000000000..813064d4fabb927b9125b791d892d82483c53845 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/onnx.h @@ -0,0 +1,12 @@ +#pragma once + +#include +#include + +namespace torch { +namespace jit { + +TORCH_API std::string prettyPrint(const ::ONNX_NAMESPACE::ModelProto& model); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/pickle.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/pickle.h new file mode 100644 index 0000000000000000000000000000000000000000..a546867e744238331ce692dac99b416733f1a53e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/pickle.h @@ -0,0 +1,107 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { + +/// Pickle an IValue by calling a function to handle writing the data. +/// +/// `writer` is a function that takes in a pointer to a chunk of memory and its +/// size and consumes it. +/// +/// See `jit::pickle` for more details. +TORCH_API void pickle( + std::function writer, + const IValue& ivalue, + std::vector* tensor_table = nullptr); + +/// Save a `torch::IValue` in a format compatible with Python's `pickle` module +/// +/// If present, `tensor_table` is a pointer to a table in which tensors that +/// are contained within `ivalue` are stored, and the bytes returned by the +/// pickler will only include references to these tensors in the table. This can +/// be used to keep the binary blob size small. +/// If not provided, tensors are stored in the same byte stream as the pickle +/// data, similar to `torch.save()` in eager Python. +/// +/// Pickled values can be loaded in Python and C++: +/// \rst +/// .. code-block:: cpp +/// +/// torch::IValue float_value(2.3); +/// +/// // TODO: when tensors are stored in the pickle, delete this +/// std::vector tensor_table; +/// auto data = torch::jit::pickle(float_value, &tensor_table); +/// +/// std::vector ivalues = +/// torch::jit::unpickle(data.data(), data.size()); +/// +/// .. code-block:: python +/// +/// values = torch.load('data.pkl') +/// print(values) +/// +/// \endrst +TORCH_API std::vector pickle( + const IValue& ivalue, + std::vector* tensor_table = nullptr); + +/// Save a `torch::IValue` in a format that can be loaded by both +/// `torch::pickle_load` in C++ and `torch.load` in Python. 
+TORCH_API std::vector pickle_save(const IValue& ivalue); + +/// Deserialize a `torch::IValue` from bytes produced by either +/// `torch::pickle_save` in C++ or `torch.save` in Python +TORCH_API IValue pickle_load(const std::vector& data); + +/// `reader` is a function that takes in a size to read from some pickled +/// binary. `reader` should remember where it last read, and return +/// the number of bytes read. +/// See `torch::pickle` for details. +/// type_resolver is used to resolve any JIT type based on type str +TORCH_API IValue unpickle( + std::function reader, + TypeResolver type_resolver, + c10::ArrayRef tensor_table, + c10::TypePtr (*type_parser)(const std::string&) = + Unpickler::defaultTypeParser, + ObjLoader obj_loader = nullptr); + +/// Decode a chunk of memory containing pickled data into its `torch::IValue`s. +/// +/// If any `torch::IValue`s in the pickled data are `Object`s, then a +/// `class_resolver` function must be provided. +/// +/// See `torch::pickle` for details. +TORCH_API IValue unpickle( + const char* data, + size_t size, + TypeResolver type_resolver = nullptr, + c10::ArrayRef tensor_table = {}, + c10::TypePtr (*type_parser)(const std::string&) = + Unpickler::defaultTypeParser); + +/// Decode a chunk of memory containing pickled data into its `torch::IValue`s. +/// +/// If any `torch::IValue`s in the pickled data are `Object`s, then a +/// `class_resolver` function must be provided. +/// +/// See `torch::pickle` for details. +TORCH_API IValue unpickle( + const char* data, + size_t size, + ObjLoader obj_loader, + TypeResolver type_resolver = nullptr, + c10::ArrayRef tensor_table = {}, + c10::TypePtr (*type_parser)(const std::string&) = + Unpickler::defaultTypeParser); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/source_range_serialization.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/source_range_serialization.h new file mode 100644 index 0000000000000000000000000000000000000000..bbfd533cd17891d415cd10681a1816fc7a9d78a1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/source_range_serialization.h @@ -0,0 +1,68 @@ +#pragma once + +#include +#include + +#include + +#include +#include + +namespace c10 { +struct IValue; +} + +namespace torch { +namespace jit { + +class Pickler; +class SourceRangeSerializer; +static constexpr size_t kByteOffsetIndex = 0; +static constexpr size_t kSourceRangeIndex = 1; +static constexpr size_t kSourceRangeTagIndex = 2; +constexpr c10::string_view kFormatWithStringTable = "FORMAT_WITH_STRING_TABLE"; + +class SourceRangePickler { + public: + SourceRangePickler(); + + std::vector pickle( + const SourceRangeRecords& ranges, + const SourceRangeTagMap& source_range_tags); + + private: + std::shared_ptr srs; +}; + +class SourceRangeDeserializer { + public: + SourceRangeDeserializer() = default; + explicit SourceRangeDeserializer(const c10::IValue& text_table) { + for (const auto& x : text_table.toTuple()->elements()) { + text_table_.emplace_back(std::make_shared(x.toStringRef())); + } + } + SourceRange deserialize(const c10::IValue& iv); + + private: + std::shared_ptr deserialize_source(const c10::IValue& iv); + std::unordered_map< + c10::intrusive_ptr, + std::shared_ptr> + cached_sources; + std::vector> text_table_; +}; + +class SourceRangeUnpickler { + public: + virtual c10::optional findSourceRangeThatGenerated( + const SourceRange& range) = 0; + 
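+  // Illustrative usage sketch (the concrete unpickler factory `makeUnpickler`
+  // and the `range` value are hypothetical, not part of this header):
+  //
+  //   std::shared_ptr<SourceRangeUnpickler> unpickler = makeUnpickler();
+  //   if (auto generating = unpickler->findSourceRangeThatGenerated(range)) {
+  //     generating->highlight(std::cout);  // print the originating range
+  //   }
+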
+ virtual ~SourceRangeUnpickler() = default; +}; + +TORCH_API void setShouldUseFormatWithStringTable( + bool should_use_format_with_string_table); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/backend/backend_data.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/backend/backend_data.h new file mode 100644 index 0000000000000000000000000000000000000000..496ecbdbbc6c530e50e0d3a8e3b815e9a5f005e0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/backend/backend_data.h @@ -0,0 +1,61 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace lazy { + +class TORCH_API BackendData { + public: + struct Info { + /** + * Used by Lazy Graph Executor to tag info on BackendData objs + * */ + virtual ~Info() = default; + }; + /** + * Represents (Tensor) data stored on a backend device + * in its native format. + * */ + using Handle = int64_t; + + BackendData(BackendDevice device, Shape shape) + : device_(std::move(device)), shape_(std::move(shape)) {} + + virtual ~BackendData() = default; + + const BackendDevice& device() const { + return device_; + } + + const Shape& shape() const { + return shape_; + } + + Info* info() const { + return info_.get(); + } + + std::shared_ptr SetInfo(std::shared_ptr info) { + std::swap(info, info_); + return info; + } + + virtual Handle GetHandle() = 0; + + virtual void Assign(const BackendData& data) = 0; + + virtual bool HasValue() const = 0; + + private: + BackendDevice device_; + Shape shape_; + std::shared_ptr info_; +}; + +using BackendDataPtr = std::shared_ptr; + +} // namespace lazy +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/backend/backend_device.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/backend/backend_device.h new file mode 100644 index 0000000000000000000000000000000000000000..4c239d1e4b71c0049e28e05a3e67a35f68516232 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/backend/backend_device.h @@ -0,0 +1,100 @@ +#pragma once + +#include +#include +#include + +#include +#include +#include +#include + +namespace c10 { +struct Device; +} + +namespace torch { +namespace lazy { + +// Backend should extend it and define their own supported hardware types. +struct TORCH_API BackendDeviceType { + int8_t type{(int8_t)at::kCPU}; + // Note: previous default value was '0', which actually maps to at::kCPU, at + // least now it is explicit, we may want to make default/undefined semantics + // more clear though + BackendDeviceType() : type((int8_t)at::kCPU) {} + BackendDeviceType(int8_t type) : type(type) {} + + virtual ~BackendDeviceType() = default; + virtual std::string toString() const { + return "Unknown"; + } +}; + +class TORCH_API BackendDevice { + public: + // The default constructor will set both the device type and ordinal + // to backend specific defaults. 
+ BackendDevice(); + BackendDevice(std::shared_ptr&& type, int64_t ordinal); + + int8_t type() const; + int64_t ordinal() const { + return ordinal_; + } + + bool operator==(const BackendDevice& other) const { + return compare(other) == 0; + } + bool operator!=(const BackendDevice& other) const { + return compare(other) != 0; + } + bool operator<(const BackendDevice& rhs) const { + return compare(rhs) < 0; + } + + std::string toString() const; + + private: + int compare(const BackendDevice& rhs) const; + + // Use shared_ptr instead of unique_ptr so that BackendDevice can be copied. + std::shared_ptr type_; + int64_t ordinal_; +}; + +TORCH_API std::ostream& operator<<( + std::ostream& os, + const BackendDevice& device); + +// Helpers for converting a c10::Device to BackendDevice and vice versa. +TORCH_API BackendDevice atenDeviceToBackendDevice(const c10::Device& device); +TORCH_API c10::Device backendDeviceToAtenDevice(const BackendDevice& device); + +// Tries to extract the backend device out of the lazy tensor. Returns nullopt +// if the input is not a lazy tensor. +TORCH_API c10::optional GetBackendDevice( + const at::ITensorListRef tensors); +TORCH_API c10::optional GetBackendDevice( + const at::TensorList tensors); +TORCH_API c10::optional GetBackendDevice( + const at::Tensor& tensor); +TORCH_API c10::optional GetBackendDevice( + const c10::optional& device); + +// For variadic template. +TORCH_API c10::optional GetBackendDevice(); + +template +c10::optional GetBackendDevice( + const T& tensor, + const Args&... forward_tensors) { + auto optional_device = GetBackendDevice(tensor); + if (optional_device) { + return optional_device; + } + return GetBackendDevice(forward_tensors...); +} + +} // namespace lazy +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/backend/backend_interface.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/backend/backend_interface.h new file mode 100644 index 0000000000000000000000000000000000000000..f94d3b602e52c889c96162143f8d6eb8b8d0237b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/backend/backend_interface.h @@ -0,0 +1,158 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace lazy { + +struct IrBuilder; + +/** + * Work in progress- don't treat this as a stable interface yet! + */ +class TORCH_API BackendImplInterface { + public: + virtual ~BackendImplInterface() = default; + + /** + * Initialization/Teardown + * */ + // No-op by default. Allows custom functionality to be exposed through + // extension bindings. + virtual void InitializeAtenBindings() const {} + + virtual void PrepareToExit() const = 0; + + /** + * Configuration + * */ + + virtual void SetRngSeed(size_t seed) const = 0; + + /** + * IR Tracing + * */ + + virtual const IrBuilder* GetIrBuilder() const = 0; + + /** + * Data Transfer + * */ + + virtual BackendDataPtr MakeComputationDataFromTensor( + const at::Tensor& tensor, + const Shape& shape, + const BackendDevice& device) const = 0; + virtual BackendDataPtr MakeComputationDataFromScalar( + const at::Scalar& scalar, + const torch::lazy::BackendDevice& device) const = 0; + virtual BackendDataPtr CreateDataPlaceholder( + const BackendDevice& device, + const Shape& shape) const = 0; + + // Gets backend data if the node is a device data node. 
Otherwise returns + // nullptr + virtual BackendDataPtr GetComputationDataFromNode(const Node*) const = 0; + + virtual at::Tensor MakeTensorFromComputationData( + const BackendDataPtr data, + c10::optional logical_scalar_type) const = 0; + + /** + * Lowering, Compilation, Execution + * */ + + virtual std::unique_ptr CreateLoweringContext( + const std::string& name, + BackendDevice device, + c10::ArrayRef post_order, + Util::EmissionMap emit_status) const = 0; + + virtual std::unique_ptr CreateLoweringContext( + const std::string& name, + BackendDevice device) const = 0; + + // TODO(whc) need to keep this? + virtual std::vector GetCompilationDevices( + const std::string& device, + c10::ArrayRef devices) const = 0; + + virtual std::vector Compile( + std::vector instances) const = 0; + + virtual std::vector ExecuteComputation( + torch::lazy::ComputationPtr computation, + c10::ArrayRef arguments, + const BackendDevice& device) const = 0; + + /** + * Device Configuration + * */ + + // Set or get the default device type. + // For backends used with virtual c10::Devices, this configures what real + // device type the backend should use, and matters if the backend supports + // more than one type of real device. + virtual std::shared_ptr GetDefaultDeviceType() const = 0; + virtual void SetDefaultDeviceType(int8_t type) = 0; + + // Set or get the default device ordinal. + // For backends that supports multi-device, this configures what the + // default device the backend should use. + virtual int64_t GetDefaultDeviceOrdinal() const = 0; + virtual void SetDefaultDeviceOrdinal(int64_t) = 0; + + // Specify which aten device should be used for eager fallback + // may change depending on current 'Default' DeviceType + virtual at::DeviceType EagerFallbackDeviceType() const = 0; + + // Query all available backend devices + virtual std::vector GetBackendDevices() const = 0; + + virtual std::string CreateMetricReport() const { + return ""; + } + + // Map a particular c10:: device to a concrete backend device + // Note:: c10:: devices may be virtual or concrete. xla:: and lazy:: are + // virtual devices, meaning they may map to a gpu, tpu, etc. behind the + // scenes. In the future, non-virtual c10:: devices may also use lazy tensors + // through a mode, in which case these APIs should still work, but should be + // identity mappings. 
+ virtual BackendDevice GetBackendDevice(c10::Device device) const = 0; + + // TODO(whc) + // Additional APIs expected for supporting distributed training, to be + // designed + + /** + * Debug/Metrics + * */ + + // virtual std::map GetMetrics() const = 0; + + // virtual MemoryInfo GetMemoryInfo(const std::string& device) = 0; + + virtual std::string GetComputationBackendText( + const ComputationPtr computation) const = 0; +}; + +class TORCH_API BackendRegistrar { + public: + BackendRegistrar(const BackendImplInterface* backend_impl_interface); +}; + +TORCH_API bool hasBackend(); +TORCH_API const BackendImplInterface* getBackend(); + +TORCH_API const IrBuilder* getIrBuilder(); + +} // namespace lazy +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/backend/lowering_context.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/backend/lowering_context.h new file mode 100644 index 0000000000000000000000000000000000000000..49e7b8be58cbf234b546bc4988e870a520d797f8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/backend/lowering_context.h @@ -0,0 +1,114 @@ +#pragma once + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +namespace torch { +namespace lazy { + +class TORCH_API Computation { + public: + virtual int parameters_size() const = 0; + + virtual const std::vector& parameter_shapes() const = 0; + + virtual const std::vector& parameter_names() const = 0; + + virtual const Shape& result_shape() const = 0; + + virtual const std::string to_string() const = 0; + + virtual ~Computation() = default; + + // Indicates whether this computation is being executed inside a mark step + // Assume false unless set otherwise + bool in_mark_step = false; +}; + +using ComputationPtr = std::shared_ptr; + +// Keeps track of the code generation state. +class TORCH_API LoweringContext { + public: + LoweringContext(const std::string& name, BackendDevice device); + LoweringContext( + const std::string& name, + BackendDevice device, + c10::ArrayRef post_order, + Util::EmissionMap emit_status); + + virtual ~LoweringContext() = default; + + static std::unique_ptr Create( + const std::string& name, + BackendDevice device, + c10::ArrayRef post_order, + Util::EmissionMap emit_status); + + static std::unique_ptr Create( + const std::string& name, + BackendDevice device); + + const BackendDevice& device() const { + return device_; + }; + + // Retrieves the vector holding all the tensors associated with the parameter + // instructions which have been created. + const std::vector& GetParametersData() const; + + // Adds a new input/output alias. + virtual void SetUpAlias( + const std::vector& output_index, + int64_t param_number, + const std::vector& param_index, + bool must_alias = false) { + // Dummy default implementation to do nothing. + } + + // Check if parameter shape matches result at index. + virtual bool CheckResultShape( + const BackendDataPtr& parameter_data, + size_t result_idx) { + // Dummy default implementation to do nothing. + return false; + } + + // Adds the given output as a component of the result tuple and returns its + // assigned position within the tuple. + virtual size_t AddResult(const torch::lazy::Output& output) = 0; + + // Associates the given output with the input parameter of the given index and + // shape. Only used for the operator-by-operator execution, mostly for + // debugging purposes. 
+ virtual void AddParameter( + const torch::lazy::Output& output, + size_t index, + const Shape& shape, + const std::string& name) = 0; + + // Build the computation capturing all the operations created with the + // embedded builder (returned by the builder() API). + virtual ComputationPtr Build() = 0; + + size_t GetEmittedNodeCount() const { + return emit_status_.size(); + } + + protected: + BackendDevice device_; + std::vector parameters_; + std::vector parameter_sequence_; + Util::EmissionMap emit_status_; +}; + +} // namespace lazy +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/config.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/config.h new file mode 100644 index 0000000000000000000000000000000000000000..16d2b80701536166c817ad609db14f342e6c94d0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/config.h @@ -0,0 +1,28 @@ +#pragma once +#include +#include + +C10_DECLARE_bool(torch_lazy_ir_debug); +C10_DECLARE_bool(torch_lazy_handle_special_scalars); +C10_DECLARE_bool(torch_lazy_all_numbers_special_scalars); +C10_DECLARE_bool(torch_lazy_param_aliasing); +C10_DECLARE_bool(torch_lazy_reuse_ir); +C10_DECLARE_bool(torch_lazy_use_thread_pool); +C10_DECLARE_bool(torch_lazy_enable_device_data_cache); + +C10_DECLARE_int(torch_lazy_compilation_cache_size); +C10_DECLARE_int(torch_lazy_device_data_cache_size); +C10_DECLARE_int(torch_lazy_io_thread_pool_size); +C10_DECLARE_int(torch_lazy_metrics_samples); +C10_DECLARE_int(torch_lazy_trim_graph_check_frequency); +C10_DECLARE_int(torch_lazy_trim_graph_size); + +C10_DECLARE_string(torch_lazy_metrics_percentiles); + +C10_DECLARE_int(torch_lazy_shape_cache_size); + +namespace torch { +namespace lazy { +TORCH_API std::string& getLTCForceFallback(); +} +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/helpers.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/helpers.h new file mode 100644 index 0000000000000000000000000000000000000000..276cf4b07e6b23ebcf712172a9b84ed1cc717c83 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/helpers.h @@ -0,0 +1,72 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +// TODO: Consolidate this file with util.h + +namespace torch { +namespace lazy { + +// Converts an iterable container to a vector of int64's. +template +static std::vector ToI64Vector(const S& input) { + return ToVector(input); +} + +// Creates a set of dimension by dropping the drop_dims ones. +TORCH_API std::vector DropDimensions( + c10::ArrayRef sizes, + c10::ArrayRef drop_dims); + +// Get the canonical dimension index in the [0, rank) interval. Negative +// indices are interpreted as follows: -1 is rank-1, -2 is rank-2 etc. +TORCH_API int64_t GetCanonicalDimensionIndex(int64_t dim, int64_t rank); + +// Same as above, for multiple dimensions. +TORCH_API std::vector GetCanonicalDimensionIndices( + c10::ArrayRef dimensions, + int64_t rank); + +// Returns the canonical position in the dim dimension, handling negative +// values for the position. +TORCH_API int64_t GetCanonicalPosition( + c10::ArrayRef dimensions, + int64_t dim, + int64_t pos); + +// Creates a transposition from the given input and dimensions. 
+TORCH_API std::vector<int64_t> MakeTransposePermutation(
+    int64_t dim0,
+    int64_t dim1,
+    int64_t rank);
+
+// Calculates the promoted shape to which the input shapes should be
+// broadcasted for an elementwise operation. The size of the common dimensions
+// (2,3,4 for shape1, and 0,1,2 for shape2) must either match, or either one
+// of the two be 1.
+// Example:
+// shape1 = [9, 7, 6, 1, 2]
+// shape2 = [6, 5, 2]
+// result_shape = [9, 7, 6, 5, 2]
+TORCH_API std::vector<int64_t> GetPromotedShape(
+    c10::ArrayRef<int64_t> shape1_dims,
+    c10::ArrayRef<int64_t> shape2_dims);
+
+TORCH_API Shape
+GetPromotedBinaryOpShape(const Shape& shape1, const Shape& shape2);
+
+TORCH_API std::vector<std::string> StrSplit(c10::string_view text, char delim);
+
+} // namespace lazy
+} // namespace torch
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ir_builder.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ir_builder.h
new file mode 100644
index 0000000000000000000000000000000000000000..3b58d00aace6c7676d3a3544706f9db9d9b460be
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ir_builder.h
@@ -0,0 +1,150 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+// This file is part of the backend interface. So, ops shouldn't be added or
+// removed without due process. The exception to this is the view ops, which
+// will be removed soon pending functionalization.
+
+namespace torch {
+namespace lazy {
+
+template <typename T, typename... Args>
+NodePtr ReuseNode(Args&&... args) {
+  if (FLAGS_torch_lazy_reuse_ir) {
+    return LookupNodeFromTrieCache<T>(std::forward<Args>(args)...);
+  }
+  return nullptr;
+}
+
+// Caching an IR node into TrieCache
+static inline void CacheNode(NodePtr node) {
+  if (FLAGS_torch_lazy_reuse_ir) {
+    TrieCache::Get()->Insert(std::move(node));
+  }
+}
+
+template <typename T, typename... Args>
+NodePtr MakeNode(Args&&... args) {
+  return std::make_shared<T>(std::forward<Args>(args)...);
+}
+
+// op is passed in for a more efficient node casting, see the implementation of
+// NodeCast
+template <typename T, typename... Args>
+NodePtr ReuseOrMakeNode(Args&&...
args) { + NodePtr node = ReuseNode(std::forward(args)...); + if (!node) { + node = MakeNode(std::forward(args)...); + CacheNode(node); + } + return node; +} + +struct IrBuilder { + virtual NodePtr MakeDeviceData( + const std::shared_ptr& data) const = 0; + virtual NodePtr MakeScalar( + const at::Scalar& value, + const at::ScalarType& type) const = 0; + virtual NodePtr MakeExpand( + const Value& input0, + const std::vector& size, + const bool& is_scalar_expand) const = 0; + virtual NodePtr MakeCast( + const Value& input0, + const at::ScalarType& dtype, + const c10::optional& stype = c10::nullopt) const = 0; + virtual NodePtr MakeTensorList(const OpList& inputs) const = 0; + virtual NodePtr MakeGeneric( + const OpKind& op, + const OpList& operands, + const Shape& shape, + const size_t& num_outputs = 1, + const hash_t& hash_seed = static_cast(0x5a2d296e9)) const = 0; + + // dynamic ir nodes + virtual NodePtr MakeSizeNode(const Value& input, size_t dim) const = 0; + virtual NodePtr MakeSizeAdd(const Value& a, const Value& b) const = 0; + virtual NodePtr MakeSizeMul(const Value& a, const Value& b) const = 0; + virtual NodePtr MakeSizeDiv(const Value& a, const Value& b) const = 0; + + virtual ~IrBuilder() = default; +}; + +static inline NodePtr MakeDeviceData(const std::shared_ptr& data) { + return getIrBuilder()->MakeDeviceData(data); +} +static inline NodePtr MakeScalar( + const at::Scalar& value, + const at::ScalarType& type) { + return getIrBuilder()->MakeScalar(value, type); +} +static inline NodePtr MakeExpand( + const Value& input0, + const std::vector& size, + const bool& is_scalar_expand) { + return getIrBuilder()->MakeExpand(input0, size, is_scalar_expand); +} +static inline NodePtr MakeCast( + const Value& input0, + const at::ScalarType& dtype, + const c10::optional& stype = c10::nullopt) { + return getIrBuilder()->MakeCast(input0, dtype, stype); +} +static inline NodePtr MakeTensorList(const OpList& inputs) { + return getIrBuilder()->MakeTensorList(inputs); +} +static inline NodePtr MakeGeneric( + const OpKind& op, + const OpList& operands, + const Shape& shape, + const size_t& num_outputs = 1, + const hash_t& hash_seed = static_cast(0x5a2d296e9)) { + return getIrBuilder()->MakeGeneric( + op, operands, shape, num_outputs, hash_seed); +} + +// dynamic ir nodes +static inline NodePtr MakeSizeNode(const Value& input, size_t dim) { + return getIrBuilder()->MakeSizeNode(input, dim); +} +static inline NodePtr MakeSizeAdd(const Value& a, const Value& b) { + return getIrBuilder()->MakeSizeAdd(a, b); +} +static inline NodePtr MakeSizeMul(const Value& a, const Value& b) { + return getIrBuilder()->MakeSizeAdd(a, b); +} +static inline NodePtr MakeSizeDiv(const Value& a, const Value& b) { + return getIrBuilder()->MakeSizeDiv(a, b); +} + +inline Value GetSymIntValue(c10::SymInt a) { + if (auto ma = a.maybe_as_int()) { + return Value(MakeScalar(*ma, at::kLong), 0); + } else { + return Value( + dynamic_cast(a.toSymNodeImplUnowned()) + ->node_, + 0); + } +} + +// TODO: this should return Value +inline std::vector GetSymIntArrayRefValue(c10::SymIntArrayRef arr) { + std::vector r; + for (const auto& a : arr) { + r.emplace_back(a.guard_int(__FILE__, __LINE__)); + } + return r; +} + +} // namespace lazy +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/lazy_graph_executor.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/lazy_graph_executor.h new file mode 100644 index 
0000000000000000000000000000000000000000..d2edbb75ffba338bcc79e33e529fdd3005b3f1dc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/lazy_graph_executor.h @@ -0,0 +1,426 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace lazy { + +class TORCH_API LazyGraphExecutor { + public: + struct DeviceDataInfo : public BackendData::Info { + DeviceDataInfo(int64_t tensor_id, bool read_only) + : tensor_id(tensor_id), read_only(read_only) {} + + int64_t tensor_id = 0; + bool read_only = false; + }; + + // Register a lazy graph executor instance that can be retrieved using Get() + static void Register(LazyGraphExecutor*); + static LazyGraphExecutor* Get(); + + virtual ~LazyGraphExecutor() = default; + + // Override these methods to perform custom tensor registration and + // unregistration Note: It is vital that the parent implementations are also + // called in order for the tensors to show up in the live tensor list + virtual void RegisterTensor(std::shared_ptr data); + virtual void UnregisterTensor(LazyTensor::Data* data); + + // Seed for random generator. + // Override to supply your own DeviceContextArena. + virtual Value GetRngSeed(const BackendDevice& device); + virtual uint64_t GetRunningSeed(const BackendDevice& device); + virtual void SetRngSeed(const BackendDevice& device, uint64_t seed); + + void DeviceBarrier(const BackendDevice& device); + + BackendDataPtr GetDeviceData( + const at::Tensor& tensor, + const BackendDevice& device); + + BackendDataPtr GetDeviceData( + const at::Scalar& value, + at::ScalarType scalar_type, + const BackendDevice& device); + + // Retrieves the set of lazy tensors which are currently live in the system, + // for the given device. If device is nullptr, the live tensors for all + // devices will be returned. Returned tensors are sorted by device as primary + // key, and by unique ID as secondary key. + std::vector GetLiveTensors(const BackendDevice* device); + + // Makes sure that any outstanding IR operation accumulated over live tensors, + // gets turned into device data. If wait is true, the sync operation will be + // run synchronously. The devices argument, if not empty, tells the devices + // which should be partecipating into the replicated computation. + virtual void SyncLiveTensorsGraph( + const BackendDevice* device, + c10::ArrayRef devices, + bool wait); + + // Applies all the pending IR operations queued over the input tensors. All + // the tensors must be on the same device. If wait is true, the sync operation + // will be run synchronously. The devices argument, if not empty, tells the + // devices which should be partecipating into the replicated computation. + void SyncTensorsGraph( + std::vector* tensors, + c10::ArrayRef devices, + bool wait, + bool sync_ltc_data); + + // Marks an execution step, which allows the tensor framework to understand + // the computation boundaries. + // Override to supply your own DeviceContextArena. + virtual void MarkStep(const BackendDevice& device); + + // Waits for all the outstanding operations on all the supplied devices. + // If devices is empty, the wait will happen for all local devices. + void WaitDeviceOps(c10::ArrayRef devices); + + // Retrieves the PyTorch CPU tensors behind the lazy tensors IR operations. + // All the tensors must be on the same device. 
+ std::vector GetTensors(std::vector* tensors); + + size_t IncTrimCounter() const; + + // Dumps the backend specific text of the computation accumulated in the graph + // which is attached the tensors. + std::string DumpBackendComputation(const std::vector& tensors); + + Value GetDeviceDataIrValue( + const at::Scalar& value, + c10::ScalarType type, + const BackendDevice& device); + Value GetIrValueForScalar( + const at::Scalar& value, + c10::ScalarType type, + const BackendDevice& device); + Value GetIrValueForScalar( + const at::Scalar& value, + const BackendDevice& device); + + // TODO: even though this API is currently used **only** in codegen to + // generate real scalar IR values vs scalar tensors, we would like to + // use it in other cases where `GetIrValueForXXXScalar` is used, as well + // In order to do that, we need to untangle the cases where we don't need + // `expand` and where we don't expect a scalar tensor + Value GetIrValueForScalarFromCodegen( + const at::Scalar& value, + const BackendDevice& device); + Value GetIrValueForExpandedScalar( + const at::Scalar& value, + const Shape& shape, + const BackendDevice& device); + + struct CachedComputation { + explicit CachedComputation(ComputationPtr computation) + : computation(std::move(computation)) {} + + ComputationPtr computation; + }; + + using ComputationCache = Cache; + + ComputationCache* GetComputationCache(); + + hash_t GetGraphHash(const std::vector& tensors); + + protected: + // TODO(alanwaketan): Revisit if all of them need to be accessible to + // derived classes. + + struct SyncTensorsConfig { + // Whether we want to force data on the target tensors (hence trimming + // the IR graph above them). + bool force_ltc_data = true; + // Whether when setting the data, the other properties of the tensor + // state should be reset. + bool sync_ltc_data = true; + }; + + struct SyncTensorCollection { + SyncTensorCollection() : hash(0) {} + + SyncTensorsConfig config; + std::vector indices; + hash_t hash; + std::vector unlocker; + BackendDevice device; + }; + + struct PostOrderData { + std::vector post_order; + Util::EmissionMap emission_map; + std::vector parameters_data; + std::vector parameter_sequence; + }; + + // Locking: + // We perform two kinds of operations of tensors, synchronous and + // asynchronous. The ApplyPendingGraph() are synchronous, as we need the + // device data result immediately. Before the synchronous operations can + // start, they need to wait that the pending asynchronous operations have + // completed. Synchronous operations do not hold device locks, since they are + // strictly sequential, dictated by the PyTorch execution order. The + // SyncTensorsGraph() is asynchronous, and returns immediately after having + // scheduled the asynchronous operation. While executing, the asynchronous + // operations will hold locks on all the participating devices (in most common + // cases there will be only one device). + // Since asynchronous operations capture device locks, only one asynchronous + // operation can execute at the same time, on a given device. Tensor + // operations which send data to device do not need to hold any device locks + // while doing so. Only operations which _use_ device data (computations, and + // transfer from server) need to wait for asynchronous operations to complete + // (barrier). 
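+  //
+  // A simplified illustration of that ordering, using the methods declared
+  // above (the call sites and the `tensors` vector are hypothetical):
+  //
+  //   // Asynchronous: schedules the computation, captures the device locks,
+  //   // and returns without waiting for the backend to finish.
+  //   LazyGraphExecutor::Get()->SyncTensorsGraph(
+  //       &tensors, /*devices=*/{}, /*wait=*/false, /*sync_ltc_data=*/true);
+  //
+  //   // Synchronous: needs the device data, so it first waits on the device
+  //   // barrier held by the pending asynchronous operation above.
+  //   std::vector<at::Tensor> cpu_tensors =
+  //       LazyGraphExecutor::Get()->GetTensors(&tensors);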
+ + class DeviceLocker { + public: + explicit DeviceLocker(BackendDevice device) : device_(std::move(device)) {} + + const BackendDevice& device() const { + return device_; + } + + void Lock(); + void Unlock(std::exception_ptr exptr); + void Barrier(); + + private: + void CheckResetException(); + + BackendDevice device_; + std::mutex mutex_; + std::condition_variable cv_; + bool locked_ = false; + std::exception_ptr exptr_; + }; + + class DeviceLockerArena { + public: + static DeviceLockerArena* Get(); + + std::shared_ptr GetLocker(const BackendDevice& device); + + void DeviceBarrier(const BackendDevice& device); + + // Use a set to impose an order on the device locking sequence (ABBA + // prevention). + std::vector LockDevices( + const std::set& devices); + + private: + ExceptionCleanup LockDevice(const BackendDevice& device); + + std::mutex mutex_; + std::map> lockers_; + }; + + class DataCacheArena { + public: + static DataCacheArena* Get(); + + BackendDataPtr GetDeviceData( + const at::Tensor& tensor, + const BackendDevice& device); + + BackendDataPtr GetDeviceData( + const at::Scalar& value, + at::ScalarType scalar_type, + const BackendDevice& device); + + private: + struct TensorHasher { + size_t operator()(const at::Tensor& tensor) const; + }; + struct TensorComparer { + bool operator()(const at::Tensor& tensor1, const at::Tensor& tensor2) + const; + }; + + explicit DataCacheArena(size_t max_cache_size); + + using DataCache = + Cache; + + DataCache* GetDataCache(const BackendDevice& device); + + size_t max_cache_size_ = 0; + std::mutex mutex_; + std::map> device_caches_; + }; + + // The DeviceContextArena holds per device live information and statistics, + // among which the lazy tensors which are currently alive in the system. This + // is used to create computation "barriers" in order to flush pending + // operations and ensure the same computations are created during the training + // loops. + // TODO(alanwaketan): Add a registry such that we don't need to make all + // related methods virtual. + class DeviceContextArena { + protected: + struct DeviceContext { + std::mutex lock; + std::map> tensors_data; + uint64_t seed = 101; + uint64_t running_seed = 101; + Value seed_ir_value; + }; + + public: + static DeviceContextArena* Get(); + virtual ~DeviceContextArena() = default; + + void RegisterTensor(std::shared_ptr data); + void UnregisterTensor(LazyTensor::Data* data); + + std::vector GetLiveTensors(const BackendDevice* device); + + // Overriding it allow derived class to use their own IRs for Value. + virtual Value GetRngSeed(const BackendDevice& device); + uint64_t GetRunningSeed(const BackendDevice& device); + void SetRngSeed(const BackendDevice& device, uint64_t seed); + + void MarkStep(const BackendDevice& device); + + std::vector GetActiveDevices(); + + protected: + DeviceContext* GetDeviceContext(const BackendDevice& device); + + void ForAllDeviceContexts( + const std::function& fn, + const BackendDevice* device); + + // Overriding it allow derived class to use their own conversions. 
+ virtual Value IrValueFromScalar( + const at::Scalar& value, + at::ScalarType scalar_type, + const BackendDevice& device); + + private: + std::vector GetAllDeviceContexts(); + + std::mutex lock_; + std::map device_contexts_; + }; + + struct Async { + Async( + SyncTensorCollection* coll, + std::vector parameters_data, + std::vector tensors_data, + ComputationCache::TypePtr cached_computation); + virtual ~Async() = default; + + void Wait(); + + MultiWait mwait; + std::vector indices; + std::vector unlocker; + std::vector parameters_data; + BackendDevice device; + ComputationCache::TypePtr cached_computation; + std::vector tensors_data; + }; + + void ResetTrimCounter() const; + + // Waits for this SyncTensorCollection's device barrier and acquire the lock. + virtual void TensorCollectionBarrier(SyncTensorCollection* coll); + + // One can override to insert your own profiler. + virtual PostOrderData RunPostOrder( + const std::vector& ir_values, + SyncTensorCollection* coll); + + private: + struct CompilationResult { + BackendDevice device; + size_t emitted_nodes = 0; + ComputationPtr computation; + std::vector parameters_data; + }; + + virtual bool ShouldSyncTensor(const LazyTensorPtr& tensor) const; + + SyncTensorCollection CollectSyncTensors( + const std::vector& tensors, + const SyncTensorsConfig& config); + + std::vector CollectRoots( + const std::vector& tensors, + c10::ArrayRef indices); + + std::vector SetTensorData( + std::vector* tensors, + const SyncTensorsConfig& config, + c10::ArrayRef indices, + const std::vector& tensor_data_vec); + + void ExtractIRAndPrepareTensorData( + std::vector* tensors, + const SyncTensorsConfig& config, + c10::ArrayRef indices, + std::vector& ir_values, + std::vector& tensor_data_vec); + + std::shared_ptr TryRunCachedSync( + std::vector* tensors, + SyncTensorCollection* coll, + PostOrderData* po_data, + const std::vector& tensor_data_vec); + + CompilationResult Compile( + const std::vector& tensors, + c10::ArrayRef devices, + const SyncTensorCollection& coll, + PostOrderData* po_data, + const std::vector& ir_values); + + ComputationCache::TypePtr LookupCachedCompile(const hash_t& hash); + + std::shared_ptr SyncTensorsGraphInternal( + std::vector* tensors, + c10::ArrayRef devices, + const SyncTensorsConfig& config); + + // Schedules the execution of a sync tensors operation in background. The + // asynchronous operation will hold the device locks by capturing the ones + // present within the coll structure. + std::shared_ptr ScheduleSyncTensorsGraph( + SyncTensorCollection* coll, + std::vector parameters_data, + std::vector tensors_data, + ComputationCache::TypePtr cached_computation); + + std::shared_ptr ScheduleSyncTensorsGraph( + std::vector* tensors, + SyncTensorCollection* coll, + std::vector parameters_data, + ComputationCache::TypePtr cached_computation, + const std::vector& tensor_data_vec); + + std::vector GetTensorsFused(std::vector* tensors); + + std::vector FetchTensors( + std::vector* tensors, + c10::ArrayRef tensors_data, + const std::vector* indices); + + // Gathers the device data for all the input tensors, after an + // asynchronous operation. 
+ std::vector GatherTensorsData( + const std::vector& tensors, + c10::ArrayRef indices, + c10::ArrayRef tensors_data); +}; + +} // namespace lazy +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/multi_wait.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/multi_wait.h new file mode 100644 index 0000000000000000000000000000000000000000..d970b008e1b6b81ca7ad7535e98bedb702fc93a7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/multi_wait.h @@ -0,0 +1,62 @@ +/** + * This file is adapted from PyTorch/XLA + * https://github.com/pytorch/xla/blob/master/third_party/xla_client/multi_wait.h + */ + +#pragma once + +#include +#include +#include +#include +#include + +#include + +namespace torch { +namespace lazy { + +// Support waiting for a number of tasks to complete. +class TORCH_API MultiWait { + public: + explicit MultiWait(size_t count) : count_(count) {} + + // Signal the completion of a single task. + void Done(); + + // Waits until at least count (passed as constructor value) completions + // happened. + void Wait(); + + // Same as above, but waits up to wait_seconds. + void Wait(double wait_seconds); + + // Resets the threshold counter for the MultiWait object. The completed count + // is also reset to zero. + void Reset(size_t count); + + // Creates a completer functor which signals the mult wait object once func + // has completed. Handles exceptions by signaling the multi wait with the + // proper status value. This API returns a function which captures a MultiWait + // reference, so care must be taken such that the reference remains valid for + // the whole lifetime of the returned function. + std::function Completer(std::function func); + + // Similar as the above API, but with explicit capture of the MultiWait shared + // pointer. + static std::function Completer( + std::shared_ptr mwait, + std::function func); + + private: + void Complete(const std::function& func); + + std::mutex mutex_; + std::condition_variable cv_; + size_t count_ = 0; + size_t completed_count_ = 0; + std::exception_ptr exptr_; +}; + +} // namespace lazy +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/thread_pool.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/thread_pool.h new file mode 100644 index 0000000000000000000000000000000000000000..571a55b468fdd5a7114aa3c30221604ed0113795 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/thread_pool.h @@ -0,0 +1,37 @@ +/** + * This file is adapted from PyTorch/XLA + * https://github.com/pytorch/xla/blob/master/third_party/xla_client/metrics.h + */ + +#pragma once + +#include +#include +#include + +#include + +namespace torch { +namespace lazy { + +class TORCH_API Completion { + public: + class Data; + + explicit Completion(std::shared_ptr data); + + ~Completion(); + + void Wait(); + + private: + std::shared_ptr data_; +}; + +// Schedules a closure which might wait for IO or other events/conditions. 
+TORCH_API void ScheduleIoClosure(std::function closure); +TORCH_API Completion +ScheduleIoClosureWithCompletion(std::function closure); + +} // namespace lazy +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/util.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/util.h new file mode 100644 index 0000000000000000000000000000000000000000..a3d35783ae9691694dfe370bf1371b053fa6eb3c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/util.h @@ -0,0 +1,126 @@ +/** + * Most of the utils in this file is adapted from PyTorch/XLA + * https://github.com/pytorch/xla/blob/master/third_party/xla_client/util.h + */ + +#pragma once + +#include +#include +#include + +#include +#include + +namespace torch { +namespace lazy { + +// Similar to c10::scope_exit but with a status. +// TODO(alanwaketan): Consolidate it with c10::scope_exit. +template +class Cleanup { + public: + using StatusType = T; + + explicit Cleanup(std::function&& func) + : func_(std::move(func)) {} + Cleanup(Cleanup&& ref) noexcept + : func_(std::move(ref.func_)), status_(std::move(ref.status_)) {} + Cleanup(const Cleanup&) = delete; + + ~Cleanup() { + if (func_ != nullptr) { + func_(std::move(status_)); + } + } + + Cleanup& operator=(const Cleanup&) = delete; + + Cleanup& operator=(Cleanup&& ref) noexcept { + if (this != &ref) { + func_ = std::move(ref.func_); + status_ = std::move(ref.status_); + } + return *this; + } + + void Release() { + func_ = nullptr; + } + + void SetStatus(StatusType&& status) { + status_ = std::move(status); + } + + const StatusType& GetStatus() const { + return status_; + } + + private: + std::function func_; + StatusType status_; +}; + +using ExceptionCleanup = Cleanup; + +// Allows APIs which might return const references and values, to not be forced +// to return values in the signature. +// TODO(alanwaketan): This is clever, but is there really no std or c10 +// supports? Needs more investigations. 
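+// For illustration (hypothetical call sites): a MaybeRef<T> binds to an
+// existing object without copying, but takes ownership when handed a
+// temporary:
+//
+//   std::vector<int64_t> owned = {1, 2, 3};
+//   MaybeRef<std::vector<int64_t>> by_ref(owned);   // keeps only a reference
+//   MaybeRef<std::vector<int64_t>> by_val(std::vector<int64_t>{4, 5});
+//   bool stored = by_val.IsStored();                // true: value moved in
+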
+template +class MaybeRef { + public: + /* implicit */ MaybeRef(const T& ref) : ref_(ref) {} + /* implicit */ MaybeRef(T&& value) + : storage_(std::move(value)), ref_(*storage_) {} + + const T& Get() const { + return ref_; + } + const T& operator*() const { + return Get(); + } + operator const T&() const { + return Get(); + } + + bool IsStored() const { + return storage_.has_value(); + } + + private: + c10::optional storage_; + const T& ref_; +}; + +template +std::vector Iota(size_t size, T init = 0, T incr = 1) { + std::vector result(size); + T value = init; + for (size_t i = 0; i < size; ++i, value += incr) { + result[i] = value; + } + return result; +} + +template +std::vector ToVector(const S& input) { + return std::vector(input.begin(), input.end()); +} + +template +c10::optional> ToOptionalVector( + c10::OptionalArrayRef arrayRef) { + if (arrayRef) { + return arrayRef->vec(); + } + return c10::nullopt; +} + +template +typename std::underlying_type::type GetEnumValue(T value) { + return static_cast::type>(value); +} + +} // namespace lazy +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/tensor/python_tensor.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/tensor/python_tensor.h new file mode 100644 index 0000000000000000000000000000000000000000..9040f84ac4b72638d38e3e7bc9aac61914e57e42 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/tensor/python_tensor.h @@ -0,0 +1,36 @@ +#pragma once + +#include +#include +#include +#include + +namespace at { +class Tensor; +} // namespace at + +namespace torch { +namespace tensors { + +// Initializes the Python tensor type objects: torch.FloatTensor, +// torch.DoubleTensor, etc. and binds them in their containing modules. +void initialize_python_bindings(); + +// Same as set_default_tensor_type() but takes a PyObject* +void py_set_default_tensor_type(PyObject* type_obj); + +// Same as py_set_default_tensor_type, but only changes the dtype (ScalarType). +void py_set_default_dtype(PyObject* dtype_obj); + +// Gets the DispatchKey for the default tensor type. +// +// TODO: This is nuts! There is no reason to let the default tensor type id +// change. Probably only store ScalarType, as that's the only flex point +// we support. +TORCH_API c10::DispatchKey get_default_dispatch_key(); +at::Device get_default_device(); + +// Gets the ScalarType for the default tensor type. +at::ScalarType get_default_scalar_type(); +} // namespace tensors +} // namespace torch
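+
+// A minimal usage sketch for the accessors declared above; the ATen factory
+// call and the helper name `tensor_with_defaults` are assumptions for
+// illustration only, not part of this header:
+//
+//   at::Tensor tensor_with_defaults() {
+//     // Defaults maintained by the Python tensor-type bindings.
+//     at::ScalarType dtype = torch::tensors::get_default_scalar_type();
+//     at::Device device = torch::tensors::get_default_device();
+//     return at::empty(
+//         {2, 3}, at::TensorOptions().dtype(dtype).device(device));
+//   }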