diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/add_if_then_else.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/add_if_then_else.h new file mode 100644 index 0000000000000000000000000000000000000000..c6b3f9376d6b328f1d7eaf6cda14b007a6fc8e45 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/add_if_then_else.h @@ -0,0 +1,11 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +TORCH_API bool AddIfThenElseOp(std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/annotate_warns.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/annotate_warns.h new file mode 100644 index 0000000000000000000000000000000000000000..18e9f67641e048fa78865716237c385dc5ba2321 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/annotate_warns.h @@ -0,0 +1,11 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +TORCH_API void AnnotateWarns(const std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/autocast.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/autocast.h new file mode 100644 index 0000000000000000000000000000000000000000..ca21f2c60d0315a356df82791a1ba60f9cfc0123 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/autocast.h @@ -0,0 +1,15 @@ + +#pragma once + +#include + +namespace torch { +namespace jit { + +TORCH_API void Autocast(const std::shared_ptr& graph); + +TORCH_API bool setAutocastMode(bool value); +TORCH_API bool autocastEnabled(); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/bailout_graph.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/bailout_graph.h new file mode 100644 index 0000000000000000000000000000000000000000..266f54023e1a153492f6f37160c04c7e013e588d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/bailout_graph.h @@ -0,0 +1,34 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include +#include + +namespace torch { +namespace jit { + +// Replaces prim::Guard nodes with prim::BailOut nodes and +// computes sets of inputs needed to resume execution at +// bailout points +TORCH_API void InsertBailOuts(std::shared_ptr graph); + +// Builds a bailout graph into `target` (which is an empty graph) +// for a given bailout point `bailout_index` +// from the original graph `orig` (the original unoptimized graph) +// BailOut graphs allow Interpreter to resume +// execution of the (un/de)optimized graph (i.e. +// a graph that doesn't rely on any assumptions derived from +// on profiling information) from a given BailOut point +// should any of the assumptions fail for an actual input. 
+TORCH_API std::shared_ptr BuildBailOutGraphFrom( + int64_t bailout_index, + const std::shared_ptr& orig, + const std::shared_ptr& target); +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/batch_mm.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/batch_mm.h new file mode 100644 index 0000000000000000000000000000000000000000..643134750cc483439685c4dde566ead0d5bc7a3a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/batch_mm.h @@ -0,0 +1,11 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +TORCH_API void BatchMM(std::shared_ptr& graph); + +} +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/canonicalize.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/canonicalize.h new file mode 100644 index 0000000000000000000000000000000000000000..46d90d1a515f66fa19ac37c6d8621ba5f6e687de --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/canonicalize.h @@ -0,0 +1,22 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +TORCH_API std::shared_ptr Canonicalize( + const std::shared_ptr& graph, + bool keep_unique_names = true); + +TORCH_API void CanonicalizeOutputs(std::shared_ptr& graph); + +TORCH_API c10::optional firstOrLastUse(Value* v, bool find_first); + +TORCH_API bool isBeforeOrAfter( + const Use& a, + const Use& b, + bool checking_before); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/check_strict_fusion.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/check_strict_fusion.h new file mode 100644 index 0000000000000000000000000000000000000000..87e86d8a7e4b2965a16422cd952418aef7e7db5e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/check_strict_fusion.h @@ -0,0 +1,12 @@ + +#pragma once + +#include + +namespace torch { +namespace jit { + +TORCH_API void CheckStrictFusion(std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/clear_undefinedness.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/clear_undefinedness.h new file mode 100644 index 0000000000000000000000000000000000000000..24add48764c58a143ed755c41ec10f315a1b3207 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/clear_undefinedness.h @@ -0,0 +1,24 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { + +// Undefinedness makes argument matching fail for regular tensor operations +// if 1+ arguments are undefined or possibly undefined tensors. +// Technically, undefined tensors are **not** tensors as the regular tensor +// operations do not know how to handle them. +// However, in practice, there are guards and conversion operators that +// **always** gate regular operations if undefined tensors may be present +// Eventually, we would love to move to the world where we use optionals +// in lieu of undefined tensors. 
+// When this happens, this pass will be removed +TORCH_API void ClearUndefinedness(const std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/common_subexpression_elimination.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/common_subexpression_elimination.h new file mode 100644 index 0000000000000000000000000000000000000000..9c8956029dace1988981fa98e8a05f9498c25f81 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/common_subexpression_elimination.h @@ -0,0 +1,11 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +TORCH_API bool EliminateCommonSubexpression( + const std::shared_ptr& graph); +} +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/concat_opt.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/concat_opt.h new file mode 100644 index 0000000000000000000000000000000000000000..4fdd86f36a46dd0c1be0c53f1731c8e686ba1796 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/concat_opt.h @@ -0,0 +1,19 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Eliminates common inputs among `aten::cat` ops. +TORCH_API bool EliminateConcatCommonInputs(const std::shared_ptr& graph); + +// Expands `aten::cat` ops into `aten::copy` ops and eliminates redudancies +// in the buffers used for concatenation if possible. +TORCH_API void ExpandConcatAndEliminateRedundancy( + const std::shared_ptr& graph); + +TORCH_API bool CombineConcats(const std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/constant_pooling.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/constant_pooling.h new file mode 100644 index 0000000000000000000000000000000000000000..6eb4ca16077eb41287c5009ebc52744eaf235a62 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/constant_pooling.h @@ -0,0 +1,11 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +TORCH_API void ConstantPooling(const std::shared_ptr& graph); + +} +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/constant_propagation.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/constant_propagation.h new file mode 100644 index 0000000000000000000000000000000000000000..62293c8d7abc9bc2344ccab38d3a30c18af2fe9d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/constant_propagation.h @@ -0,0 +1,32 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Runs constant propagation on all objects unless ignore_custom_classes is +// specified as true, in which case user defined classes are skipped. This is +// useful to prevent early fusion of packing operations, which end up lowering +// away information about their constructors (e.g. 
packed::linear_clamp_prepack +// and prepacked::conv2d_clamp_prepack) +// Returns True if the pass made a change to the graph +TORCH_API bool ConstantPropagation( + std::shared_ptr& graph, + bool ignore_custom_classes = false); + +// runs constant propagation only on ops that have non-aliasing inputs & outputs +// Returns True if the pass made a change to the graph +TORCH_API bool ConstantPropagationImmutableTypes(std::shared_ptr& graph); + +// Runs the node if its inputs are constants. Callers of this function must +// make their own determination if constant prop is appropriate - for example +// non-deterministic ops or ops with side effects. If ignore_custom_classes is +// specified, nodes that output user defined classes are not run. +TORCH_API c10::optional runNodeIfInputsAreConstant( + const Node* node, + bool ignore_custom_classes = false, + AliasDb* db = nullptr); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/create_functional_graphs.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/create_functional_graphs.h new file mode 100644 index 0000000000000000000000000000000000000000..351816394d80c694d30a2423d8774d3585318af9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/create_functional_graphs.h @@ -0,0 +1,14 @@ +#pragma once + +#include +#include + +namespace torch { +namespace jit { + +TORCH_API void CreateFunctionalGraphs(const std::shared_ptr& graph); + +TORCH_API void InlineFunctionalGraphs(const std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/decompose_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/decompose_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..400e5997d6368d08edfacc76c969c07828a2c17b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/decompose_ops.h @@ -0,0 +1,11 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +TORCH_API void DecomposeOps(std::shared_ptr& graph); + +} +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/dtype_analysis.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/dtype_analysis.h new file mode 100644 index 0000000000000000000000000000000000000000..b4bcdcdc7ae7ea32abf2bfb49a07be8ec48dd82d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/dtype_analysis.h @@ -0,0 +1,17 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace jit { +struct Graph; + +// Propagate tensor properties (e.g., dtype, device, is_contiguous, layout) +// propagation on all tensor objects. 
Currently, we only support dtype +// propagation +TORCH_API bool DtypePropagation(std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/eliminate_no_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/eliminate_no_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..0087d306c23bc9b4580d669ec8009ec8b83b9e79 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/eliminate_no_ops.h @@ -0,0 +1,17 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Remove ops that do nothing on the forward pass (like aten::detach). +// This pass is invoked as a part of freeze_module. +// This function also takes a set of custom ops to eliminate. All ops in this +// set must take their output as their first input, i.e. x = f(x, ...) +TORCH_API bool EliminateNoOps( + std::shared_ptr& graph, + std::unordered_set custom_ops = {}); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/erase_number_types.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/erase_number_types.h new file mode 100644 index 0000000000000000000000000000000000000000..4aef1f5570694d20141ecc2e04a37eaf2ef0d3b6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/erase_number_types.h @@ -0,0 +1,23 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Erase NumberType information. This is necessary for and only used in +// exporting to ONNX. This pass ensures that no remaining Values have +// NumberType types, replacing them with tensors. +// The following things are done to erase NumberType info: +// - NumberType outputs are changed to DynamicType. +// - prim::Constant nodes which are numbers get changed into 0-dim tensors of +// the corresponding type +// - prim::TensorToNum, aten::Float, aten::Int and prim::NumToTensor nodes +// are erased. +// +// The pass assumes that DCE will be called sometime after. +TORCH_API void EraseNumberTypes(const std::shared_ptr& graph); +TORCH_API void EraseNumberTypesOnBlock(Block* block); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fixup_trace_scope_blocks.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fixup_trace_scope_blocks.h new file mode 100644 index 0000000000000000000000000000000000000000..472d95843a1c6dc921b072b59ea013fcbb6d57ed --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fixup_trace_scope_blocks.h @@ -0,0 +1,47 @@ +#pragma once + +#include +#include + +namespace torch { +namespace jit { + +// Directly after tracing, we have an ill-formed graph with blocks inserted. 
+// Example: +// +// graph(%self : ClassType, +// %input.1 : Float(3, 4)): +// %1 : ClassType = prim::GetAttr[name="relu1"](%self) +// %2 : ClassType = prim::GetAttr[name="relu2"](%self) +// %3 : ClassType = prim::GetAttr[name="rrr"](%2) +// = prim::TracedModuleForward[scope="__module.relu1"]() +// block0(): +// %input : Float(3, 4) = aten::relu(%input.1), +// -> () +// = prim::TracedModuleForward[scope="__module.relu2"](), +// block0(): +// = prim::TracedModuleForward[scope="__module.relu2.rrr"](), +// block0(): +// %6 : Float(3, 4) = aten::relu(%input), +// -> () +// -> () +// return (%6) +// +// In this pass, we: +// 1) Lift Value defs to as high of a scope as needed to ensure that +// they dominate all their uses. For example, `input` in the above +// graph needs to be lifted to the top-level block so that its use +// in the second `relu` operator is dominated. +// 2) Lambda lift the blocks. This ensures that all values used within +// each scope have their defs captured. +// 3) Convert the scope blocks into methods on their respective Modules, +// and convert TracedModuleForward nodes to CallMethod nodes into those +// methods. +// +// Then, we'll have a well-formed graph with proper method calls. +TORCH_API void FixupTraceScopeBlocks( + std::shared_ptr& graph, + Module* self); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fold_conv_bn.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fold_conv_bn.h new file mode 100644 index 0000000000000000000000000000000000000000..4032d22f2bc13f667cec5025148879cd7117cf83 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fold_conv_bn.h @@ -0,0 +1,37 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +/** \brief Fold Conv2d-BatchNorm2d into Conv2d in all methods of this + * module and all its submodules, forward is included by default. + * + * The weight and bias of the Conv2d are correspondingly updated. Should only be + * used on modules in eval mode. + */ +TORCH_API Module FoldConvBatchNorm(const Module& module); + +struct TORCH_API ConvBNParameters { + at::Tensor conv_w; + at::Tensor conv_b; + at::Tensor bn_rm; + at::Tensor bn_rv; + double bn_eps = 0.0; + at::Tensor bn_w; + at::Tensor bn_b; +}; + +/** + * Given the current weight and bias tensors of a Conv module and parameters + * of the BatchNorm module we're folding with, compute the updated values + * for the weight and bias. + * + * The function is basically copied from torch/nn/utils/fusion.py + */ +TORCH_API std::tuple computeUpdatedConvWeightAndBias( + const ConvBNParameters& p); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/freeze_module.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/freeze_module.h new file mode 100644 index 0000000000000000000000000000000000000000..7a50519cd92d5b536d76c851e87d31a2c5911cf8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/freeze_module.h @@ -0,0 +1,36 @@ +/** \brief This file defines freezing Torchscript module API. + * + * This API has python-binding and can be invoked directly or as a part of + * general optimization pipeline. + */ +#pragma once + +#include +#include + +/** \brief Freeze Module, i.e., Assume all attributes are constants. 
+ * + * Freezing module is a functionality that allows the JIT to internalize + * immutable attributes. Combined with inlining, the module is aggressively + * optimized and significant overhead is optimized away. The freezeModule API + * produces a cloned frozen module. + */ + +namespace torch { +namespace jit { + +TORCH_API Module freeze_module( + const Module& module, + std::vector preservedAttrs = std::vector(), + bool freezeInterfaces = true, + bool preserveParameters = false); + +// Clone-free version of freeze_module. This modifies the module inplace. +// Use this version to avoid extra memory usage incurred by cloning the module. +TORCH_API void freeze_module_inplace( + Module* module, + std::vector preservedAttrs = std::vector(), + bool freezeInterfaces = true, + bool preserveParameters = false); +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_concat_linear.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_concat_linear.h new file mode 100644 index 0000000000000000000000000000000000000000..d64cda0a88f2bbc0ebe585bf8b63e5bd94744743 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_concat_linear.h @@ -0,0 +1,13 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Concats multiple linear ops with the same Tensor input +// into a single linear op. +TORCH_API bool FrozenConcatLinear(std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_conv_add_relu_fusion.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_conv_add_relu_fusion.h new file mode 100644 index 0000000000000000000000000000000000000000..95991e73d9eccf7473071c5ed352af56d7c114f3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_conv_add_relu_fusion.h @@ -0,0 +1,15 @@ +#pragma once + +#include +#include + +namespace torch { +namespace jit { + +TORCH_API extern std::function&)>& +getFuseFrozenConvAddReluImpl(); + +TORCH_API void FuseFrozenConvAddRelu(std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_conv_folding.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_conv_folding.h new file mode 100644 index 0000000000000000000000000000000000000000..65dc138ccd6a41be0fa709516c97a8b89eeafd98 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_conv_folding.h @@ -0,0 +1,24 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Fuses Convolution -> Batchnorm into a single Convolution by +// folding batchnorm weights into conv weights. +// This pass only works on Frozen Graphs; otherwise it is a No-Op. +TORCH_API bool FoldFrozenConvBatchnorm(std::shared_ptr& graph); + +// Fuses Convolution -> Add/Sub into a single Convolution by +// folding add constant tensor into conv weights. +// This pass only works on Frozen Graphs; otherwise it is a No-Op. +TORCH_API bool FoldFrozenConvAddOrSub(std::shared_ptr& graph); + +// Fuses Convolution -> Mul/Div into a single Convolution by +// folding add constant tensor into conv weights. +// This pass only works on Frozen Graphs; otherwise it is a No-Op. 
+TORCH_API bool FoldFrozenConvMulOrDiv(std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_graph_optimizations.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_graph_optimizations.h new file mode 100644 index 0000000000000000000000000000000000000000..87c610781f1126602123bb951f365bc365791f9d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_graph_optimizations.h @@ -0,0 +1,22 @@ +#pragma once + +#include + +/** \brief Runs a set of Optimizations that Optimize Frozen Graphs + * + * Currently this set of optimizations is: + * - FoldFrozenConvBatchnorm + * - FoldFrozenConvAddOrSub + * - FoldFrozenConvMulOrDiv + * - FoldFrozenLinearBatchnorm + */ + +namespace torch { +namespace jit { + +TORCH_API void OptimizeFrozenGraph( + std::shared_ptr& graph, + bool optimize_numerics = true); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_linear_folding.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_linear_folding.h new file mode 100644 index 0000000000000000000000000000000000000000..bac4bedd53a6bb45999d5d276986d0946e1a2f0b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_linear_folding.h @@ -0,0 +1,14 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Fuses Linear -> BatchNormNd into a single Linear by +// folding batchnorm weights into linear weights. +// This pass only works on Frozen Graphs; otherwise it is a No-Op. +TORCH_API bool FoldFrozenLinearBatchnorm(std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_linear_transpose.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_linear_transpose.h new file mode 100644 index 0000000000000000000000000000000000000000..e952d1c43cef39020405de944bba8b3856398ed3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_linear_transpose.h @@ -0,0 +1,13 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Transposes the weight matrix for frozen linear modules. 
+// and converts it into a matmul +TORCH_API bool FrozenLinearTranspose(std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_ops_to_mkldnn.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_ops_to_mkldnn.h new file mode 100644 index 0000000000000000000000000000000000000000..d6ffc36906ad7e51b3fb1bc940cac69e8fb1b433 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_ops_to_mkldnn.h @@ -0,0 +1,15 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Converts operators & their parameters to mkldnn if it is profitable +// Currently encompassing Conv2d and Conv3d, and Linear +// Op must be in float32 and mkldnn must be built +// This pass only works on frozen graph +TORCH_API void ConvertFrozenOpsToMKLDNN(std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fuse_linear.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fuse_linear.h new file mode 100644 index 0000000000000000000000000000000000000000..56d37518e37866fa4ee14242215e5079c9c30f4b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fuse_linear.h @@ -0,0 +1,24 @@ +/** \brief Fusing linear patterns as single at::linear for easier pattern + * matching in later passes + */ +#pragma once + +#include + +namespace torch { +namespace jit { + +/** \brief Match the at::linear pattern and fuse it into a single at::linear + * This pass fuse the addmm or matmul + add generated by JIT back to linear + * This pass can be deleted once the JIT can emit the aten::linear in the future + */ +TORCH_API void FuseLinear(std::shared_ptr& graph); + +/** Swap functional linear CallFunctions to aten::linear + */ +TORCH_API void SwapFunctionalLinear(std::shared_ptr& graph); +/** Swap all functional linear CallFunctions in module + */ +TORCH_API void SwapFunctionalLinear(Module& module); +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fuse_relu.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fuse_relu.h new file mode 100644 index 0000000000000000000000000000000000000000..6577431368e9b9b8d99dd5995fc805d6b4d2d742 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fuse_relu.h @@ -0,0 +1,11 @@ +#pragma once + +#include +#include + +namespace torch { +namespace jit { +TORCH_API void FuseAddRelu(script::Module& module); +TORCH_API void FuseAddRelu(std::shared_ptr& graph); +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/graph_fuser.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/graph_fuser.h new file mode 100644 index 0000000000000000000000000000000000000000..aafb442eafb6f5e1b1e506c06627e9e9a03a5eed --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/graph_fuser.h @@ -0,0 +1,37 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +TORCH_API bool canFuseOnCPULegacy(); +TORCH_API void overrideCanFuseOnCPULegacy(bool value); + +// NB: Be sure to run DCE before fusion, because dead instructions +// can prevent fusion opportunities from being 
exploited. +// On Windows will noop, NYI +TORCH_API void FuseGraph( + std::shared_ptr& graph, + bool strict_fuser_check = false); + +// \brief Custom fusion pass using a node-level callback to +// determine the inclusion of nodes in a subgraph. +// +// This helper omits aliased inputs and fusion across control flow +// boundaries. +// +// \arg graph The graph to be modified in-place +// \arg is_fusable A callback run on each fusable node in the graph. +// \arg kind The label given to the resultant fused subgraph +// \arg arg_limit The maximum number of args the resultant fused subgraph +// should have. Note: This will likely develop into a general +// post condition on the fused subgraph. +TORCH_API void CustomFuseGraph( + std::shared_ptr& graph, + const std::function& is_fusable, + Symbol kind, + size_t arg_limit = std::numeric_limits::max()); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/graph_rewrite_helper.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/graph_rewrite_helper.h new file mode 100644 index 0000000000000000000000000000000000000000..0920830babb8b326e994339e8c479593091d36cb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/graph_rewrite_helper.h @@ -0,0 +1,54 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch { +namespace jit { +namespace graph_rewrite_helper { + +std::string getFuncName(Value* func_value); +Value* getValue( + const std::string& name, + const std::unordered_map& match_vmap, + const std::unordered_map& vmap); +c10::optional getIValue( + const std::string& name, + const std::unordered_map& match_vmap, + const std::unordered_map& vmap); +TORCH_API void replaceConvolutionWithAtenConv(std::shared_ptr& graph); + +bool isClampFusable( + const Match& match, + const std::unordered_map& vmap); + +// This struct contains a compiled IR patterns slated for use in the +// findPatternMatches function. The struct encapsulates the common +// information from parseIR that is used in conjunction with the +// pattern matching facility. 
A const instance of this struct can +// also be stored away to cache the compiled IR pattern and reduce +// runtime cost +struct PatternInfo { + std::string pattern_string; + std::unique_ptr pattern_graph; + std::unordered_map vmap; + std::vector filters; + + static PatternInfo parse_from_str( + std::string pattern_string, + const std::vector& filters = {}) { + PatternInfo rv{ + std::move(pattern_string), + std::make_unique(), + decltype(vmap){}, + filters}; + parseIR(rv.pattern_string, rv.pattern_graph.get(), rv.vmap); + return rv; + } +}; + +} // namespace graph_rewrite_helper +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/guard_elimination.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/guard_elimination.h new file mode 100644 index 0000000000000000000000000000000000000000..03f13140e370df55903bd9ce00ea04b624bce795 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/guard_elimination.h @@ -0,0 +1,19 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include +#include + +namespace torch { +namespace jit { + +TORCH_API void EliminateRedundantGuards(std::shared_ptr graph); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/hoist_conv_packed_params.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/hoist_conv_packed_params.h new file mode 100644 index 0000000000000000000000000000000000000000..37bfa07a1267023bcd6d2227b03dac75d0f233b1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/hoist_conv_packed_params.h @@ -0,0 +1,12 @@ +#pragma once + +#include +#include + +namespace torch { +namespace jit { + +void HoistConvPackedParams(script::Module& m); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inline_autodiff_subgraphs.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inline_autodiff_subgraphs.h new file mode 100644 index 0000000000000000000000000000000000000000..8edc81224a07321786937bdebde0d19c5c119c22 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inline_autodiff_subgraphs.h @@ -0,0 +1,15 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +TORCH_API bool canRunWithAutograd(Node* node); + +TORCH_API void InlineAutodiffSubgraphs( + std::shared_ptr& graph, + size_t threshold = 5); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inline_fork_wait.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inline_fork_wait.h new file mode 100644 index 0000000000000000000000000000000000000000..c2dbacdc4ddab7a18f495cfc9f8dc34640f65902 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inline_fork_wait.h @@ -0,0 +1,16 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Inline Fork and Wait calls. This is used, for example, in ONNX export, where +// we do not support the explicit parallelism structures and would rather +// just have a flat graph. 
This inlines the forked section in the fork() +// callsite and replaces uses of the result of wait() calls with the values +// produced from the (now-inlined) forked section. +TORCH_API void InlineForkWait(const std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inline_forked_closures.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inline_forked_closures.h new file mode 100644 index 0000000000000000000000000000000000000000..164c29f8b6557f50b02401704c2622dc187d86aa --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inline_forked_closures.h @@ -0,0 +1,12 @@ +#pragma once + +#include +#include + +namespace torch { +namespace jit { + +TORCH_API void inlineForkedClosures(std::shared_ptr& to_clean); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inliner.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inliner.h new file mode 100644 index 0000000000000000000000000000000000000000..b4db0ad189282d83a2e184993e1c790a41527bf3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inliner.h @@ -0,0 +1,14 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Inline function and method calls. +TORCH_API void Inline(Graph& graph); + +TORCH_API GraphFunction* tryToGraphFunction(Node* n); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/insert_guards.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/insert_guards.h new file mode 100644 index 0000000000000000000000000000000000000000..28d9f168bf3d559ad434883954004797ae96e690 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/insert_guards.h @@ -0,0 +1,21 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include +#include + +namespace torch { +namespace jit { + +TORCH_API void InsertGuards(std::shared_ptr graph); + +TORCH_API void RemoveProfilingNodes(const std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/integer_value_refinement.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/integer_value_refinement.h new file mode 100644 index 0000000000000000000000000000000000000000..5614e96c141f4b611418fec08ce917868728eef1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/integer_value_refinement.h @@ -0,0 +1,12 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// return true if graph is modified +TORCH_API bool RefineIntegerValues(const std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lift_closures.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lift_closures.h new file mode 100644 index 0000000000000000000000000000000000000000..c7cee8417fa457d671da2efbc6a19493762fcffb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lift_closures.h @@ -0,0 +1,12 @@ +#pragma once + +#include +#include + +namespace torch { +namespace jit { + 
+TORCH_API void liftClosures(const std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/liveness.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/liveness.h new file mode 100644 index 0000000000000000000000000000000000000000..7b612dee9622304b5f9279215da7c798b5958b4b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/liveness.h @@ -0,0 +1,23 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +namespace torch { +namespace jit { + +using SparseBitVector = ::c10::SparseBitVector<256>; + +// BuildLivenessSets computes "bailout" liveness which is equivalent to +// "{LIVE_IN} or {GEN}" or "{LIVE_OUT} - {KILL}" +TORCH_API std::unordered_map> BuildLivenessSets( + std::shared_ptr graph); +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/loop_unrolling.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/loop_unrolling.h new file mode 100644 index 0000000000000000000000000000000000000000..5895f2fcee7462b8f9627651e58590d96299ac93 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/loop_unrolling.h @@ -0,0 +1,36 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// return true if graph is modified +TORCH_API bool UnrollLoops(std::shared_ptr& graph); + +// Only unrolls constant loops. Will unroll them regardless of loop block size +TORCH_API bool UnrollConstantLoops(std::shared_ptr& graph); + +TORCH_API Node* PeelLoop(Node* n, size_t times); + +// return true if graph is modified +TORCH_API bool PeelProfilingLoops(const std::shared_ptr& graph); + +struct TORCH_API LoopsPeeler { + LoopsPeeler(std::function callback, size_t num_iterations = 1) + : callback_(std::move(callback)), num_iterations_(num_iterations) {} + + bool run(const std::shared_ptr& graph); + + private: + void collectLoop(Node* n); + void collectLoops(Block* block); + void peelLoops(); + + std::function callback_ = nullptr; + Node* in_loop_ = nullptr; + std::list loops_to_peel_; + size_t num_iterations_ = 1; +}; +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_grad_of.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_grad_of.h new file mode 100644 index 0000000000000000000000000000000000000000..a79bb56492855b6a9002fe82f9c7b9856092af51 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_grad_of.h @@ -0,0 +1,17 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// This pass removes 'grad_of' nodes, replacing them with conditionals of +// the form: +// if any_defined(inputs): +// outputs = +// else: +// outputs = undefineds +TORCH_API void LowerGradOf(Graph& g); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_graph.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_graph.h new file mode 100644 index 0000000000000000000000000000000000000000..6c9ea6666835a26e39ddf82830d1c43b7cd45748 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_graph.h @@ -0,0 +1,22 @@ 
+#pragma once + +#include + +namespace torch { +namespace jit { + +using ModulePtr = c10::intrusive_ptr; + +// Given a graph with of a method which first argument is %self, lower it to a +// graph where all attributes accesses are replaced with explicit inputs of the +// graph (rather than results of prim::GetAttr executed on %self). +// +// Returns a tuple (graph, parameters) where the last module.parameters.size() +// inputs to the graph are the trainable parameters used in this method. The +// remaining inputs are the true inputs to the function. +TORCH_API std::pair, std::vector> LowerGraph( + Graph& graph, + const ModulePtr& self); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/metal_rewrite.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/metal_rewrite.h new file mode 100644 index 0000000000000000000000000000000000000000..30e4825cedd17962a7f90dfa93e6a7b5dba319cf --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/metal_rewrite.h @@ -0,0 +1,17 @@ +#pragma once +#include +#include +#include +#include + +namespace torch { +namespace jit { +TORCH_API void metalInsertPrePackedOps(std::shared_ptr& graph); +TORCH_API void metalInsertPrePackedOps(script::Module& module); +TORCH_API void metalFusePrePackedConvWithClamp(script::Module& module); +TORCH_API void metalFoldPrePackingOps(script::Module& module); +TORCH_API script::Module metalOptimizeForMobile( + const script::Module& module, + const std::vector& preserved_methods); +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/mkldnn_rewrite.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/mkldnn_rewrite.h new file mode 100644 index 0000000000000000000000000000000000000000..30d02332429a81ec50c6dc3fae8ab7cddff88714 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/mkldnn_rewrite.h @@ -0,0 +1,34 @@ +#pragma once + +#include +#include +#include +#include + +#if AT_MKLDNN_ENABLED() + +#include + +#endif // AT_MKLDNN_ENABLED() + +namespace torch { +namespace jit { + +#if AT_MKLDNN_ENABLED() + +namespace mkldnn { + +const static std::map> + fusion_rewrite_map = { + {"none", {}}, + {"relu", {}}, +}; + +} // namespace mkldnn + +#endif // AT_MKLDNN_ENABLED() + +void FuseConvWithEltwise(std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/normalize_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/normalize_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..4d630392ca47df0d0a32ef1bf5d25bbb4a41c163 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/normalize_ops.h @@ -0,0 +1,18 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// This pass converts aten ops to a normalized form. It is +// run immediately after IR generation in both the tracer and compiler, +// so downstream consumers of the IR do not need handle ops in their +// pre-normalized form. +// Currently only handles normalization of op aliases. 
+TORCH_API void NormalizeOps(const std::shared_ptr& graph); + +const std::unordered_map& getOperatorAliasMap(); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/onednn_graph_fuser.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/onednn_graph_fuser.h new file mode 100644 index 0000000000000000000000000000000000000000..aeb79470b01ae60e38282b4d29b6942af4189ac5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/onednn_graph_fuser.h @@ -0,0 +1,64 @@ +#pragma once + +#include +#include + +#include + +namespace torch { +namespace jit { +namespace fuser { +namespace onednn { + +static std::atomic onednn_enabled{true}; + +static std::atomic& getLlgaEnabled() { + return onednn_enabled; +} + +TORCH_API void fuseGraph(std::shared_ptr& g); + +} // namespace onednn +} // namespace fuser + +struct C10_EXPORT RegisterLlgaFuseGraph + : public PassManager { + static bool setEnabled(bool enabled) { + TORCH_CHECK( + AT_MKLDNN_ENABLED(), + "Running oneDNN Graph fuser is only supported with MKLDNN builds."); + bool oldState = fuser::onednn::getLlgaEnabled(); + fuser::onednn::getLlgaEnabled() = enabled; + if (enabled) { + registerPass(fuser::onednn::fuseGraph); + } else { + clearPass(); + } + return oldState; + } + + static bool isEnabled() { + return fuser::onednn::getLlgaEnabled(); + } + + // override PassManager::registerPass to register pre-pass + static bool registerPass(GraphPass p) { + if (!isRegistered()) { + passID(registerPrePass(std::move(p)), true); + isRegistered(true); + return false; + } + return true; + } + + // override PassManager::clearPass to clear pre-pass + static void clearPass() { + if (isRegistered()) { + clearPrePass(passID()); + isRegistered(true); + } + } +}; + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/onnx.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/onnx.h new file mode 100644 index 0000000000000000000000000000000000000000..11bee679164043cca58fd3f35a108fd078101a95 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/onnx.h @@ -0,0 +1,28 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace jit { + +TORCH_API std::shared_ptr ToONNX( + std::shared_ptr& state, + ::torch::onnx::OperatorExportTypes operator_export_type); +TORCH_API std::unordered_map BlockToONNX( + Block* old_block, + Block* new_block, + ::torch::onnx::OperatorExportTypes operator_export_type, + std::unordered_map& env, + bool is_sub_block = false); +TORCH_API void NodeToONNX( + Node* old_node, + Block* new_block, + ::torch::onnx::OperatorExportTypes operator_export_type, + std::unordered_map& env); +TORCH_API void RemovePrintOps(std::shared_ptr& graph); +TORCH_API void PreprocessCaffe2Ops(std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/pass_manager.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/pass_manager.h new file mode 100644 index 0000000000000000000000000000000000000000..8585c6ecdb3de5c0ce1a3a72be9d722d2b423f0d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/pass_manager.h @@ -0,0 +1,136 @@ +#pragma once + +#include + +/* `getCustomPrePasses()` returns a vector of passes 
that will be executed + * after differentiation but before any fusion. This is the de-facto location + * for compiler backends to insert passes. + * + * `getCustomPostPasses()` returns a vector of passes that will be + * executed after differentiation and after fusion (if any). This is the + * location for fusion cleanup passes if they are needed. + * + * Static registration of a pass can be done by creating a global + * `Register{Pre,Post}Pass r(Pass)` variable in a compilation unit. + * + * pass_manager.h uses a Meyer's singleton to store a vector of `Pass`es, which + * modify the IR graph in place. + */ + +namespace torch { +namespace jit { + +// A pass modifies a Graph in place. +using GraphPass = std::function&)>; + +// Since Passes are std::functions, we associate a UUID to each pass, this way +// if we want to deregister a pass, we have something to reference it by. +using GraphPassNameType = unsigned int; + +// Graph pass entries have a name associated with them +using GraphPassEntry = std::pair; + +// Return currently registered passes. Passes are stored in a static vector +TORCH_API std::vector>& +getCustomPostPasses(); +TORCH_API std::vector>& +getCustomPrePasses(); + +TORCH_API GraphPassNameType registerPostPass(GraphPass p); +TORCH_API GraphPassNameType registerPrePass(GraphPass p); + +// Look up pass by name passed in, remove it from registered passes +TORCH_API void clearPostPass(GraphPassNameType p); +TORCH_API void clearPrePass(GraphPassNameType p); + +// Remove all passes +TORCH_API void clearAllPostPasses(); +TORCH_API void clearAllPrePasses(); + +// LEGACY CALL +struct TORCH_API RegisterPostPass { + RegisterPostPass(GraphPass p); +}; + +using RegisterPass = RegisterPostPass; + +/* + * PassManager is a wrapper on the register/clear PostPass functions above. It + * will register the pass provided in "registerPass" and will hold on to its + * associated name that way clearPass can be later called and will delete the + * pass used to register when called. + * + * PassManager is templated because we want static variables based on a + * particular GraphPass. When deriving from PassManager, you should send as the + * template parameter your derived class as you would for the curiously + * recurring template pattern. This template parameter isn't actually used and + * is simply done to prevent static members from being shared across derived + * types. + */ +template +struct C10_EXPORT PassManager { + private: + // We want this class to be abstract because it's + virtual void abstract() = 0; + + protected: + /* + * isRegistered() will return if a pass has been registered + * isRegistered(true) will change the value of the internal static bool + * + * There's an internal static bool to this function to keep track of the + * state, this is so when functions are derived from this class, they don't + * have to worry about initializing the static members. + */ + static bool isRegistered(bool flip_bit = false) { + static bool val = false; + if (flip_bit) + val = !val; + return val; + } + + /* + * name() will return the name of the registered pass + * name(pass_name, true) will set the name of the pass + * Similarly to isRegistered we use an internal static variable to hold the + * name. 
+ */ + static GraphPassNameType passID( + GraphPassNameType PassID = 0, + bool set = false) { + static GraphPassNameType pass_id = 0; + if (set) + pass_id = PassID; + return pass_id; + } + + public: + // registerPass(pass) will register the pass provided and set the + // name/isRegistered functions appropriately, it returns a bool value + // indicating whether the given pass is already registered previously. + static bool registerPass(GraphPass p) { + if (!isRegistered()) { + // If we don't already have a registered pass, register pass + // hold on to its name, change isRegistered to true + passID(registerPostPass(std::move(p)), true); + isRegistered(true); + return false; + } + return true; + } + + // Calls ClearPostPass(passID()) + static void clearPass() { + // If the pass is registered, clear it and change isRegistered to false. + if (isRegistered()) { + clearPostPass(passID()); + isRegistered(true); + } + } + + // clang-tidy requires virtual destructor; + virtual ~PassManager() = default; +}; + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole.h new file mode 100644 index 0000000000000000000000000000000000000000..e2d8d5f9a9f2082fe0eb94bb5aee4a7605dc7042 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole.h @@ -0,0 +1,20 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// return true if graph is modified +TORCH_API bool PeepholeOptimize( + const std::shared_ptr& graph, + bool disable_shape_peepholes = false); +// return true if graph is modified +TORCH_API bool PeepholeOptimize( + Block* block, + bool disable_shape_peepholes = false); +// return true if graph is modified +TORCH_API bool FuseAddMM(const std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_alias_sensitive.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_alias_sensitive.h new file mode 100644 index 0000000000000000000000000000000000000000..d61a0a4ec0d8be8e1c3e49e352b288a4767b9ed0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_alias_sensitive.h @@ -0,0 +1,17 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Peephole Optimizes alias sensitive peepholes +// Currently this is invoked as part of PeepholeOptimize +// return true if graph is modified +// Optimizes on TensorType if shape_peepholes is true +TORCH_API bool PeepholeOptimizeAliasSensitive( + const std::shared_ptr& graph, + bool shape_peepholes); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_dict_idioms.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_dict_idioms.h new file mode 100644 index 0000000000000000000000000000000000000000..283c313d9ee2ae8024ba286d1a5bd0ea5cf1fdd3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_dict_idioms.h @@ -0,0 +1,38 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Peephole Optimizes Dict Ops such as len() and __getitem__ +// 1. 
getitem optimizations +// Given a function like this: +// def foo(): +// d = {0 : 1} +// x = d[0] +// return x +// This pass produces (after dead code elimination): +// def foo(a, b): +// return 1 +// +// This optimization can only happen if the dict is not modified +// and the dict has constant, non overlapping keys. +// +// 2. len optimizations +// Given a function like this: +// def foo(): +// d = {0 : 1} +// return len(d) +// This pass produces (after dead code elimination): +// def foo(): +// return 1 +// +// This has the same requirements as the getitem optimizations. +// +// Currently this is invoked as part of PeepholeOptimize +// return true if graph is modified. +TORCH_API bool PeepholeOptimizeDictIdioms(const std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_list_idioms.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_list_idioms.h new file mode 100644 index 0000000000000000000000000000000000000000..d20df9571db01e0e0a0a3991b410621bfcb346ba --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_list_idioms.h @@ -0,0 +1,72 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Peephole Optimizes List ops such as len(li) and li[1]. +// 1. Construct/Unpack optimizations +// Given a function like this: +// def foo(a, b): +// li = [a, b] +// x, y = li +// return x, y +// This pass produces (after dead code elimination): +// def foo(a, b): +// return a, b +// +// This is only applied to lists that are not modified. +// +// 2. getitem optimizations +// Given a function like this: +// def foo(a, b): +// li = [a, b] +// x = li[0] +// return x +// This pass produces (after dead code elimination): +// def foo(a, b): +// return a +// +// This optimization can only happen if the list is not modified. +// +// 3. len optimizations +// Given a function like this: +// def foo(): +// li = [1, 2] +// return len(li) +// This pass produces (after dead code elimination): +// def foo(): +// return 2 +// +// This has the same requirements as the getitem optimizations. +// +// 4. ListConstruct + ListConstruct +// Given a function like this: +// def foo(): +// return [1, 2] + [3, 4] +// This pass produces (after dead code elimination): +// def foo(): +// return [1, 2, 3, 4] +// +// This is only applied to lists that are not modified. +// +// 5. Slice +// Given a function like this: +// def foo(): +// return [1, 2, 3, 4, 5][0:2] +// This pass produces (after deadcode elimination): +// def foo(): +// return [1, 2] +// +// Currently this is invoked as part of PeepholeOptimize +// return true if graph is modified. +// If `refine_list_len` is true will attempt to refine the len of lists through +// len comparisons and assertions. This does not generally optimize pytorch +// programs so it is not called by default in PeepholeOptimize. 
+TORCH_API bool PeepholeOptimizeListIdioms(
+    const std::shared_ptr<Graph>& graph,
+    bool refine_list_len = false);
+
+} // namespace jit
+} // namespace torch
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_non_tensor.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_non_tensor.h
new file mode 100644
index 0000000000000000000000000000000000000000..1e4daebd060cc9365c8994219803a65891c69d4e
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_non_tensor.h
@@ -0,0 +1,14 @@
+#pragma once
+
+#include
+
+namespace torch {
+namespace jit {
+
+// Returns true if the graph is modified.
+// Optimizes general graph patterns that
+// are not covered in peephole.cpp and peephole_list_idioms.
+TORCH_API bool PeepholeOptimizeNonTensor(const std::shared_ptr<Graph>& graph);
+
+} // namespace jit
+} // namespace torch
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/prepack_folding.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/prepack_folding.h
new file mode 100644
index 0000000000000000000000000000000000000000..13761dc5473efd83c02339a8205cb4cfeb8038f4
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/prepack_folding.h
@@ -0,0 +1,17 @@
+#pragma once
+
+#include
+#include
+
+namespace torch {
+namespace jit {
+
+using PrePackingOpsFilterFn = std::function<bool(Node*)>;
+
+void PrePackingOpsFolder(
+    script::Module& m,
+    const PrePackingOpsFilterFn& is_foldable_op,
+    const std::string& attr_prefix);
+
+} // namespace jit
+} // namespace torch
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/finalize.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/finalize.h
new file mode 100644
index 0000000000000000000000000000000000000000..d73addbc387f6b4d55360480b3c20fc1e0b84d3c
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/finalize.h
@@ -0,0 +1,63 @@
+#pragma once
+
+#include
+#include
+#include
+
+namespace torch {
+namespace jit {
+
+/** \brief Backend-specific pass to fuse dequantize - op - quantize calls
+ * as quantized_op calls.
+ *
+ * Right now this is a fusion for the fbgemm backend and it only works for the
+ * quantized conv op; we'll extend it to more ops and more backends in the future.
+ *
+ * Currently supported fusion:
+ * q(conv2d(dq(a), dq(w), dq(b))) --> to_nchw(fbgemm_conv2d(prepack(to_nhwc(a)),
+ *                                                          prepack(to_nhwc(w)),
+ *                                                          prepack(to_nhwc(b))))
+ *
+ * q(linear(dq(a), dq(w), dq(b))) --> to_nchw(fbgemm_linear(prepack(to_nhwc(a)),
+ *                                                          prepack(to_nhwc(w)),
+ *                                                          prepack(to_nhwc(b))))
+ *
+ * \param graph the graph to which we want to apply the fusion
+ */
+TORCH_API void QuantFusion(
+    std::shared_ptr<Graph>& graph,
+    QuantType quant_type = QuantType::STATIC);
+
+/** \brief Insert prepack and unpack functions in the graph
+ * We want to add pack/unpack functions for the quantized weight because later
+ * we want to fold the packed weight as an attribute of the module, in order to
+ * reduce the cost of packing the weight on the fly in quantized models.
+ *
+ * Each quantized op has its corresponding prepack/unpack function;
+ * right now, we only need to do prepack/unpack for quantized::linear
+ * and quantized::conv2d.
+ */ +TORCH_API void InsertPrepackUnpack(std::shared_ptr& graph); + +/** \brief Insert pack and unpack function in all graphs + * of module + * + * Go through graphs of all the methods of all child modules + * and call InsertPrepackUnpack on the graph. + */ +TORCH_API void InsertPrepackUnpack(Module& module); + +TORCH_API script::Module Finalize( + script::Module& module, + QuantType quant_type = QuantType::STATIC, + const std::vector& preserved_attrs = + std::vector()); + +TORCH_API void FoldQuantizedPrepackingOps(Module& module); + +TORCH_API Module FinalizeOnDevicePTQ( + Module& module, + QuantType quant_type, + const std::string& method_name); +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/quantization_type.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/quantization_type.h new file mode 100644 index 0000000000000000000000000000000000000000..ac4afe90ed9ea577a760becf2a2d760a6bd74d60 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/quantization_type.h @@ -0,0 +1,15 @@ +#pragma once +#include +#include + +namespace torch { +namespace jit { + +// Quantization type (dynamic quantization, static quantization). +// Should match the Python enum in quantize_jit.py +enum QuantType : std::uint8_t { DYNAMIC = 0, STATIC }; + +std::ostream& operator<<(std::ostream& os, QuantType t); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/refine_tuple_types.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/refine_tuple_types.h new file mode 100644 index 0000000000000000000000000000000000000000..75f36313b3a1510dec9ec8107b1e5d1c2a781c49 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/refine_tuple_types.h @@ -0,0 +1,12 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// updates the types of tuples according to the type of their current inputs. 
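// ---------------------------------------------------------------------------
// Illustrative aside (not part of the headers above): a minimal sketch of how
// the quantization finalize API above might be driven, assuming the module was
// already processed by the graph-mode quantization passes (observer insertion
// and quant/dequant insertion). The helper name is hypothetical.
// ---------------------------------------------------------------------------
#include <torch/csrc/jit/api/module.h>
#include <torch/csrc/jit/passes/quantization/finalize.h>

torch::jit::Module finalize_quantized_module(torch::jit::Module module) {
  // Insert prepack/unpack calls for the quantized weights so they can later be
  // folded into module attributes.
  torch::jit::InsertPrepackUnpack(module);
  // Finalize performs the quantized-op fusion and folds the prepacking ops;
  // STATIC matches the static (post-training) quantization flow.
  return torch::jit::Finalize(module, torch::jit::QuantType::STATIC);
}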
+TORCH_API void RefineTupleTypes(std::shared_ptr<Graph>& graph);
+
+} // namespace jit
+} // namespace torch
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_dropout.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_dropout.h
new file mode 100644
index 0000000000000000000000000000000000000000..19cf29d5de290be1d1b73fffef0b2d2aaadb5f38
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_dropout.h
@@ -0,0 +1,14 @@
+#pragma once
+
+#include
+#include
+
+namespace torch {
+namespace jit {
+
+TORCH_API void removeDropout(std::shared_ptr<Graph>& graph);
+
+TORCH_API void removeDropout(script::Module& module);
+
+} // namespace jit
+} // namespace torch
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_exceptions.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_exceptions.h
new file mode 100644
index 0000000000000000000000000000000000000000..e029383379f5658208a1f5806710bba7d47ce6b1
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_exceptions.h
@@ -0,0 +1,23 @@
+#pragma once
+
+#include
+
+namespace torch {
+namespace jit {
+
+// Considering prim::RaiseException nodes unreachable, simplify prim::If nodes
+// when one of the branches contains prim::RaiseException.
+//
+// This pass is unsound in the general case, as the modified graph might not
+// throw an exception that the original graph would throw. The purpose of the
+// pass is to clean up the graph in a "risky" way by removing pathways leading
+// to RaiseException nodes. In some sense, this pass could be considered a
+// "Release" mode, while the original graph was in a "Debug" mode.
+// The pass should only be used when such a transformation is guaranteed to be
+// safe by some other mechanism. For instance, when we know the exact shapes of
+// tensors flowing through the graph and tensors with such shapes never cause
+// exceptions.
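// ---------------------------------------------------------------------------
// Illustrative aside (not part of the headers above): a sketch of an
// inference-only cleanup using removeDropout together with the risky
// EliminateExceptions pass declared just below. The helper name is
// hypothetical; as the comment above stresses, EliminateExceptions should only
// run when the caller can guarantee the exception paths are unreachable.
// ---------------------------------------------------------------------------
#include <torch/csrc/jit/api/module.h>
#include <torch/csrc/jit/passes/remove_dropout.h>
#include <torch/csrc/jit/passes/remove_exceptions.h>

void strip_training_only_nodes(torch::jit::Module& module) {
  module.eval();
  // Dropout is a no-op in eval mode, so the nodes can be removed outright.
  torch::jit::removeDropout(module);
  for (const auto& method : module.get_methods()) {
    auto graph = method.graph();
    // Only safe because we assume the shapes seen in production never raise.
    torch::jit::EliminateExceptions(graph);
  }
}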
+TORCH_API void EliminateExceptions(std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_expands.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_expands.h new file mode 100644 index 0000000000000000000000000000000000000000..8a484a839e552dc02a4a04e660d9d173e473f2c5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_expands.h @@ -0,0 +1,11 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +TORCH_API void RemoveExpands(const std::shared_ptr& graph); + +} +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_mutation.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_mutation.h new file mode 100644 index 0000000000000000000000000000000000000000..eb8cf195ee4ca19ce399435e8586d4eecb8b3397 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_mutation.h @@ -0,0 +1,81 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch { +namespace jit { + +struct TORCH_API MutationRemover { + MutationRemover( + std::shared_ptr graph, + c10::optional> mutation_filter = c10::nullopt) + : mutation_filter_(mutation_filter), + aliasDb_(nullptr), + graph_(std::move(graph)) {} + + // return true if graph is modified + bool removeListMutation(); + + // return true if graph is modified + bool removeTensorMutation(); + + bool isSpecialMappedOp(Node* n) { + return n->matches("aten::zero_(Tensor(a!) self) -> Tensor(a!)") || + n->matches( + "aten::fill_.Scalar(Tensor(a!) self, Scalar value) -> Tensor(a!)") || + n->matches( + "aten::normal_(Tensor(a!) self, float mean=0, float std=1, *, Generator? 
generator=None) -> Tensor(a!)"); + } + + bool inplaceOpVariant(Node* n); + + static bool hasSideEffectOrAlias(Value* v, AliasDb* aliasDb); + + private: + Node* createSpecialMappedOp(Node* n); + bool listMutationFollowingListConstruct(Node* n); + bool tryMakeCreationAndMutationAtomic( + Value* mutated_value, + Node* mutating_op); + bool tryMakeUnaliasedIfOutputAndMutationAtomic( + Value* mutated_value, + Node* mutating_op); + // return true if graph is modified + bool RemoveListMutation(Block* block); + // return true if graph is modified + bool RemoveTensorMutation(Block* block); + + AliasDb* getOrCreateAliasDb() { + if (!aliasDb_) { + aliasDb_ = std::make_unique(graph_); + } + return aliasDb_.get(); + } + + c10::optional> mutation_filter_; + std::unique_ptr aliasDb_ = nullptr; + std::shared_ptr graph_; +}; + +// Removes list mutation with functional equivalents +// return true if graph is modified +TORCH_API bool RemoveListMutation(const std::shared_ptr& graph); + +// Replaces in-place aten ops with their functional equivalents +// when it can be proven that this does not change graph semantics +// if `mutation_filter` is present, the pass will only attempt to +// remove mutation on nodes which return true for the filter +// return true if graph is modified +TORCH_API bool RemoveTensorMutation( + const std::shared_ptr& graph, + c10::optional> mutation_filter = c10::nullopt); + +// Replaces in-place aten activation ops with their functional equivalence +TORCH_API bool InplaceToFunctionalActivation( + const std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/replacement_of_old_operators.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/replacement_of_old_operators.h new file mode 100644 index 0000000000000000000000000000000000000000..1f3fbf6cac88d082a1a55ae0b9b85667f8d83561 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/replacement_of_old_operators.h @@ -0,0 +1,16 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Find the valid upgrader graph for the upgrader and cache the result +// for later lookups. Will error out if there is no valid upgrader graph +// provided for the upgrader name. 
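// ---------------------------------------------------------------------------
// Illustrative aside (not part of the headers above): a sketch of the optional
// mutation_filter described above, limiting RemoveTensorMutation to a single
// op kind. The helper name and the choice of aten::relu_ are illustrative.
// ---------------------------------------------------------------------------
#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/passes/remove_mutation.h>

bool remove_relu_mutation(const std::shared_ptr<torch::jit::Graph>& graph) {
  // Only nodes for which the filter returns true are considered for rewriting.
  auto only_relu = [](torch::jit::Node* n) {
    return n->kind() == c10::Symbol::fromQualString("aten::relu_");
  };
  return torch::jit::RemoveTensorMutation(graph, only_relu);
}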
+std::shared_ptr getUpgraderGraph(const std::string& upgrader_name); + +TORCH_API void ReplaceOldOperatorsWithUpgraders(std::shared_ptr graph); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/requires_grad_analysis.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/requires_grad_analysis.h new file mode 100644 index 0000000000000000000000000000000000000000..c7b80423dc5eafd88eb8f22255e472fda0d954ab --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/requires_grad_analysis.h @@ -0,0 +1,16 @@ +#pragma once + +#include + +#include + +namespace torch { +namespace jit { + +struct Graph; +struct ArgumentSpec; + +TORCH_API void PropagateRequiresGrad(std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/restore_mutation.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/restore_mutation.h new file mode 100644 index 0000000000000000000000000000000000000000..48ce9fdb9ed208441959974b015026bda98f7f06 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/restore_mutation.h @@ -0,0 +1,63 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { + +// A map which stores if an activation operator can perform type promotion +const std::unordered_map activation_type_promotion_mapping = { + {aten::sigmoid, true}, + {aten::tanh, true}, + {aten::celu, false}, + {aten::elu, false}, + {aten::gelu, false}, + {aten::glu, false}, + {aten::hardshrink, false}, + {aten::hardsigmoid, false}, + {aten::hardswish, false}, + {aten::hardtanh, false}, + {aten::leaky_relu, false}, + {aten::prelu, false}, + {aten::relu6, false}, + {aten::relu, false}, + {aten::rrelu, false}, + {aten::selu, false}, + {aten::silu, false}}; + +class FunctionalToInplaceRewriter { + public: + FunctionalToInplaceRewriter(std::shared_ptr graph); + + bool FunctionalToInplace(Block* block); + + private: + AliasDb* getOrCreateAliasDb() { + if (!aliasDb_) { + aliasDb_ = std::make_unique(graph_); + } + return aliasDb_.get(); + } + + bool CanBeInplace(Node* node); + + std::unique_ptr aliasDb_ = nullptr; + std::shared_ptr graph_; +}; + +// A common application scenario is to apply InplaceToFunctionalActivation +// before some JIT optimization passes, so that those passes are less +// constrained by in-place ops. After those passes are done, we can call +// FunctionalToInplaceActivation to recover in-place activation ops, +// so that we won't lose the performance benefit coming from memory reduction. 
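// ---------------------------------------------------------------------------
// Illustrative aside (not part of the headers above): the round trip described
// in the comment above, sketched as a single helper. The helper name and the
// placeholder for "other passes" are hypothetical.
// ---------------------------------------------------------------------------
#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/passes/remove_mutation.h>
#include <torch/csrc/jit/passes/restore_mutation.h>

void optimize_with_functional_window(
    const std::shared_ptr<torch::jit::Graph>& graph) {
  // 1. Rewrite in-place activations (e.g. aten::relu_) to functional form so
  //    downstream passes don't have to reason about mutation.
  torch::jit::InplaceToFunctionalActivation(graph);

  // 2. ... run mutation-averse JIT optimization passes here ...

  // 3. Recover the in-place activations to keep the memory savings.
  torch::jit::FunctionalToInplaceActivation(graph);
}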
+ +// Replaces functional aten activation ops with their in-place equivalents +TORCH_API bool FunctionalToInplaceActivation( + const std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/shape_analysis.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/shape_analysis.h new file mode 100644 index 0000000000000000000000000000000000000000..670072a0b09b45337cd8bd80eb5bd9e12ee7f0dc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/shape_analysis.h @@ -0,0 +1,43 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace jit { + +struct Graph; + +struct propagation_error : std::exception {}; + +class PropertyPropBase { + // Used for both Shape Propagation and Dtype/Device Propagation + public: + explicit PropertyPropBase(std::shared_ptr graph) + : graph_(std::move(graph)) {} + virtual ~PropertyPropBase() = default; + + void propagateBlock(Block* block, bool insert_expands = true); + // insert_expands is used for shape inference + + void processIf(Node* node); + void processLoop(Node* node); + + protected: + virtual void propagateNode(Node* node, bool insert_expands = true) = 0; + void setUnshapedType(Value* o); + void setUnshapedType(Node* node); + std::shared_ptr graph_; +}; + +TORCH_API void EraseShapeInformation(const std::shared_ptr& graph); +TORCH_API void PropagateInputShapes(const std::shared_ptr& graph); + +TORCH_API bool mergeTypes( + ArrayRef lhs, + ArrayRef rhs, + ArrayRef outputs); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/specialize_autogradzero.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/specialize_autogradzero.h new file mode 100644 index 0000000000000000000000000000000000000000..83b5e657750b831bb4891569ef1b71cd87a95d1a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/specialize_autogradzero.h @@ -0,0 +1,21 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// propagate autograd zero information through a gradient graph and +// remove grad_of blocks if present. +// Note: this is a very limited pass. It only propagates autograd zeros for +// operations generated by the symbolic autodiff code and cleans up +// AutogradAdds when possible. Outputs of other nodes are conservatively +// marked Unknown and not optimized. +TORCH_API void specializeAutogradZero(std::shared_ptr g); + +struct ProfilingRecord; + +TORCH_API void InsertProfileNodesForSpecializeAutogradZero(ProfilingRecord* pr); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/subgraph_rewrite.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/subgraph_rewrite.h new file mode 100644 index 0000000000000000000000000000000000000000..d932c0c1f74fa73b14e8d041a55e8a82d33bdd62 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/subgraph_rewrite.h @@ -0,0 +1,117 @@ +/** This file defines API for pattern-based subgraph rewrites. + * + * The API can be used for finding concrete patterns in the model and replacing + * the corresponding subgraphs with another subgraph. A special case of such + * rewrites is fusion, where the new subgraph consists of just a single node. 
+ * + * There is a default set of the most common patterns that everyone could use. + * Alternatively, an arbitrary pattern can be registered. + */ +#pragma once + +#include +#include + +#include +#include +#include + +namespace torch { +namespace jit { + +// Forward declarations. +struct RewritePatternDescr; +struct Match; + +using MatchFilter = std::function< + bool(const Match&, const std::unordered_map&)>; + +/** Run pattern-based subgraph rewrites on all methods in the module. + * + * This pass will go through all methods in the module and try to replace all + * recognized patterns (see SubgraphRewriter::RegisterDefaultPatterns for the + * list of these patterns). + */ +TORCH_API Module PatternBasedRewrite(const Module& module); + +/** A class implementing API for pattern-based subgraph rewrites. + * + * To perform pattern-based subgraph rewrites on a module using this API, one + * needs to create an object of such class, register rewrite patterns and run + * the transformation pass (`runOnModule`). + * + * To use standard patterns, one could use `RegisterDefaultPatterns`. + * + * To enable rewrites of custom patterns, the custom patterns must be registered + * with `RegisterRewritePattern`. + */ +class TORCH_API SubgraphRewriter { + public: + // Run pattern-based subgraph rewrite pass on the module. + Module runOnModule(const Module& module); + + // Run pattern-based subgraph rewrite pass on the graph (used in testing). + // `filter` is a function that does extra filtering on the match. If it + // returns false for a given Match, we'll skip the Match. The filter + // function's arguments consist of a Match and a value map from parsing the + // pattern graph. Both the Match and the value map are necessary because we + // need to 1) do extra filtering on the matched result as well as 2) refer to + // the values in the matched result through the values in the pattern graph. + void runOnGraph( + std::shared_ptr& graph, + const std::vector& filters); + + void runOnGraph( + std::shared_ptr& graph, + const MatchFilter& filter = + [](const Match&, const std::unordered_map&) { + return true; + }) { + runOnGraph(graph, std::vector({filter})); + } + + // Register standard rewrite patterns. + void RegisterDefaultPatterns(); + + /** Register a custom rewrite pattern. + * + * The method takes two parameters specifying the pattern: + * \p PATTERN - IR string representing the pattern subgraph. + * \p REPLACEMENT - IR string representing the replacement subgraph. + * \p value name map - vector of pairs mapping values in the replacement graph + * to the values in the pattern graph. Used for preserving source range info + * across graph rewrite. + * + * See examples of pattern registering in `RegisterDefaultPatterns`. + */ + void RegisterRewritePattern( + const std::string& pattern, + const std::string& replacement, + const std::vector>& value_name_pair = + {}); + + private: + std::vector patterns_; + std::unordered_set nodes_to_delete_; + + void rewriteSinglePatternOnGraph( + std::shared_ptr& graph, + const RewritePatternDescr& pattern, + const std::vector& filters); + + bool overlapsWithPreviousMatches(const Match* match); +}; + +/** Rewrite pattern descriptor. + * + * This structure is used in the implementation of `SubgraphRewriter` and + * is not supposed to be used externally. 
+ */ +struct RewritePatternDescr { + std::string pattern; + std::string replacement; + std::unordered_map value_name_map; +}; + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/symbolic_shape_analysis.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/symbolic_shape_analysis.h new file mode 100644 index 0000000000000000000000000000000000000000..824740792aaf031a0adcc181cb84a666ef539fe4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/symbolic_shape_analysis.h @@ -0,0 +1,58 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { + +// CAUTION NOT TO BE USED, STILL A WIP, NOT STABLE + +TORCH_API void PropagateShapesOnGraph(std::shared_ptr& graph); + +// CAUTION NOT TO BE USED, STILL A WIP, NOT STABLE +// From [beg, end) attempt to propagate shapes and +// build up a graph that will compute all remaining symbolic +// shapes in [beg, end) that can be executed before beg + +struct ShapeComputeGraphMapping { + ShapeComputeGraphMapping( + std::shared_ptr partial_eval_shape_graph, + std::unordered_map + enclosing_graph_value_to_shape_graph_input, + std::unordered_map graph_output_to_symbolic_shape_dim) + : partial_eval_shape_graph(std::move(partial_eval_shape_graph)), + enclosing_graph_value_to_shape_graph_input_( + std::move(enclosing_graph_value_to_shape_graph_input)), + graph_output_to_symbolic_shape_dim_( + std::move(graph_output_to_symbolic_shape_dim)){}; + + std::shared_ptr partial_eval_shape_graph; + std::unordered_map + enclosing_graph_value_to_shape_graph_input_; + std::unordered_map graph_output_to_symbolic_shape_dim_; +}; + +TORCH_API c10::optional +PropagateShapesAndBuildLargeShapeComputeGraph( + std::shared_ptr& graph, + Node* beg, + Node* end); + +// don't insert complete tensor shapes in shape compute graphs and instead +// rely on our partial evaluation pipeline to propagate information. +// this is a good proxy for our ability to propagate non-complete shape +// information. +TORCH_API bool setSymbolicShapeAnalysisTestMode(bool value); +TORCH_API bool symbolicShapeAnalysisTestModeEnabled(); + +using SSAInput = std::variant; +TORCH_API c10::optional> +calculateSymbolicShapesOnOp( + const FunctionSchema* schema, + const std::vector& inputs); +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/symbolic_shape_cache.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/symbolic_shape_cache.h new file mode 100644 index 0000000000000000000000000000000000000000..02e00acac08d2d1b625c02524eb51c68569515d4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/symbolic_shape_cache.h @@ -0,0 +1,57 @@ +#pragma once + +#include +#include + +namespace torch { +namespace jit { + +struct TORCH_API CanonicalizedSymbolicShape { + // TODO: Consider in the future if it is reasonable to + // merge code with SymbolicShape or VaryingShape while keeping + // the two not implicitly convertable (and cause bugs). 
+  CanonicalizedSymbolicShape(
+      const c10::SymbolicShape& orig_shape,
+      std::unordered_map<int64_t, int64_t>& ss_map) {
+    init(orig_shape, ss_map);
+  }
+
+  CanonicalizedSymbolicShape(c10::SymbolicShape& orig_shape) {
+    std::unordered_map<int64_t, int64_t> new_ssmap;
+    init(orig_shape, new_ssmap);
+  }
+
+  size_t hash() const;
+
+  c10::SymbolicShape toSymbolicShape(
+      std::unordered_map<int64_t, int64_t>& inverse_ss_map) const;
+
+  TORCH_API friend bool operator==(
+      const CanonicalizedSymbolicShape& a,
+      const CanonicalizedSymbolicShape& b);
+
+ private:
+  c10::optional<std::vector<int64_t>> values_;
+
+  void init(
+      const c10::SymbolicShape& orig_shape,
+      std::unordered_map<int64_t, int64_t>& ss_map);
+};
+
+// SHAPE CACHE API
+TORCH_API c10::optional<std::vector<c10::SymbolicShape>>
+get_cached_shape_function(
+    const FunctionSchema* schema,
+    const std::vector<SSAInput>& arg_vec);
+
+TORCH_API void cache_shape_function(
+    const FunctionSchema* schema,
+    const std::vector<SSAInput>& arg_vec,
+    const std::vector<c10::SymbolicShape>& ret_vec);
+
+// For use in test code
+TORCH_API void clear_shape_cache();
+TORCH_API size_t get_shape_cache_size();
+
+} // namespace jit
+} // namespace torch
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/tensorexpr_fuser.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/tensorexpr_fuser.h
new file mode 100644
index 0000000000000000000000000000000000000000..d951982fde2990f335ed26e688767b349ac1cb5b
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/tensorexpr_fuser.h
@@ -0,0 +1,75 @@
+#pragma once
+
+#include
+#include
+#include
+
+namespace torch {
+namespace jit {
+
+// Runs the TensorExpressions-based fuser.
+// If add_composed_op is true, creates a single operation that
+// performs both the runtime check that types align
+// and the dispatch to the kernel or the unoptimized graph.
+TORCH_API void FuseTensorExprs(
+    std::shared_ptr<Graph>& graph,
+    size_t min_group_size = 2,
+    bool add_composed_op = false,
+    bool fuse_to_dynamic_shapes = false);
+
+TORCH_API void setTensorExprFuserEnabled(bool val);
+TORCH_API bool tensorExprFuserEnabled();
+TORCH_API void setTensorExprDynamicShapeFusionEnabled(bool val);
+TORCH_API bool tensorExprDynamicShapeFusionEnabled();
+TORCH_API bool setTexprReductionsEnabled(bool value);
+TORCH_API bool texprReductionsEnabled();
+
+TORCH_API void RemoveProfileNodesAndSpecializeTypes(
+    std::shared_ptr<Graph>& graph);
+TORCH_API bool hasTensorTypeSpecialization(Value* v);
+TORCH_API void RemoveTensorTypeSpecializations(std::shared_ptr<Graph>& graph);
+TORCH_API void removeTensorTypeSpecializations(Block* block);
+
+using tensor_type_converter_t =
+    c10::function_ref<TensorTypePtr(const TensorTypePtr& t)>;
+
+// Inserts a TypeCheck pattern.
+//
+// Around the guarded node, which has a Subgraph attribute, this inserts:
+//
+// if TypeCheck(...):
+//   guarded_node
+// else:
+//   FallbackGraph(...)
+//
+// The TypeCheck includes the types of all Tensor inputs to the guarded_node,
+// as processed by the type_converter, a lambda
+// TensorTypePtr(const TensorTypePtr& t). This allows erasing irrelevant
+// aspects of the type.
+//
+// The fallback graph will have the same subgraph as the guarded node (with the
+// expectation that the guarded_node's subgraph will then be optimized).
+TORCH_API void insertTypeGuard(
+    Node* guarded_node,
+    tensor_type_converter_t type_converter,
+    c10::Symbol kind);
+
+TORCH_API bool usedOnlyInSize(Value* v);
+TORCH_API Value* broadcastSizes(at::ArrayRef<Value*> sizes, AliasDb* db);
+
+namespace tensorexpr {
+TORCH_API bool isSupported(Node* node);
+
+/// Get the modifiable custom operator set object.
+///
+/// For static shapes, if a custom operator has been added to the custom
+/// operator set, it will be pulled into the NNC fusion group. But it doesn't
+/// work with dynamic shapes unless the shape function is explicitly registered
+/// via `torch::jit::RegisterShapeComputeGraphForSchema` for the custom operator.
+///
+/// @return Reference to the custom operator set
+///
+TORCH_API OperatorSet& getCustomOperatorSet();
+} // namespace tensorexpr
+} // namespace jit
+} // namespace torch
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/update_differentiable_graph_requires_grad.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/update_differentiable_graph_requires_grad.h
new file mode 100644
index 0000000000000000000000000000000000000000..eb51ba00c4c9f8d2ca07dd96def6f5e168160e35
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/update_differentiable_graph_requires_grad.h
@@ -0,0 +1,20 @@
+#pragma once
+
+#include
+
+namespace torch {
+namespace jit {
+
+// Because differentiable graphs detach the gradients of input Tensors,
+// creating and inlining differentiable graphs changes the requires_grad
+// property of tensors in the graph. This pass updates the requires_grad
+// property of prim::profile nodes to keep profiled properties up to date; it
+// does not update the grad properties of other nodes (e.g. graph inputs),
+// because the only downstream user of the grad property is the profiling
+// executor, which only uses the types of prim::profile nodes.
+TORCH_API void UpdateDifferentiableGraphRequiresGrad(
+    std::shared_ptr<Graph>& diff_forward_graph,
+    c10::optional<bool> new_requires_grad);
+
+} // namespace jit
+} // namespace torch
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/variadic_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/variadic_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..cdd89bbcc22fffffef677a570923d469a6f129bd
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/variadic_ops.h
@@ -0,0 +1,31 @@
+#pragma once
+
+#include
+
+namespace torch {
+namespace jit {
+
+// Tries to replace an op that takes a list input with another op that takes a
+// variadic number of arguments.
+TORCH_API bool UseVariadicOp(
+    const std::shared_ptr<Graph>& graph,
+    NodeKind op,
+    NodeKind variadic_op);
+
+TORCH_API bool RemoveListMutationAndUseVariadicOp(
+    const std::shared_ptr<Graph>& graph,
+    NodeKind op,
+    NodeKind variadic_op);
+
+// Convenience functions for replacing aten::stack/aten::cat with their
+// variadic versions.
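// ---------------------------------------------------------------------------
// Illustrative aside (not part of the headers above): a sketch that rewrites
// an aten::cat over a ListConstruct into the variadic form via UseVariadicCat,
// declared just below. The driver function and the IR literal are
// hypothetical.
// ---------------------------------------------------------------------------
#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/ir/irparser.h>
#include <torch/csrc/jit/passes/variadic_ops.h>

void variadic_cat_example() {
  auto graph = std::make_shared<torch::jit::Graph>();
  torch::jit::parseIR(
      R"IR(
graph(%a : Tensor, %b : Tensor):
  %dim : int = prim::Constant[value=0]()
  %list : Tensor[] = prim::ListConstruct(%a, %b)
  %out : Tensor = aten::cat(%list, %dim)
  return (%out))IR",
      graph.get());

  if (torch::jit::UseVariadicCat(graph)) {
    // The cat over the list is expected to be replaced by a variadic node that
    // takes %a and %b directly, leaving the ListConstruct dead.
    graph->dump();
  }
}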
+TORCH_API bool UseVariadicCat(const std::shared_ptr<Graph>& graph);
+TORCH_API bool RemoveListMutationAndUseVariadicCat(
+    const std::shared_ptr<Graph>& graph);
+
+TORCH_API bool UseVariadicStack(const std::shared_ptr<Graph>& graph);
+TORCH_API bool RemoveListMutationAndUseVariadicStack(
+    const std::shared_ptr<Graph>& graph);
+
+} // namespace jit
+} // namespace torch
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/xnnpack_rewrite.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/xnnpack_rewrite.h
new file mode 100644
index 0000000000000000000000000000000000000000..d1a64c52c9230ad85a3c3540e120b48532abd707
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/xnnpack_rewrite.h
@@ -0,0 +1,21 @@
+#pragma once
+
+#include
+#include
+#include
+
+namespace torch {
+namespace jit {
+
+TORCH_API void transformConv1dToConv2d(std::shared_ptr<Graph>& graph);
+TORCH_API void transformConv1dToConv2d(script::Module& module);
+TORCH_API void insertPrePackedOps(std::shared_ptr<Graph>& graph);
+TORCH_API void insertPrePackedOps(script::Module& module);
+TORCH_API void fusePrePackedLinearConvWithClamp(script::Module& module);
+TORCH_API void FoldPrePackingOps(script::Module& module);
+TORCH_API script::Module optimizeForMobile(
+    const script::Module& module,
+    const std::set<MobileOptimizerType>& optimization_blocklist = {},
+    const std::vector<std::string>& preserved_methods = {});
+} // namespace jit
+} // namespace torch
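// ---------------------------------------------------------------------------
// Illustrative aside (not part of the header above): a sketch of applying the
// mobile optimization entry point declared above to a scripted model. The file
// names are placeholders, and this assumes a PyTorch build with XNNPACK
// support.
// ---------------------------------------------------------------------------
#include <torch/csrc/jit/passes/xnnpack_rewrite.h>
#include <torch/script.h>

int main() {
  torch::jit::Module module = torch::jit::load("model.pt");
  module.eval();
  // Applies the mobile-oriented rewrites declared above (XNNPACK prepacking,
  // clamp fusion, prepack folding, among others) and returns an optimized copy.
  torch::jit::Module optimized = torch::jit::optimizeForMobile(module);
  optimized.save("model_mobile.pt");
  return 0;
}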