diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/codegen/cuda/interface.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/codegen/cuda/interface.h new file mode 100644 index 0000000000000000000000000000000000000000..0ccdfe2c9ebd992fa7c0c50c01159ce06b7f53d2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/codegen/cuda/interface.h @@ -0,0 +1,58 @@ +#pragma once + +#include +#include +#include +#include + +/* + * This file contains APIs for cuda fuser; + * + * We use an empty static struct to hold the function pointers, which are + * registered separately. This is to support cpu-only compilation. + * Registration is done in torch/csrc/jit/codegen/cuda/register_interface.cpp + */ + +namespace torch { +namespace jit { +namespace fuser { +namespace cuda { + +TORCH_API std::atomic& getCudaFusionGuardMode(); + +TORCH_API bool getSingletonFusion(); +TORCH_API bool setSingletonFusion(bool value); +TORCH_API bool getHorizontalFusion(); +TORCH_API bool setHorizontalFusion(bool value); + +// dummy struct to allow API registration +struct CudaFuserInterface { + void (*fn_compile_n)(Node*) = nullptr; + void (*fn_run_n_s)(const Node*, Stack&) = nullptr; + void (*fn_fuse_graph)(std::shared_ptr&) = nullptr; + bool (*fn_can_fuse_n)(const Node*) = nullptr; + void (*fn_insert_profile_inodes)(ProfilingRecord* pr) = nullptr; + bool (*fn_profile_n)(const Node*) = nullptr; + bool (*fn_skip_n)(const std::string&, bool flip) = nullptr; +}; + +// Get interface, this is used by registration and user facing API internally +TORCH_API CudaFuserInterface* getFuserInterface(); + +TORCH_API void compileFusionGroup(Node* fusion_node); +TORCH_API void runFusionGroup(const Node* fusion_node, Stack& stack); +TORCH_API void fuseGraph(std::shared_ptr&); +TORCH_API bool canFuseNode(const Node* node); +TORCH_API void InsertProfileNodesForCUDAFuser(ProfilingRecord* pr); +TORCH_API bool profileNode(const Node* node); + +TORCH_API bool skipNode(const std::string& symbol_str, bool flip = true); + +TORCH_API bool isEnabled(); +TORCH_API bool setEnabled(bool is_enabled); +TORCH_API bool canBeEnabled(); + +} // namespace cuda +} // namespace fuser +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/canonicalize.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/canonicalize.h new file mode 100644 index 0000000000000000000000000000000000000000..46d90d1a515f66fa19ac37c6d8621ba5f6e687de --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/canonicalize.h @@ -0,0 +1,22 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +TORCH_API std::shared_ptr Canonicalize( + const std::shared_ptr& graph, + bool keep_unique_names = true); + +TORCH_API void CanonicalizeOutputs(std::shared_ptr& graph); + +TORCH_API c10::optional firstOrLastUse(Value* v, bool find_first); + +TORCH_API bool isBeforeOrAfter( + const Use& a, + const Use& b, + bool checking_before); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/canonicalize_graph_fuser_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/canonicalize_graph_fuser_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..38ae569dbec31541b4ac032fb1637aead4e43204 --- /dev/null +++ 
b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/canonicalize_graph_fuser_ops.h @@ -0,0 +1,11 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +TORCH_API void CanonicalizeOps(const std::shared_ptr& graph); + +} +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/clear_profiling.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/clear_profiling.h new file mode 100644 index 0000000000000000000000000000000000000000..7dee9bdb52ad6c460366953f696480140f219fb6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/clear_profiling.h @@ -0,0 +1,19 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { + +TORCH_API void unprofileGraphInputs(const std::shared_ptr& graph); +TORCH_API void unprofileBlock(Block* start_block); +// Unprofiles all the node outputs in a block. + +TORCH_API void ClearProfilingInformation(const std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/clear_undefinedness.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/clear_undefinedness.h new file mode 100644 index 0000000000000000000000000000000000000000..24add48764c58a143ed755c41ec10f315a1b3207 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/clear_undefinedness.h @@ -0,0 +1,24 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { + +// Undefinedness makes argument matching fail for regular tensor operations +// if 1+ arguments are undefined or possibly undefined tensors. +// Technically, undefined tensors are **not** tensors as the regular tensor +// operations do not know how to handle them. +// However, in practice, there are guards and conversion operators that +// **always** gate regular operations if undefined tensors may be present +// Eventually, we would love to move to the world where we use optionals +// in lieu of undefined tensors. +// When this happens, this pass will be removed +TORCH_API void ClearUndefinedness(const std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/concat_opt.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/concat_opt.h new file mode 100644 index 0000000000000000000000000000000000000000..4fdd86f36a46dd0c1be0c53f1731c8e686ba1796 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/concat_opt.h @@ -0,0 +1,19 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Eliminates common inputs among `aten::cat` ops. +TORCH_API bool EliminateConcatCommonInputs(const std::shared_ptr& graph); + +// Expands `aten::cat` ops into `aten::copy` ops and eliminates redudancies +// in the buffers used for concatenation if possible. 
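// Usage sketch for the concat passes declared in this header. The pseudo-IR
// below is illustrative only (not the output of a specific PyTorch build),
// and `graph` is assumed to be constructed elsewhere:
//
//   %ab  = aten::cat([%a, %b], 0)
//   %abc = aten::cat([%ab, %c], 0)   =>   %abc = aten::cat([%a, %b, %c], 0)
//
//   bool changed = EliminateConcatCommonInputs(graph);
//   ExpandConcatAndEliminateRedundancy(graph);
//   changed |= CombineConcats(graph);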
+TORCH_API void ExpandConcatAndEliminateRedundancy( + const std::shared_ptr& graph); + +TORCH_API bool CombineConcats(const std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/constant_propagation.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/constant_propagation.h new file mode 100644 index 0000000000000000000000000000000000000000..62293c8d7abc9bc2344ccab38d3a30c18af2fe9d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/constant_propagation.h @@ -0,0 +1,32 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Runs constant propagation on all objects unless ignore_custom_classes is +// specified as true, in which case user defined classes are skipped. This is +// useful to prevent early fusion of packing operations, which end up lowering +// away information about their constructors (e.g. packed::linear_clamp_prepack +// and prepacked::conv2d_clamp_prepack) +// Returns True if the pass made a change to the graph +TORCH_API bool ConstantPropagation( + std::shared_ptr& graph, + bool ignore_custom_classes = false); + +// runs constant propagation only on ops that have non-aliasing inputs & outputs +// Returns True if the pass made a change to the graph +TORCH_API bool ConstantPropagationImmutableTypes(std::shared_ptr& graph); + +// Runs the node if its inputs are constants. Callers of this function must +// make their own determination if constant prop is appropriate - for example +// non-deterministic ops or ops with side effects. If ignore_custom_classes is +// specified, nodes that output user defined classes are not run. +TORCH_API c10::optional runNodeIfInputsAreConstant( + const Node* node, + bool ignore_custom_classes = false, + AliasDb* db = nullptr); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/create_autodiff_subgraphs.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/create_autodiff_subgraphs.h new file mode 100644 index 0000000000000000000000000000000000000000..481b2aa352107bc74f776b7bcd3bb24251b80c0b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/create_autodiff_subgraphs.h @@ -0,0 +1,19 @@ +#pragma once + +#include +#include + +#include + +namespace torch { +namespace jit { + +// insert GraphExecutor nodes that group together +// subgraphs that are differentiable by the jit's autodiff passes +// threshold - minimum number of nodes that will appear in a block +// returns all differentiable blocks that have been found +TORCH_API std::vector CreateAutodiffSubgraphs( + const std::shared_ptr& graph, + size_t threshold = 2); +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/create_functional_graphs.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/create_functional_graphs.h new file mode 100644 index 0000000000000000000000000000000000000000..351816394d80c694d30a2423d8774d3585318af9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/create_functional_graphs.h @@ -0,0 +1,14 @@ +#pragma once + +#include +#include + +namespace torch { +namespace jit { + +TORCH_API void CreateFunctionalGraphs(const std::shared_ptr& graph); + +TORCH_API void 
InlineFunctionalGraphs(const std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/device_type_analysis.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/device_type_analysis.h new file mode 100644 index 0000000000000000000000000000000000000000..159592a6c6672dd2fccf0768496aea1de44f1ff9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/device_type_analysis.h @@ -0,0 +1,13 @@ +#pragma once + +#include + +namespace torch { +namespace jit { +struct Graph; + +// Propagates Device type info throughout the given graph. +TORCH_API bool DeviceTypePropagation(std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/eliminate_no_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/eliminate_no_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..0087d306c23bc9b4580d669ec8009ec8b83b9e79 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/eliminate_no_ops.h @@ -0,0 +1,17 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Remove ops that do nothing on the forward pass (like aten::detach). +// This pass is invoked as a part of freeze_module. +// This function also takes a set of custom ops to eliminate. All ops in this +// set must take their output as their first input, i.e. x = f(x, ...) +TORCH_API bool EliminateNoOps( + std::shared_ptr& graph, + std::unordered_set custom_ops = {}); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/erase_number_types.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/erase_number_types.h new file mode 100644 index 0000000000000000000000000000000000000000..4aef1f5570694d20141ecc2e04a37eaf2ef0d3b6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/erase_number_types.h @@ -0,0 +1,23 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Erase NumberType information. This is necessary for and only used in +// exporting to ONNX. This pass ensures that no remaining Values have +// NumberType types, replacing them with tensors. +// The following things are done to erase NumberType info: +// - NumberType outputs are changed to DynamicType. +// - prim::Constant nodes which are numbers get changed into 0-dim tensors of +// the corresponding type +// - prim::TensorToNum, aten::Float, aten::Int and prim::NumToTensor nodes +// are erased. +// +// The pass assumes that DCE will be called sometime after. 
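// Illustrative before/after for this pass (hand-written pseudo-IR, hedged;
// exact node kinds and attribute printing may differ between PyTorch versions):
//
//   before:  %n : int    = prim::Constant[value=3]()
//            %t : Tensor = prim::NumToTensor(%n)
//   after:   %n : Tensor = prim::Constant[value={3}]()   // 0-dim tensor
//            (prim::NumToTensor erased; uses of %t rewired to %n, DCE cleans up)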
+TORCH_API void EraseNumberTypes(const std::shared_ptr& graph); +TORCH_API void EraseNumberTypesOnBlock(Block* block); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fold_linear_bn.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fold_linear_bn.h new file mode 100644 index 0000000000000000000000000000000000000000..704a0915116286ace337974c449e9a635fca4053 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fold_linear_bn.h @@ -0,0 +1,29 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +struct TORCH_API LinearBNParameters { + at::Tensor linear_w; + at::Tensor linear_b; + at::Tensor bn_rm; + at::Tensor bn_rv; + double bn_eps = 0.0; + at::Tensor bn_w; + at::Tensor bn_b; +}; + +/** + * Given the current weight and bias tensors of a Linear module and parameters + * of the BatchNorm module we're folding with, compute the updated values + * for the weight and bias. + * + * The function is basically copied from torch/nn/utils/fusion.py + */ +TORCH_API std::tuple computeUpdatedLinearWeightAndBias( + const LinearBNParameters& p); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/freeze_module.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/freeze_module.h new file mode 100644 index 0000000000000000000000000000000000000000..7a50519cd92d5b536d76c851e87d31a2c5911cf8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/freeze_module.h @@ -0,0 +1,36 @@ +/** \brief This file defines freezing Torchscript module API. + * + * This API has python-binding and can be invoked directly or as a part of + * general optimization pipeline. + */ +#pragma once + +#include +#include + +/** \brief Freeze Module, i.e., Assume all attributes are constants. + * + * Freezing module is a functionality that allows the JIT to internalize + * immutable attributes. Combined with inlining, the module is aggressively + * optimized and significant overhead is optimized away. The freezeModule API + * produces a cloned frozen module. + */ + +namespace torch { +namespace jit { + +TORCH_API Module freeze_module( + const Module& module, + std::vector preservedAttrs = std::vector(), + bool freezeInterfaces = true, + bool preserveParameters = false); + +// Clone-free version of freeze_module. This modifies the module inplace. +// Use this version to avoid extra memory usage incurred by cloning the module. +TORCH_API void freeze_module_inplace( + Module* module, + std::vector preservedAttrs = std::vector(), + bool freezeInterfaces = true, + bool preserveParameters = false); +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_concat_linear.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_concat_linear.h new file mode 100644 index 0000000000000000000000000000000000000000..d64cda0a88f2bbc0ebe585bf8b63e5bd94744743 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_concat_linear.h @@ -0,0 +1,13 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Concats multiple linear ops with the same Tensor input +// into a single linear op. 
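// Conceptual sketch of the rewrite performed here (weights and biases are
// frozen constants; output-splitting details omitted):
//
//   %y1 = aten::linear(%x, %W1, %b1)
//   %y2 = aten::linear(%x, %W2, %b2)
// becomes, roughly,
//   %W  = cat([%W1, %W2], 0); %b = cat([%b1, %b2], 0)   // precomputed constants
//   %y  = aten::linear(%x, %W, %b)
//   %y1, %y2 = slices of %y along the last dimension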
+TORCH_API bool FrozenConcatLinear(std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_conv_add_relu_fusion.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_conv_add_relu_fusion.h new file mode 100644 index 0000000000000000000000000000000000000000..95991e73d9eccf7473071c5ed352af56d7c114f3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_conv_add_relu_fusion.h @@ -0,0 +1,15 @@ +#pragma once + +#include +#include + +namespace torch { +namespace jit { + +TORCH_API extern std::function&)>& +getFuseFrozenConvAddReluImpl(); + +TORCH_API void FuseFrozenConvAddRelu(std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_conv_folding.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_conv_folding.h new file mode 100644 index 0000000000000000000000000000000000000000..65dc138ccd6a41be0fa709516c97a8b89eeafd98 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_conv_folding.h @@ -0,0 +1,24 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Fuses Convolution -> Batchnorm into a single Convolution by +// folding batchnorm weights into conv weights. +// This pass only works on Frozen Graphs; otherwise it is a No-Op. +TORCH_API bool FoldFrozenConvBatchnorm(std::shared_ptr& graph); + +// Fuses Convolution -> Add/Sub into a single Convolution by +// folding add constant tensor into conv weights. +// This pass only works on Frozen Graphs; otherwise it is a No-Op. +TORCH_API bool FoldFrozenConvAddOrSub(std::shared_ptr& graph); + +// Fuses Convolution -> Mul/Div into a single Convolution by +// folding add constant tensor into conv weights. +// This pass only works on Frozen Graphs; otherwise it is a No-Op. +TORCH_API bool FoldFrozenConvMulOrDiv(std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_linear_folding.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_linear_folding.h new file mode 100644 index 0000000000000000000000000000000000000000..bac4bedd53a6bb45999d5d276986d0946e1a2f0b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_linear_folding.h @@ -0,0 +1,14 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Fuses Linear -> BatchNormNd into a single Linear by +// folding batchnorm weights into linear weights. +// This pass only works on Frozen Graphs; otherwise it is a No-Op. 
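// The folding arithmetic, written out as a standalone sketch. It mirrors the
// LinearBNParameters fields declared in fold_linear_bn.h but is not the
// upstream implementation; it assumes ATen tensor ops are visible here and the
// function name is chosen only for illustration.
inline std::pair<at::Tensor, at::Tensor> foldLinearBatchnormSketch(
    const at::Tensor& linear_w, // [out_features, in_features]
    const at::Tensor& linear_b, // [out_features]
    const at::Tensor& bn_rm,    // running mean, [out_features]
    const at::Tensor& bn_rv,    // running variance, [out_features]
    double bn_eps,
    const at::Tensor& bn_w,     // batchnorm gamma, [out_features]
    const at::Tensor& bn_b) {   // batchnorm beta, [out_features]
  // Per-output-channel scale derived from the batchnorm statistics.
  at::Tensor scale = bn_w / at::sqrt(bn_rv + bn_eps);
  // Scale each row of the linear weight and shift the bias accordingly.
  at::Tensor new_w = linear_w * scale.unsqueeze(-1);
  at::Tensor new_b = (linear_b - bn_rm) * scale + bn_b;
  return {new_w, new_b};
}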
+TORCH_API bool FoldFrozenLinearBatchnorm(std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_ops_to_mkldnn.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_ops_to_mkldnn.h new file mode 100644 index 0000000000000000000000000000000000000000..d6ffc36906ad7e51b3fb1bc940cac69e8fb1b433 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_ops_to_mkldnn.h @@ -0,0 +1,15 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Converts operators & their parameters to mkldnn if it is profitable +// Currently encompassing Conv2d and Conv3d, and Linear +// Op must be in float32 and mkldnn must be built +// This pass only works on frozen graph +TORCH_API void ConvertFrozenOpsToMKLDNN(std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fuse_relu.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fuse_relu.h new file mode 100644 index 0000000000000000000000000000000000000000..6577431368e9b9b8d99dd5995fc805d6b4d2d742 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fuse_relu.h @@ -0,0 +1,11 @@ +#pragma once + +#include +#include + +namespace torch { +namespace jit { +TORCH_API void FuseAddRelu(script::Module& module); +TORCH_API void FuseAddRelu(std::shared_ptr& graph); +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/graph_fuser.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/graph_fuser.h new file mode 100644 index 0000000000000000000000000000000000000000..aafb442eafb6f5e1b1e506c06627e9e9a03a5eed --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/graph_fuser.h @@ -0,0 +1,37 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +TORCH_API bool canFuseOnCPULegacy(); +TORCH_API void overrideCanFuseOnCPULegacy(bool value); + +// NB: Be sure to run DCE before fusion, because dead instructions +// can prevent fusion opportunities from being exploited. +// On Windows will noop, NYI +TORCH_API void FuseGraph( + std::shared_ptr& graph, + bool strict_fuser_check = false); + +// \brief Custom fusion pass using a node-level callback to +// determine the inclusion of nodes in a subgraph. +// +// This helper omits aliased inputs and fusion across control flow +// boundaries. +// +// \arg graph The graph to be modified in-place +// \arg is_fusable A callback run on each fusable node in the graph. +// \arg kind The label given to the resultant fused subgraph +// \arg arg_limit The maximum number of args the resultant fused subgraph +// should have. Note: This will likely develop into a general +// post condition on the fused subgraph. 
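// Minimal usage sketch for the declaration just below (the predicate and the
// "my::FusionGroup" symbol are made-up examples, not a real backend):
//
//   CustomFuseGraph(
//       graph,
//       [](Node* n) { return n->kind() == aten::relu || n->kind() == aten::add; },
//       Symbol::fromQualString("my::FusionGroup"));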
+TORCH_API void CustomFuseGraph( + std::shared_ptr& graph, + const std::function& is_fusable, + Symbol kind, + size_t arg_limit = std::numeric_limits::max()); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inline_autodiff_subgraphs.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inline_autodiff_subgraphs.h new file mode 100644 index 0000000000000000000000000000000000000000..8edc81224a07321786937bdebde0d19c5c119c22 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inline_autodiff_subgraphs.h @@ -0,0 +1,15 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +TORCH_API bool canRunWithAutograd(Node* node); + +TORCH_API void InlineAutodiffSubgraphs( + std::shared_ptr& graph, + size_t threshold = 5); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inline_forked_closures.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inline_forked_closures.h new file mode 100644 index 0000000000000000000000000000000000000000..164c29f8b6557f50b02401704c2622dc187d86aa --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inline_forked_closures.h @@ -0,0 +1,12 @@ +#pragma once + +#include +#include + +namespace torch { +namespace jit { + +TORCH_API void inlineForkedClosures(std::shared_ptr& to_clean); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inliner.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inliner.h new file mode 100644 index 0000000000000000000000000000000000000000..b4db0ad189282d83a2e184993e1c790a41527bf3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inliner.h @@ -0,0 +1,14 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Inline function and method calls. 
+TORCH_API void Inline(Graph& graph); + +TORCH_API GraphFunction* tryToGraphFunction(Node* n); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inplace_check.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inplace_check.h new file mode 100644 index 0000000000000000000000000000000000000000..6d22d173002f5201a80a839d6d6923d713ac951e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inplace_check.h @@ -0,0 +1,11 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +TORCH_API void CheckInplace(std::shared_ptr& graph); + +} +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/integer_value_refinement.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/integer_value_refinement.h new file mode 100644 index 0000000000000000000000000000000000000000..5614e96c141f4b611418fec08ce917868728eef1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/integer_value_refinement.h @@ -0,0 +1,12 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// return true if graph is modified +TORCH_API bool RefineIntegerValues(const std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/loop_unrolling.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/loop_unrolling.h new file mode 100644 index 0000000000000000000000000000000000000000..5895f2fcee7462b8f9627651e58590d96299ac93 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/loop_unrolling.h @@ -0,0 +1,36 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// return true if graph is modified +TORCH_API bool UnrollLoops(std::shared_ptr& graph); + +// Only unrolls constant loops. 
Will unroll them regardless of loop block size +TORCH_API bool UnrollConstantLoops(std::shared_ptr& graph); + +TORCH_API Node* PeelLoop(Node* n, size_t times); + +// return true if graph is modified +TORCH_API bool PeelProfilingLoops(const std::shared_ptr& graph); + +struct TORCH_API LoopsPeeler { + LoopsPeeler(std::function callback, size_t num_iterations = 1) + : callback_(std::move(callback)), num_iterations_(num_iterations) {} + + bool run(const std::shared_ptr& graph); + + private: + void collectLoop(Node* n); + void collectLoops(Block* block); + void peelLoops(); + + std::function callback_ = nullptr; + Node* in_loop_ = nullptr; + std::list loops_to_peel_; + size_t num_iterations_ = 1; +}; +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_grad_of.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_grad_of.h new file mode 100644 index 0000000000000000000000000000000000000000..a79bb56492855b6a9002fe82f9c7b9856092af51 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_grad_of.h @@ -0,0 +1,17 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// This pass removes 'grad_of' nodes, replacing them with conditionals of +// the form: +// if any_defined(inputs): +// outputs = +// else: +// outputs = undefineds +TORCH_API void LowerGradOf(Graph& g); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_tuples.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_tuples.h new file mode 100644 index 0000000000000000000000000000000000000000..3ac9127b29fb084bb0c1d01d7684ac429e176453 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_tuples.h @@ -0,0 +1,20 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// removes tuples where TupleConstruct and TupleUnpack are matched +// but leaves tuples in place across if statements, loops, and as inputs/outputs +TORCH_API void LowerSimpleTuples(const std::shared_ptr& graph); + +// removes _all_ tuples and raises an error if some cannot be removed +// this is used by ONNX to ensure there are not tuples before conversion, +// but will not work on graphs whose inputs contain tuples. 
+TORCH_API void LowerAllTuples(const std::shared_ptr& graph); + +TORCH_API void LowerSimpleTuples(Block* block); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/mobile_optimizer_type.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/mobile_optimizer_type.h new file mode 100644 index 0000000000000000000000000000000000000000..d11f288dca343308bf2167c89a3d6b2d0792a569 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/mobile_optimizer_type.h @@ -0,0 +1,13 @@ +#pragma once + +#include + +enum class MobileOptimizerType : int8_t { + CONV_BN_FUSION, + INSERT_FOLD_PREPACK_OPS, + REMOVE_DROPOUT, + FUSE_ADD_RELU, + HOIST_CONV_PACKED_PARAMS, + CONV_1D_TO_2D, + VULKAN_AUTOMATIC_GPU_TRANSFER, +}; diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/onnx.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/onnx.h new file mode 100644 index 0000000000000000000000000000000000000000..11bee679164043cca58fd3f35a108fd078101a95 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/onnx.h @@ -0,0 +1,28 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace jit { + +TORCH_API std::shared_ptr ToONNX( + std::shared_ptr& state, + ::torch::onnx::OperatorExportTypes operator_export_type); +TORCH_API std::unordered_map BlockToONNX( + Block* old_block, + Block* new_block, + ::torch::onnx::OperatorExportTypes operator_export_type, + std::unordered_map& env, + bool is_sub_block = false); +TORCH_API void NodeToONNX( + Node* old_node, + Block* new_block, + ::torch::onnx::OperatorExportTypes operator_export_type, + std::unordered_map& env); +TORCH_API void RemovePrintOps(std::shared_ptr& graph); +TORCH_API void PreprocessCaffe2Ops(std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/pass_manager.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/pass_manager.h new file mode 100644 index 0000000000000000000000000000000000000000..8585c6ecdb3de5c0ce1a3a72be9d722d2b423f0d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/pass_manager.h @@ -0,0 +1,136 @@ +#pragma once + +#include + +/* `getCustomPrePasses()` returns a vector of passes that will be executed + * after differentiation but before any fusion. This is the de-facto location + * for compiler backends to insert passes. + * + * `getCustomPostPasses()` returns a vector of passes that will be + * executed after differentiation and after fusion (if any). This is the + * location for fusion cleanup passes if they are needed. + * + * Static registration of a pass can be done by creating a global + * `Register{Pre,Post}Pass r(Pass)` variable in a compilation unit. + * + * pass_manager.h uses a Meyer's singleton to store a vector of `Pass`es, which + * modify the IR graph in place. + */ + +namespace torch { +namespace jit { + +// A pass modifies a Graph in place. +using GraphPass = std::function&)>; + +// Since Passes are std::functions, we associate a UUID to each pass, this way +// if we want to deregister a pass, we have something to reference it by. 
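// Registration sketch based on the description above; the pass body is a
// placeholder and `myPass` is a hypothetical GraphPass:
//
//   // static registration (runs for every post-differentiation graph):
//   static RegisterPostPass reg([](std::shared_ptr<Graph>& g) { /* mutate g */ });
//
//   // dynamic registration, keeping the id so the pass can be removed later:
//   GraphPassNameType id = registerPostPass(myPass);
//   clearPostPass(id);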
+using GraphPassNameType = unsigned int; + +// Graph pass entries have a name associated with them +using GraphPassEntry = std::pair; + +// Return currently registered passes. Passes are stored in a static vector +TORCH_API std::vector>& +getCustomPostPasses(); +TORCH_API std::vector>& +getCustomPrePasses(); + +TORCH_API GraphPassNameType registerPostPass(GraphPass p); +TORCH_API GraphPassNameType registerPrePass(GraphPass p); + +// Look up pass by name passed in, remove it from registered passes +TORCH_API void clearPostPass(GraphPassNameType p); +TORCH_API void clearPrePass(GraphPassNameType p); + +// Remove all passes +TORCH_API void clearAllPostPasses(); +TORCH_API void clearAllPrePasses(); + +// LEGACY CALL +struct TORCH_API RegisterPostPass { + RegisterPostPass(GraphPass p); +}; + +using RegisterPass = RegisterPostPass; + +/* + * PassManager is a wrapper on the register/clear PostPass functions above. It + * will register the pass provided in "registerPass" and will hold on to its + * associated name that way clearPass can be later called and will delete the + * pass used to register when called. + * + * PassManager is templated because we want static variables based on a + * particular GraphPass. When deriving from PassManager, you should send as the + * template parameter your derived class as you would for the curiously + * recurring template pattern. This template parameter isn't actually used and + * is simply done to prevent static members from being shared across derived + * types. + */ +template +struct C10_EXPORT PassManager { + private: + // We want this class to be abstract because it's + virtual void abstract() = 0; + + protected: + /* + * isRegistered() will return if a pass has been registered + * isRegistered(true) will change the value of the internal static bool + * + * There's an internal static bool to this function to keep track of the + * state, this is so when functions are derived from this class, they don't + * have to worry about initializing the static members. + */ + static bool isRegistered(bool flip_bit = false) { + static bool val = false; + if (flip_bit) + val = !val; + return val; + } + + /* + * name() will return the name of the registered pass + * name(pass_name, true) will set the name of the pass + * Similarly to isRegistered we use an internal static variable to hold the + * name. + */ + static GraphPassNameType passID( + GraphPassNameType PassID = 0, + bool set = false) { + static GraphPassNameType pass_id = 0; + if (set) + pass_id = PassID; + return pass_id; + } + + public: + // registerPass(pass) will register the pass provided and set the + // name/isRegistered functions appropriately, it returns a bool value + // indicating whether the given pass is already registered previously. + static bool registerPass(GraphPass p) { + if (!isRegistered()) { + // If we don't already have a registered pass, register pass + // hold on to its name, change isRegistered to true + passID(registerPostPass(std::move(p)), true); + isRegistered(true); + return false; + } + return true; + } + + // Calls ClearPostPass(passID()) + static void clearPass() { + // If the pass is registered, clear it and change isRegistered to false. 
+ if (isRegistered()) { + clearPostPass(passID()); + isRegistered(true); + } + } + + // clang-tidy requires virtual destructor; + virtual ~PassManager() = default; +}; + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole.h new file mode 100644 index 0000000000000000000000000000000000000000..e2d8d5f9a9f2082fe0eb94bb5aee4a7605dc7042 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole.h @@ -0,0 +1,20 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// return true if graph is modified +TORCH_API bool PeepholeOptimize( + const std::shared_ptr& graph, + bool disable_shape_peepholes = false); +// return true if graph is modified +TORCH_API bool PeepholeOptimize( + Block* block, + bool disable_shape_peepholes = false); +// return true if graph is modified +TORCH_API bool FuseAddMM(const std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_list_idioms.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_list_idioms.h new file mode 100644 index 0000000000000000000000000000000000000000..d20df9571db01e0e0a0a3991b410621bfcb346ba --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_list_idioms.h @@ -0,0 +1,72 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Peephole Optimizes List ops such as len(li) and li[1]. +// 1. Construct/Unpack optimizations +// Given a function like this: +// def foo(a, b): +// li = [a, b] +// x, y = li +// return x, y +// This pass produces (after dead code elimination): +// def foo(a, b): +// return a, b +// +// This is only applied to lists that are not modified. +// +// 2. getitem optimizations +// Given a function like this: +// def foo(a, b): +// li = [a, b] +// x = li[0] +// return x +// This pass produces (after dead code elimination): +// def foo(a, b): +// return a +// +// This optimization can only happen if the list is not modified. +// +// 3. len optimizations +// Given a function like this: +// def foo(): +// li = [1, 2] +// return len(li) +// This pass produces (after dead code elimination): +// def foo(): +// return 2 +// +// This has the same requirements as the getitem optimizations. +// +// 4. ListConstruct + ListConstruct +// Given a function like this: +// def foo(): +// return [1, 2] + [3, 4] +// This pass produces (after dead code elimination): +// def foo(): +// return [1, 2, 3, 4] +// +// This is only applied to lists that are not modified. +// +// 5. Slice +// Given a function like this: +// def foo(): +// return [1, 2, 3, 4, 5][0:2] +// This pass produces (after deadcode elimination): +// def foo(): +// return [1, 2] +// +// Currently this is invoked as part of PeepholeOptimize +// return true if graph is modified. +// If `refine_list_len` is true will attempt to refine the len of lists through +// len comparisons and assertions. This does not generally optimize pytorch +// programs so it is not called by default in PeepholeOptimize. 
+TORCH_API bool PeepholeOptimizeListIdioms( + const std::shared_ptr& graph, + bool refine_list_len = false); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/prepack_folding.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/prepack_folding.h new file mode 100644 index 0000000000000000000000000000000000000000..13761dc5473efd83c02339a8205cb4cfeb8038f4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/prepack_folding.h @@ -0,0 +1,17 @@ +#pragma once + +#include +#include + +namespace torch { +namespace jit { + +using PrePackingOpsFilterFn = std::function; + +void PrePackingOpsFolder( + script::Module& m, + const PrePackingOpsFilterFn& is_foldable_op, + const std::string& attr_prefix); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_dropout.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_dropout.h new file mode 100644 index 0000000000000000000000000000000000000000..19cf29d5de290be1d1b73fffef0b2d2aaadb5f38 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_dropout.h @@ -0,0 +1,14 @@ +#pragma once + +#include +#include + +namespace torch { +namespace jit { + +TORCH_API void removeDropout(std::shared_ptr& graph); + +TORCH_API void removeDropout(script::Module& module); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_inplace_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_inplace_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..e597da64860be2ad26186e4862ef2db348cbe1ee --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_inplace_ops.h @@ -0,0 +1,14 @@ +#pragma once + +#include + +#include + +namespace torch { +namespace jit { +// see .cpp for docs +TORCH_API void RemoveInplaceOps(const std::shared_ptr& graph); + +TORCH_API void ImplicitCastForBinaryInplaceOps(Block* block); +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_mutation.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_mutation.h new file mode 100644 index 0000000000000000000000000000000000000000..eb8cf195ee4ca19ce399435e8586d4eecb8b3397 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_mutation.h @@ -0,0 +1,81 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch { +namespace jit { + +struct TORCH_API MutationRemover { + MutationRemover( + std::shared_ptr graph, + c10::optional> mutation_filter = c10::nullopt) + : mutation_filter_(mutation_filter), + aliasDb_(nullptr), + graph_(std::move(graph)) {} + + // return true if graph is modified + bool removeListMutation(); + + // return true if graph is modified + bool removeTensorMutation(); + + bool isSpecialMappedOp(Node* n) { + return n->matches("aten::zero_(Tensor(a!) self) -> Tensor(a!)") || + n->matches( + "aten::fill_.Scalar(Tensor(a!) self, Scalar value) -> Tensor(a!)") || + n->matches( + "aten::normal_(Tensor(a!) self, float mean=0, float std=1, *, Generator? 
generator=None) -> Tensor(a!)"); + } + + bool inplaceOpVariant(Node* n); + + static bool hasSideEffectOrAlias(Value* v, AliasDb* aliasDb); + + private: + Node* createSpecialMappedOp(Node* n); + bool listMutationFollowingListConstruct(Node* n); + bool tryMakeCreationAndMutationAtomic( + Value* mutated_value, + Node* mutating_op); + bool tryMakeUnaliasedIfOutputAndMutationAtomic( + Value* mutated_value, + Node* mutating_op); + // return true if graph is modified + bool RemoveListMutation(Block* block); + // return true if graph is modified + bool RemoveTensorMutation(Block* block); + + AliasDb* getOrCreateAliasDb() { + if (!aliasDb_) { + aliasDb_ = std::make_unique(graph_); + } + return aliasDb_.get(); + } + + c10::optional> mutation_filter_; + std::unique_ptr aliasDb_ = nullptr; + std::shared_ptr graph_; +}; + +// Removes list mutation with functional equivalents +// return true if graph is modified +TORCH_API bool RemoveListMutation(const std::shared_ptr& graph); + +// Replaces in-place aten ops with their functional equivalents +// when it can be proven that this does not change graph semantics +// if `mutation_filter` is present, the pass will only attempt to +// remove mutation on nodes which return true for the filter +// return true if graph is modified +TORCH_API bool RemoveTensorMutation( + const std::shared_ptr& graph, + c10::optional> mutation_filter = c10::nullopt); + +// Replaces in-place aten activation ops with their functional equivalence +TORCH_API bool InplaceToFunctionalActivation( + const std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/requires_grad_analysis.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/requires_grad_analysis.h new file mode 100644 index 0000000000000000000000000000000000000000..c7b80423dc5eafd88eb8f22255e472fda0d954ab --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/requires_grad_analysis.h @@ -0,0 +1,16 @@ +#pragma once + +#include + +#include + +namespace torch { +namespace jit { + +struct Graph; +struct ArgumentSpec; + +TORCH_API void PropagateRequiresGrad(std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/shape_analysis.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/shape_analysis.h new file mode 100644 index 0000000000000000000000000000000000000000..670072a0b09b45337cd8bd80eb5bd9e12ee7f0dc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/shape_analysis.h @@ -0,0 +1,43 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace jit { + +struct Graph; + +struct propagation_error : std::exception {}; + +class PropertyPropBase { + // Used for both Shape Propagation and Dtype/Device Propagation + public: + explicit PropertyPropBase(std::shared_ptr graph) + : graph_(std::move(graph)) {} + virtual ~PropertyPropBase() = default; + + void propagateBlock(Block* block, bool insert_expands = true); + // insert_expands is used for shape inference + + void processIf(Node* node); + void processLoop(Node* node); + + protected: + virtual void propagateNode(Node* node, bool insert_expands = true) = 0; + void setUnshapedType(Value* o); + void setUnshapedType(Node* node); + std::shared_ptr graph_; +}; + +TORCH_API void EraseShapeInformation(const std::shared_ptr& graph); 
+TORCH_API void PropagateInputShapes(const std::shared_ptr& graph); + +TORCH_API bool mergeTypes( + ArrayRef lhs, + ArrayRef rhs, + ArrayRef outputs); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/subgraph_rewrite.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/subgraph_rewrite.h new file mode 100644 index 0000000000000000000000000000000000000000..d932c0c1f74fa73b14e8d041a55e8a82d33bdd62 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/subgraph_rewrite.h @@ -0,0 +1,117 @@ +/** This file defines API for pattern-based subgraph rewrites. + * + * The API can be used for finding concrete patterns in the model and replacing + * the corresponding subgraphs with another subgraph. A special case of such + * rewrites is fusion, where the new subgraph consists of just a single node. + * + * There is a default set of the most common patterns that everyone could use. + * Alternatively, an arbitrary pattern can be registered. + */ +#pragma once + +#include +#include + +#include +#include +#include + +namespace torch { +namespace jit { + +// Forward declarations. +struct RewritePatternDescr; +struct Match; + +using MatchFilter = std::function< + bool(const Match&, const std::unordered_map&)>; + +/** Run pattern-based subgraph rewrites on all methods in the module. + * + * This pass will go through all methods in the module and try to replace all + * recognized patterns (see SubgraphRewriter::RegisterDefaultPatterns for the + * list of these patterns). + */ +TORCH_API Module PatternBasedRewrite(const Module& module); + +/** A class implementing API for pattern-based subgraph rewrites. + * + * To perform pattern-based subgraph rewrites on a module using this API, one + * needs to create an object of such class, register rewrite patterns and run + * the transformation pass (`runOnModule`). + * + * To use standard patterns, one could use `RegisterDefaultPatterns`. + * + * To enable rewrites of custom patterns, the custom patterns must be registered + * with `RegisterRewritePattern`. + */ +class TORCH_API SubgraphRewriter { + public: + // Run pattern-based subgraph rewrite pass on the module. + Module runOnModule(const Module& module); + + // Run pattern-based subgraph rewrite pass on the graph (used in testing). + // `filter` is a function that does extra filtering on the match. If it + // returns false for a given Match, we'll skip the Match. The filter + // function's arguments consist of a Match and a value map from parsing the + // pattern graph. Both the Match and the value map are necessary because we + // need to 1) do extra filtering on the matched result as well as 2) refer to + // the values in the matched result through the values in the pattern graph. + void runOnGraph( + std::shared_ptr& graph, + const std::vector& filters); + + void runOnGraph( + std::shared_ptr& graph, + const MatchFilter& filter = + [](const Match&, const std::unordered_map&) { + return true; + }) { + runOnGraph(graph, std::vector({filter})); + } + + // Register standard rewrite patterns. + void RegisterDefaultPatterns(); + + /** Register a custom rewrite pattern. + * + * The method takes two parameters specifying the pattern: + * \p PATTERN - IR string representing the pattern subgraph. + * \p REPLACEMENT - IR string representing the replacement subgraph. 
+ * \p value name map - vector of pairs mapping values in the replacement graph + * to the values in the pattern graph. Used for preserving source range info + * across graph rewrite. + * + * See examples of pattern registering in `RegisterDefaultPatterns`. + */ + void RegisterRewritePattern( + const std::string& pattern, + const std::string& replacement, + const std::vector>& value_name_pair = + {}); + + private: + std::vector patterns_; + std::unordered_set nodes_to_delete_; + + void rewriteSinglePatternOnGraph( + std::shared_ptr& graph, + const RewritePatternDescr& pattern, + const std::vector& filters); + + bool overlapsWithPreviousMatches(const Match* match); +}; + +/** Rewrite pattern descriptor. + * + * This structure is used in the implementation of `SubgraphRewriter` and + * is not supposed to be used externally. + */ +struct RewritePatternDescr { + std::string pattern; + std::string replacement; + std::unordered_map value_name_map; +}; + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/symbolic_shape_analysis.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/symbolic_shape_analysis.h new file mode 100644 index 0000000000000000000000000000000000000000..824740792aaf031a0adcc181cb84a666ef539fe4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/symbolic_shape_analysis.h @@ -0,0 +1,58 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { + +// CAUTION NOT TO BE USED, STILL A WIP, NOT STABLE + +TORCH_API void PropagateShapesOnGraph(std::shared_ptr& graph); + +// CAUTION NOT TO BE USED, STILL A WIP, NOT STABLE +// From [beg, end) attempt to propagate shapes and +// build up a graph that will compute all remaining symbolic +// shapes in [beg, end) that can be executed before beg + +struct ShapeComputeGraphMapping { + ShapeComputeGraphMapping( + std::shared_ptr partial_eval_shape_graph, + std::unordered_map + enclosing_graph_value_to_shape_graph_input, + std::unordered_map graph_output_to_symbolic_shape_dim) + : partial_eval_shape_graph(std::move(partial_eval_shape_graph)), + enclosing_graph_value_to_shape_graph_input_( + std::move(enclosing_graph_value_to_shape_graph_input)), + graph_output_to_symbolic_shape_dim_( + std::move(graph_output_to_symbolic_shape_dim)){}; + + std::shared_ptr partial_eval_shape_graph; + std::unordered_map + enclosing_graph_value_to_shape_graph_input_; + std::unordered_map graph_output_to_symbolic_shape_dim_; +}; + +TORCH_API c10::optional +PropagateShapesAndBuildLargeShapeComputeGraph( + std::shared_ptr& graph, + Node* beg, + Node* end); + +// don't insert complete tensor shapes in shape compute graphs and instead +// rely on our partial evaluation pipeline to propagate information. +// this is a good proxy for our ability to propagate non-complete shape +// information. 
+TORCH_API bool setSymbolicShapeAnalysisTestMode(bool value); +TORCH_API bool symbolicShapeAnalysisTestModeEnabled(); + +using SSAInput = std::variant; +TORCH_API c10::optional> +calculateSymbolicShapesOnOp( + const FunctionSchema* schema, + const std::vector& inputs); +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/symbolic_shape_runtime_fusion.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/symbolic_shape_runtime_fusion.h new file mode 100644 index 0000000000000000000000000000000000000000..414d699d2e4cb762d8e759081b761345f5cd55aa --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/symbolic_shape_runtime_fusion.h @@ -0,0 +1,55 @@ +#pragma once + +#include +#include +#include + +#include + +namespace torch { +namespace jit { + +// Takes in a TensorExprGraph of static shapes and generalizes the input shapes +// to symbolic dimensions. Dimensions of value 1 will be preserved, otherwise +// dimensions with the same value will be bucketed to the same symbolic shape. +// E.g. Tensor(5, 3), Tensor(3, 1) -> Tensor(SS(-1), SS(-2)), Tensor(SS(-2), 1) +// From there, runs symbolic shape inference on the graph, and creates a +// versioning if in the graph with prim::TensorExprDynamicGuard checking if +// the inputs at runtime match the Generalized Symbolic Shapes that are inputs +// to the TE Kernel. The computate to calculate all symbolic dimensions is +// inlined in to the if block with the TE Kernel. All Sym Dim Value* are +// appended to the end of the TE Kernel Graph/Node inputs, and the Node is +// augmented with a integer list attr `symbolic_shape_inputs` that gives the +// mapping from Value * -> Symbolic Shape int64_t value. 
For more lengthy IR +// examples and walkthrough look at ShapeAnalysisTest.DynamicShapesFusion in +// `test_shape_analysis` Returns True on Success, False on Failure, can fail if +// shape propagation fails to propagate # of dims or if complete shapes on +// inputs not set + +TORCH_API bool GenerateGuard( + Node* tensorexpr_graph_node, + bool add_composed_op = false); + +TORCH_API void runTensorExprDynamicGroup(const Code& code, Stack& stack); + +enum class StrideInput { + // Tensors natively store whether they are contiguous or not as a property + // this makes it faster to query `is_contiguous` or + // `is_contiguous(memory_format=channels_last)` + // than looping through the sizes/strides yourself + // For tensors with these properties, we only store one value: + TENSOR_CONT, + TENSOR_CONT_CHANNELS_LAST, + // now, we describe other cases, where there is one stride enum + // per dimension + S_ONE, // STRIDE_ONE: packed + S_CONT, // STRIDE_CONTIGUOUS: stride[i + 1] * sizes[i + 1] + S_TRAN_CONT, // STRIDE_TRANSPOSED_CONTIGUOUS: stride[i-1] * sizes[i-1] + S_AS_ARG, // STRIDE_AS_ARG: stride passed in as runtime value +}; + +TORCH_API std::string toString(StrideInput si); +TORCH_API StrideInput strideInputFromString(const std::string& si); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/tensorexpr_fuser.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/tensorexpr_fuser.h new file mode 100644 index 0000000000000000000000000000000000000000..d951982fde2990f335ed26e688767b349ac1cb5b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/tensorexpr_fuser.h @@ -0,0 +1,75 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace jit { + +// Run TensorExpressions-based fuser. +// If add_composed_op is true, creates a single operation that +// performs both the runtime check that types align +// and then the dispatch to the kernel/unoptimized graph +TORCH_API void FuseTensorExprs( + std::shared_ptr& graph, + size_t min_group_size = 2, + bool add_composed_op = false, + bool fuse_to_dynamic_shapes = false); + +TORCH_API void setTensorExprFuserEnabled(bool val); +TORCH_API bool tensorExprFuserEnabled(); +TORCH_API void setTensorExprDynamicShapeFusionEnabled(bool val); +TORCH_API bool tensorExprDynamicShapeFusionEnabled(); +TORCH_API bool setTexprReductionsEnabled(bool value); +TORCH_API bool texprReductionsEnabled(); + +TORCH_API void RemoveProfileNodesAndSpecializeTypes( + std::shared_ptr& graph); +TORCH_API bool hasTensorTypeSpecialization(Value* v); +TORCH_API void RemoveTensorTypeSpecializations(std::shared_ptr& graph); +TORCH_API void removeTensorTypeSpecializations(Block* block); + +using tensor_type_converter_t = + c10::function_ref; + +// inserts a TypeCheck pattern +// +// around the guarded node that has a Subgraph attribute, this inserts a pattern +// +// if TypeCheck(...): +// guarded_node +// else: +// FallbackGraph(...) +// +// The TypeCheck includes the types of all Tensor inputs to the guarded_node, +// as processed by the type_converter, a lambda +// TensorTypePtr(const TensorTypePtr& t). This allows to erase irrelevant +// aspects of the type. +// +// The Fallback graph will have the same subgraph as the guarded node (with the +// expectation that the guarded_node's subgraph will then be optimized. 
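// Minimal usage sketch for insertTypeGuard (identity type converter; the
// guarded node and the choice of guard symbol are placeholders):
//
//   insertTypeGuard(
//       fusion_group_node,
//       [](const TensorTypePtr& t) { return t; },
//       prim::TypeCheck);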
+TORCH_API void insertTypeGuard( + Node* guarded_node, + tensor_type_converter_t type_converter, + c10::Symbol kind); + +TORCH_API bool usedOnlyInSize(Value* v); +TORCH_API Value* broadcastSizes(at::ArrayRef sizes, AliasDb* db); + +namespace tensorexpr { +TORCH_API bool isSupported(Node* node); + +/// Get the modifiable custom operator set object. +/// +/// For static shapes, if a custom operator has been added to the custom +/// operator set, it will be pulled into the NNC fusion group. But it doesn't +/// work with dynamic shapes unless explicitly register the shape function via +/// `torch::jit::RegisterShapeComputeGraphForSchema` for the custom operator. +/// +/// @return Reference of the custome operator set +/// +TORCH_API OperatorSet& getCustomOperatorSet(); +} // namespace tensorexpr +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/update_differentiable_graph_requires_grad.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/update_differentiable_graph_requires_grad.h new file mode 100644 index 0000000000000000000000000000000000000000..eb51ba00c4c9f8d2ca07dd96def6f5e168160e35 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/update_differentiable_graph_requires_grad.h @@ -0,0 +1,20 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Because differentiable graphs detach the gradients of input Tensors, +// creating and inlining differentiable graphs changes the requires_grad +// property of tensors in the graph. This pass updates prim::profiles +// requires_grad to keep profiled properties up to date, it does not update +// grad properties of other nodes like graph inputs bc the only downstream +// user of the grad property is the profiling executor, which just uses +// the types of prim::profiles +TORCH_API void UpdateDifferentiableGraphRequiresGrad( + std::shared_ptr& diff_forward_graph, + c10::optional new_requires_grad); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/value_refinement_utils.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/value_refinement_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..aa2ab4ea421f5cbba34d9cb973cb8ebe7bf5800d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/value_refinement_utils.h @@ -0,0 +1,81 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { + +// Refine from Value of type List -> len of list +// If a refinement mapping of List Value * -> len is present in a block +// the list is guaranteed to be that length +// TODO: vector may be faster +using ListRefinement = std::unordered_map; + +TORCH_API ListRefinement +intersectRefinements(const ListRefinement& ref1, const ListRefinement& ref2); + +TORCH_API ListRefinement +unionRefinements(const ListRefinement& ref1, const ListRefinement& ref2); + +// Represents the refinement information that can be carried on a boolean +struct BooleanRefinementMapping { + BooleanRefinementMapping( + ListRefinement true_refine, + ListRefinement false_refine) + : true_refine_(std::move(true_refine)), + false_refine_(std::move(false_refine)){}; + BooleanRefinementMapping() = default; // empty + + static BooleanRefinementMapping FalseRefinements( + ListRefinement 
false_refine) { + return BooleanRefinementMapping({}, std::move(false_refine)); + } + + static BooleanRefinementMapping TrueRefinements(ListRefinement true_refine) { + return BooleanRefinementMapping(std::move(true_refine), {}); + } + + BooleanRefinementMapping intersectBooleanRefinementMapping( + BooleanRefinementMapping& other) { + return BooleanRefinementMapping( + intersectRefinements(true_refine_, other.true_refine()), + intersectRefinements(false_refine_, other.false_refine())); + } + + ListRefinement& true_refine() { + return true_refine_; + } + + ListRefinement& false_refine() { + return false_refine_; + } + + private: + ListRefinement true_refine_; + ListRefinement false_refine_; +}; + +TORCH_API void joinIfRefinements( + Node* if_node, + std::unordered_set& throwing_blocks, + ListRefinement& curr_block_refinements, + ListRefinement& true_block_refinements, + ListRefinement& false_block_refinements, + std::unordered_map& info); + +// handles adding blocks to throwing blocks and propagating refinements via +// boolean comparisons +TORCH_API bool handleCommonRefinentOperators( + Node* n, + std::unordered_set& throwing_blocks, + std::unordered_map& info); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/xnnpack_rewrite.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/xnnpack_rewrite.h new file mode 100644 index 0000000000000000000000000000000000000000..d1a64c52c9230ad85a3c3540e120b48532abd707 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/xnnpack_rewrite.h @@ -0,0 +1,21 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace jit { + +TORCH_API void transformConv1dToConv2d(std::shared_ptr& graph); +TORCH_API void transformConv1dToConv2d(script::Module& module); +TORCH_API void insertPrePackedOps(std::shared_ptr& graph); +TORCH_API void insertPrePackedOps(script::Module& module); +TORCH_API void fusePrePackedLinearConvWithClamp(script::Module& module); +TORCH_API void FoldPrePackingOps(script::Module& module); +TORCH_API script::Module optimizeForMobile( + const script::Module& module, + const std::set& optimization_blocklist = {}, + const std::vector& preserved_methods = {}); +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/argument_spec.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/argument_spec.h new file mode 100644 index 0000000000000000000000000000000000000000..06c77edca718cad76bc0db1f63137087fcdaf41b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/argument_spec.h @@ -0,0 +1,511 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +C10_CLANG_DIAGNOSTIC_PUSH() +#if C10_CLANG_HAS_WARNING("-Wshorten-64-to-32") +C10_CLANG_DIAGNOSTIC_IGNORE("-Wshorten-64-to-32") +#endif + +namespace torch::jit { + +// GraphExecutor creates specializations of Graphs for different +// dimensionalitities and types of inputs. 
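Before the declarations themselves, a brief editorial sketch of how such a specialization key is assembled for a flattened input stack; the stack layout and the with_grad flag are illustrative assumptions, and the ArgumentSpec API it uses is declared below:

#include <torch/csrc/jit/runtime/argument_spec.h>

// `stack` is assumed to hold two flattened tensor inputs followed by one
// optional input, in the order ArgumentSpecCreator would walk them.
size_t specializationKey(const torch::jit::Stack& stack) {
  torch::jit::ArgumentSpec spec(
      /*num_flat_tensor_inputs=*/2, /*num_flat_optional_inputs=*/1);
  spec.addTensor(stack[0], /*with_grad=*/true);
  spec.addTensor(stack[1], /*with_grad=*/true);
  spec.addOptional(stack[2]);
  // The hash is accumulated as arguments are added and is used to look up a
  // cached specialization for exactly this combination of inputs.
  return spec.hashCode();
}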
+ +struct ArgumentInfo { + friend struct ArgumentSpec; + using plain_data_type = uint64_t; + + bool defined() const { + return defined_; + } + at::Device device() const { + return at::Device(DeviceType(dev_type_), device_); + } + // XXX: It is guaranteed that this will return false when called on non-tensor + // arguments + bool requires_grad() const { + return requires_grad_; + } + int dim() const { + // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions) + return dim_; + } + at::ScalarType type() const { + return at::ScalarType(type_); + } + TypePtr toType() const { + if (!defined()) + return TensorType::get(); + + return TensorType::create( + type(), device(), c10::optional(dim()), requires_grad()); + } + operator TypePtr() const { + return toType(); + } + + private: + unsigned defined_ : 1; + unsigned requires_grad_ : 1; + unsigned : 5; + unsigned dim_ : 8; + unsigned device_ : 8; + unsigned type_ : 8; + unsigned dev_type_ : 16; + unsigned : 16; +}; + +static_assert( + std::is_standard_layout::value, + "ArgumentInfo is to be a POD struct"); +static_assert( + sizeof(ArgumentInfo) == sizeof(ArgumentInfo::plain_data_type), + "ArgumentInfo is expected to be a 32-bit struct"); + +struct ArgumentSpec { + ArgumentSpec(size_t num_flat_tensor_inputs, size_t num_flat_optional_inputs) + : hash_code(c10::hash_combine( + num_flat_tensor_inputs, + num_flat_optional_inputs)) { + tensor_args.reserve(num_flat_tensor_inputs); + optional_presence.reserve(num_flat_optional_inputs); + } + + void addOptional(const IValue& input) { + bool is_present = !input.isNone(); + optional_presence.push_back(is_present); + hash_code = c10::hash_combine(hash_code, is_present); + } + + void addTensor(const IValue& input, bool with_grad) { + AT_ASSERT(input.isTensor(), "Expected Tensor but found ", input.tagKind()); + tensor_args.emplace_back(); + auto& arg = tensor_args.back(); + // Initialize all fields to 0. This is convenient, because e.g. + // requires_grad() can be checked even on tensors AND will make + // padding bits all 0s. 
+ std::memset(&arg, 0, sizeof(ArgumentInfo)); + + // [argspec refcounting] reinterpret the IValue to avoid having to refcount + // the Tensor microbenchmarks + // https://github.com/zdevito/pytorch/commit/21e7200a0a0fc456bea2f10e95b1781f83933d10 + // show overhead in extra refcounting along this path + const at::Tensor* t = reinterpret_cast(&input); + arg.defined_ = t->defined(); + if (arg.defined_) { + arg.requires_grad_ = with_grad && autograd::Variable(*t).requires_grad(); + arg.dim_ = t->dim(); + // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions) + at::Device device = t->device(); + arg.dev_type_ = + // NOLINTNEXTLINE(bugprone-signed-char-misuse) + static_cast::type>(device.type()); + // NOLINTNEXTLINE(bugprone-signed-char-misuse) + arg.device_ = device.index(); + arg.type_ = static_cast(t->scalar_type()); + } + combineHash(arg); + } + + void combineHash(const ArgumentInfo& arg) { + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + ArgumentInfo::plain_data_type arg_data; + std::memcpy(&arg_data, &arg, sizeof(ArgumentInfo)); + hash_code = c10::hash_combine(hash_code, arg_data); + } + + // equality is fast: check ninputs, and then check the raw array data, + // there are no size/stride indirections + // hopefully std::vector has fast equality + bool operator==(const ArgumentSpec& spec) const { + if (optional_presence != spec.optional_presence) { + return false; + } + if (tensor_args.size() != spec.tensor_args.size()) + return false; + // NB: we need to break out early when there are no elements, because + // passing a nullptr to memcmp is UB. + if (tensor_args.empty()) + return true; + return std::memcmp( + tensor_args.data(), + spec.tensor_args.data(), + tensor_args.size() * sizeof(ArgumentInfo)) == 0; + } + bool operator!=(const ArgumentSpec& spec) const { + return !(*this == spec); + } + size_t numTensors() const { + return tensor_args.size(); + } + const ArgumentInfo& tensorAt(size_t i) const { + return tensor_args[i]; + } + size_t numOptionals() const { + return optional_presence.size(); + } + bool isPresent(size_t i) const { + return optional_presence[i]; + } + size_t hashCode() const { + return hash_code; + } + + private: + size_t hash_code; // precomputed on construction + std::vector tensor_args; + std::vector optional_presence; +}; + +namespace { +static constexpr size_t ARG_SPEC_DEPTH_LIMIT = 128; +} + +// ArgumentSpecCreator takes an initial graph and comes up with a set +// of simple instructions to compute the ArgumentSpec given a set of +// input tensors. 
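A short editorial sketch of the intended flow through the creator declared below; `graph` and `stack` are assumed inputs, and consulting GradMode for the with_grad flag is an assumption made for illustration:

#include <c10/core/GradMode.h>
#include <torch/csrc/jit/runtime/argument_spec.h>

// `graph` is the function's IR; `stack` holds its flattened inputs.
void specializeForInputs(
    torch::jit::Graph& graph,
    const torch::jit::Stack& stack) {
  torch::jit::ArgumentSpecCreator creator(graph);
  // Scan the stack according to the creator's instructions and build the key.
  torch::jit::ArgumentSpec spec =
      creator.create(/*with_grad=*/c10::GradMode::is_enabled(), stack);
  // Write the recovered tensor types back onto the graph's inputs.
  creator.specializeTypes(graph, spec);
}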
+struct TORCH_API ArgumentSpecCreator { + // instructs acts on a stack of a list of input IValues + // at the beginning the stack contains a single list of the inputs to the + // function the ENTER_ instructs descend into subobjects and push new lists + // onto the stack + enum Inst : char { + ENTER_TUPLE, // consume a tuple ivalue from the top-most list, and push the + // list of its elements onto the stack as a new list + ENTER_OBJECT, // same as ENTER_TUPLE, but the input is a class + LEAVE, // pop the top-most list from the stack + SKIP, // consume an element from the top-most list, and discard + SPECIALIZE_OPTIONAL_TENSOR, // consume a optional tensor for the top-most + // list, and add it to the ArgSpec key being + // created + SPECIALIZE_TENSOR, // consume a tensor for the top-most + // list, and add it to the ArgSpec key being created + SPECIALIZE_OPTIONAL, + // consume a nontensor optional from the top-most list, + // and add it to the ArgSpec key being created + }; + ArgumentSpecCreator(Graph& graph); + ArgumentSpec create(bool with_grad, const Stack& stack) const; + void specializeTypes(Graph& g, const ArgumentSpec& spec) const; + void dump() const; + using WrittenSlots = std::unordered_set; + + private: + void scan( + const TypePtr& typ, + size_t depth, + const WrittenSlots& written_slots); + size_t num_inputs_; + size_t num_tensors_ = 0; + size_t num_optionals_ = 0; + std::vector instructions_; +}; + +// CompleteArgumentSpec represents one particular specialization. +// It is designed so that it can be created, hashed, and compared quickly +// since it is used along the hot-path of the JIT to check if the code +// we have created is valid for the given inputs. + +// COmpleteArgumentInfoPOD is only used internally in CompleteArgumentSpec +// API users should use ArgumentInfo +struct CompleteArgumentInfoPOD { + // total size is 64-bit + unsigned is_tensor : 8; // all other fields are invalid if this is false + unsigned type : 8; // scalar type + unsigned defined : 1; + unsigned requires_grad : 1; + signed device : 14; + unsigned dev_type : 16; + unsigned + total_dims : 16; // all TensorInfoPODs are in CompleteArgumentSpec's + // tensor_info() array. total_dims is the total number of + // dimensions seen so far in all previous members of + // tensor_info(), including this tensor 2*total_dims + // becomes the offset into the sizes_strides list for the + // _next_ tensor in the tensor_info array for tensor 0, + // the offset is always 0 +}; + +static_assert( + sizeof(CompleteArgumentInfoPOD) == sizeof(int64_t), + "CompleteArgumentInfoPOD must be 64-bit struct for CompleteArgumentSpec encoding to work"); + +struct CompleteArgumentInfo; + +struct CompleteArgumentSpec { + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + CompleteArgumentSpec(bool with_grad, at::ArrayRef inputs) + : hash_code(0), ninputs(inputs.size()) { + int32_t all_dims = 0; + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + const int32_t num_inputs = inputs.size(); + for (const auto i : c10::irange(num_inputs)) { + if (!inputs[i].isTensor()) + continue; + auto& tensor = inputs[i].toTensor(); + all_dims += tensor.defined() ? 
tensor.ndimension() : 0; + } + // allocate enough room for all TensorPODs and dimensions + data.resize(ninputs + all_dims * 2); + + // and reinterpret our data array as these structs + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + auto* pods = reinterpret_cast(data.data()); + int64_t* next_dim = sizes_strides(); + int32_t total_dims = 0; + for (const auto i : c10::irange(num_inputs)) { + auto& pod = pods[i]; + pod.is_tensor = static_cast(inputs[i].isTensor()); + if (pod.is_tensor) { + at::Tensor t = inputs[i].toTensor(); + pod.defined = t.defined(); + if (pod.defined) { + pod.type = static_cast(t.scalar_type()); + // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions) + at::Device device = t.device(); + // NOLINTNEXTLINE(bugprone-signed-char-misuse) + pod.dev_type = static_cast::type>( + device.type()); + // NOLINTNEXTLINE(bugprone-signed-char-misuse) + pod.device = device.index(); + pod.requires_grad = with_grad && t.requires_grad(); + total_dims += t.ndimension(); + auto sizes = t.sizes(); + std::copy(sizes.begin(), sizes.end(), next_dim); + next_dim += sizes.size(); + auto strides = t.strides(); + std::copy(strides.begin(), strides.end(), next_dim); + next_dim += strides.size(); + } + } + // each POD has a running tally of all dimensions including its own + TORCH_CHECK( + total_dims < std::numeric_limits::max(), + "The number of dims cannot be packed into CompleteArgumentSpec:", + total_dims); + pod.total_dims = total_dims; + } + // we precompute the hash_code to minimize the time inside of hash + // table operations where we may need to hold a compiler cache lock. + hash_code = c10::hash_combine(0, ninputs); + for (auto d : data) { + hash_code = c10::hash_combine(hash_code, d); + } + } + + // equality is fast: check ninputs, and then check the raw array data, + // there are no size/stride indirections + bool operator==(const CompleteArgumentSpec& spec) const { + return ninputs == spec.ninputs && data == spec.data; + } + bool operator!=(const CompleteArgumentSpec& spec) const { + return !(*this == spec); + } + friend struct CompleteArgumentInfo; + CompleteArgumentInfo at(size_t i) const; + size_t size() const { + return ninputs; + } + size_t hashCode() const { + return hash_code; + } + + private: + ArrayRef tensor_info() const { + return ArrayRef( + reinterpret_cast(data.data()), ninputs); + } + // the start of the sizes_strides information, which comes after the + // CompleteArgumentInfoPOD list. + const int64_t* sizes_strides() const { + return data.data() + ninputs; + } + int64_t* sizes_strides() { + return data.data() + ninputs; + } + size_t hash_code; // precomputed on construction + size_t ninputs; + // layout is ninputs of TensorPOD (each 64-bit) followed by their size and + // stride info for 3 tensors: + // [t0POD][t1POD][t2POD]... 
+ // [t0 sizes][t0 strides][t1 sizes][t1 strides][t2 sizes][t2 strides] + std::vector data; +}; + +// public view of compressed CompleteArgumentInfo +struct CompleteArgumentInfo { + CompleteArgumentInfo(const CompleteArgumentSpec& spec, const int i) + : spec(spec), i(i) {} + bool isTensor() const { + return pod(i).is_tensor; + } + at::ScalarType type() const { + return at::ScalarType(pod(i).type); + } + bool defined() const { + return pod(i).defined; + } + bool requires_grad() const { + return pod(i).requires_grad; + } + at::Device device() const { + return at::Device( + DeviceType(pod(i).dev_type), + static_cast(pod(i).device)); + } + int ndimension() const { + // See [valid range], it is always valid to ask for offset for (i + 1) + return (sizes_strides_offset(i + 1) - sizes_strides_offset(i)) / 2; + } + at::IntArrayRef sizes() const { + return at::IntArrayRef( + spec.sizes_strides() + sizes_strides_offset(i), ndimension()); + } + at::IntArrayRef strides() const { + int ndim = ndimension(); + return at::IntArrayRef( + spec.sizes_strides() + sizes_strides_offset(i) + ndim, ndim); + } + operator TypePtr() const { + if (!defined()) + return TensorType::get(); + return TensorType::create( + type(), + device(), + c10::VaryingShape{sizes()}, + c10::VaryingShape{strides()}, + requires_grad()); + } + + private: + // offsetinto sizes_strides() array where the sizes start for tensor j + // [valid range] valid range is [0, ninputs] + // (i.e. you can ask for the offset at ninputs, which would be the offset of + // the next tensor if it existed) + int sizes_strides_offset(int j) const { + if (j == 0) + return 0; + // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions) + return 2 * pod(j - 1).total_dims; + } + const CompleteArgumentInfoPOD& pod(int j) const { + return spec.tensor_info().at(j); + } + const CompleteArgumentSpec& spec; + const int i; +}; + +inline std::ostream& operator<<(std::ostream& out, const ArgumentInfo& info) { + if (!info.defined()) { + return out << ""; + } + out << "Tensor(device=" << info.device() << ", type=" << toString(info.type()) + << ", requires_grad=" << info.requires_grad() << ", dims=" << info.dim() + << ")"; + return out; +} + +inline std::ostream& operator<<(std::ostream& out, const ArgumentSpec& spec) { + out << "{"; + for (const auto i : c10::irange(spec.numTensors())) { + if (i > 0) + out << ", "; + out << spec.tensorAt(i); + } + out << "; "; + for (const auto i : c10::irange(spec.numOptionals())) { + if (i > 0) + out << ", "; + out << spec.isPresent(i); + } + out << "}"; + return out; +} + +inline std::ostream& operator<<( + std::ostream& out, + const CompleteArgumentInfo& info) { + if (!info.defined()) { + return out << ""; + } + out << "Tensor(device=" << info.device() << ", type=" << toString(info.type()) + << ", requires_grad=" << info.requires_grad() + << ", sizes=" << info.sizes() << ", strides=" << info.strides() << ")"; + return out; +} + +inline std::ostream& operator<<( + std::ostream& out, + const CompleteArgumentSpec& spec) { + out << "{"; + for (const auto i : c10::irange(spec.size())) { + if (i > 0) + out << ", "; + out << spec.at(i); + } + out << "}"; + return out; +} + +inline CompleteArgumentInfo CompleteArgumentSpec::at(size_t i) const { + return CompleteArgumentInfo(*this, i); +} + +inline c10::optional convertOptional( + c10::optional const& from) { + return (from) ? 
c10::optional(static_cast(*from)) + : c10::optional{}; +} + +} // namespace torch::jit + +namespace std { + +template +struct hash> { + size_t operator()(const c10::VaryingShape& vs) const { + return c10::get_hash( + vs.size(), + vs.size() ? vs.sizes().value() : std::vector>()); + } +}; + +template <> +struct hash { + size_t operator()(const c10::TensorType& ptt) const { + return c10::get_hash< + c10::optional, + c10::VaryingShape, + c10::VaryingShape, + c10::optional>( + torch::jit::convertOptional(ptt.scalarType()), + ptt.sizes(), + ptt.strides(), + ptt.requiresGrad()); + } +}; + +template <> +struct hash { + size_t operator()(const torch::jit::ArgumentSpec& spec) const { + return spec.hashCode(); + } +}; +template <> +struct hash { + size_t operator()(const torch::jit::CompleteArgumentSpec& spec) const { + return spec.hashCode(); + } +}; +} // namespace std + +C10_CLANG_DIAGNOSTIC_POP() diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/autodiff.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/autodiff.h new file mode 100644 index 0000000000000000000000000000000000000000..32a8166caf0e5936f3eea292aa7a895ad6ddbc58 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/autodiff.h @@ -0,0 +1,94 @@ +#pragma once + +#include +#include + +#include +#include + +namespace torch::jit { + +using value_list = std::vector; +// clang-format off +// Example showcasing how Gradient is constructed: +// +// Let's assume we have a function f, `m` and `n` do not require grad +// (`n` can depend only on `m`): +// y, n = f(x, m) +// +// Now, let's assume that the reverse of f (called f') needs to use values of `x`, `t` and `y`. +// `t` is an intermediate value produced in the body of f, and let's assume that it requires +// grad too. +// +// In this case differentiate(f) will return this: +// y, n, t = f(x, m) // `t` is appended to the output list +// dx = f'(dy, dt, x, t, y) // No `dm` or `dn` because they do not require gradient +// // All needed values from f are prepended to the input list +// +// f_real_outputs = 2 // Only first two outputs were present in f originally +// df_input_vjps = {0, 2} // i.e. connect grad_fn of y and t variables produced by f, +// y t // with y's output_nr = 0 and t's output_nr = 1 +// df_input_captures = {I0, O2, O0} // Order matches the prefix of inputs to df +// x t y +// df_output_vjps = {0} // i.e. connect next_edge[0] of grad_fn to x's (grad_fn, output_nr). +// +// Terminology: vjp = vector-jacobian product +// clang-format on + +struct Gradient { + explicit operator bool() const { + return df != nullptr; + } + std::shared_ptr f; + std::shared_ptr df; + + // Describes how to construct outputs of f from what its graph will return. + // This is necessary because some trailing outputs are intermediates produced + // only to be saved for df (and should be ignored). + size_t f_real_outputs = 0; // initialized for safety. + + // df inputs are split into two sections: vjps (aka grad_outputs) and + // captures. VJPs are "seeds" for the gradient computation given for each + // input capture of an Output kind. Captures are values the need to be saved + // when f is run. We handle inputs specially, because this allows us to avoid + // adding extra vjps as df inputs. + + std::vector df_input_vjps; // Offsets into f's outputs. 
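As an editorial aside before the remaining capture fields: a minimal sketch of how one of these structs is obtained via differentiate(), which is declared after the struct; the `graph` argument is an assumed forward graph and nothing below is from the original header:

#include <torch/csrc/jit/runtime/autodiff.h>

// differentiate() may mutate `graph`, appending the extra trailing outputs
// that the reverse graph needs captured.
void splitForwardBackward(std::shared_ptr<torch::jit::Graph>& graph) {
  torch::jit::Gradient grad = torch::jit::differentiate(graph);
  if (grad) {  // true iff a reverse graph df was produced
    // grad.f             : forward graph, with trailing capture outputs added
    // grad.df            : reverse graph consuming vjps + captures
    // grad.f_real_outputs: how many of f's outputs the caller actually sees
  }
}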
+ // capture can come from inputs or outputs + std::vector df_input_captured_inputs; // Offsets into f's inputs + std::vector df_input_captured_outputs; // Offsets into f's outputs + + // df will produce vjps for a subset of inputs of f that required grad. + // df_output_vjps[idx] == inp_idx means that idx-th output of df produces a + // vjp for inp_idx-th input of f. + std::vector df_output_vjps; // Offsets into f's inputs. + + // How to use gradient to implement a differentiable autograd function: + // When running f: + // - Unwrap input Variables + // - Run f's graph + // - Create grad_fn + // - Wrap outputs in Variables (assume we have a tensor_outputs array): + // outputs = map(Variable, tensor_output) + // for i, offset in enumerate(df_input_vjps): + // outputs[offset].set_grad_fn(grad_fn, output_nr=i) + // - Use df_output_vjps to connect next_edges of grad_fn: + // for idx in df_output_vjps: + // grad_fn.add_next_edge(inputs[idx].gradient_edge()) + // - Save captures for df (care needs to be taken to use SavedVariables for + // inputs and outputs that we will actually return) + // - Return outputs[:f_real_outputs] + // + // When running df: + // - Concatenate received vjps and captured Variables + // - Interpret df + // - Wrap outputs of df into Variables (that don't require grad) +}; +TORCH_API Gradient differentiate(std::shared_ptr& graph); + +// can we take a derivative of this node symbolically? +TORCH_API bool isDifferentiable(const Node* n); +TORCH_API bool isDifferentiable(Graph& g); +TORCH_API bool isZero(Value* v); + +} // namespace torch::jit diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/calculate_necessary_args.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/calculate_necessary_args.h new file mode 100644 index 0000000000000000000000000000000000000000..e1aff151f35e421e1d06be6de259953b83c23ba1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/calculate_necessary_args.h @@ -0,0 +1,69 @@ +#pragma once + +#include +#include +#include + +namespace torch::jit { + +// Calculates the number of args that need to be passed in. +// Less args may be needed if defaults are provided. +// Returns: {number args needed, number of out args} +inline std::pair CalculateNecessaryArgs( + const std::vector& schema_args, + at::ArrayRef actual_inputs, + bool allow_trailing_out_args) { + if (schema_args.empty()) { + return std::make_pair(0, 0); + } + + // count number of out arguments + int64_t schema_idx = static_cast(schema_args.size()) - 1; + if (allow_trailing_out_args) { + // skip over out arguments in the end. 
+ while (schema_idx >= 0) { + const auto& current_arg = schema_args.at(schema_idx); + if (!current_arg.is_out()) { + break; + } + schema_idx--; + } + } + + int64_t num_out = static_cast(schema_args.size()) - schema_idx - 1; + + if (schema_args.size() < actual_inputs.size()) { + return std::make_pair(actual_inputs.size(), num_out); + } + + // if it is the default args, we reset the index to the last element + if (!allow_trailing_out_args) { + schema_idx = schema_args.size() - 1; + } + // keeps track of trailing unnecessary args + while (schema_idx >= 0) { + // this means it is not default argument, so it is necessary + if (!schema_args.at(schema_idx).default_value().has_value()) { + return std::make_pair(schema_idx + 1, num_out); + } else { + auto schema_value = + schema_args.at(schema_idx).default_value().value().toIValue(); + // non-const value will become nullptr here, so will be marked necessary + // non-const would include prim::ListConstruct, prim::DictConstruct as + // well. + auto actual_value = toIValue(actual_inputs[schema_idx]); + if (!actual_value.has_value()) { + return std::make_pair(schema_idx + 1, num_out); + } + // if the IR has same value as default value of the schema, + // it is not necessary argument. + if (schema_value != actual_value.value()) { + return std::make_pair(schema_idx + 1, num_out); + } + } + schema_idx--; + } + return std::make_pair(0, num_out); +} + +} // namespace torch::jit diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/custom_operator.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/custom_operator.h new file mode 100644 index 0000000000000000000000000000000000000000..64d514374f58e69b732133ce324053d6d1bebc4c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/custom_operator.h @@ -0,0 +1,30 @@ +#pragma once + +#include +#include +#include + +namespace torch::jit { + +/// Registration class for new operators. Effectively calls +/// `torch::jit::registerOperator` for every supplied operator, but allows doing +/// so in the global scope when a `RegisterOperators` object is assigned to a +/// static variable. +/// Note: This is *not* the custom operator API. If you want to register custom +/// operators, take a look at torch::RegisterOperators. +struct TORCH_API RegisterOperators { + RegisterOperators() = default; + + /// Registers a vector of already created `Operator`s. + /// The operator element is now optional to filter null ops. It's backward + /// compatible and works for selective operator registration. + explicit RegisterOperators(std::vector> operators) { + for (c10::optional& o : operators) { + if (o) { + registerOperator(std::move(o.value())); + } + } + } +}; + +} // namespace torch::jit diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/decomposition_registry.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/decomposition_registry.h new file mode 100644 index 0000000000000000000000000000000000000000..8633609bcf2a89d214a501d11cc803c86492b198 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/decomposition_registry.h @@ -0,0 +1,33 @@ +#pragma once +// This file is temporary until native_functions.yaml and derivatives.yaml are +// merged. 
Ideally this should all go into native_functions.yaml + +#include +#include + +namespace torch::jit { + +TORCH_API c10::optional> GetDecomposition( + const FunctionSchema& schema); + +TORCH_API void RegisterDecomposition( + const FunctionSchema& schema, + std::shared_ptr g); + +TORCH_API void RunDecompositions(std::shared_ptr g); + +TORCH_API c10::optional GetDecompositionFunction( + const FunctionSchema& schema); + +// For invocation in C++, recommended is to assign to static local variable +TORCH_API Function* GetDecompositionExecutor(const char* schema_literal); + +TORCH_API Function* GetDecompositionExecutor(const FunctionSchema& schema); + +TORCH_API void run_jit_decomposition( + const c10::OperatorHandle& op, + torch::jit::Stack* stack); + +TORCH_API bool has_jit_decomposition(const FunctionSchema& schema); + +} // namespace torch::jit diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/decomposition_registry_util.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/decomposition_registry_util.h new file mode 100644 index 0000000000000000000000000000000000000000..08b5750957b2ae31deacaaae0deae35473c91fce --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/decomposition_registry_util.h @@ -0,0 +1,12 @@ +#pragma once + +#include +#include + +namespace torch::jit { + +TORCH_API const std::string& GetSerializedDecompositions(); + +TORCH_API const OperatorMap& GetDecompositionMapping(); + +} // namespace torch::jit diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/exception_message.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/exception_message.h new file mode 100644 index 0000000000000000000000000000000000000000..e3f00272a999f3d9431528db7d8e74ff0cc3d823 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/exception_message.h @@ -0,0 +1,29 @@ +#pragma once +#include +#include + +namespace torch::jit { + +struct ExceptionMessage { + ExceptionMessage(const std::exception& e) : e_(e) {} + + private: + const std::exception& e_; + friend std::ostream& operator<<( + std::ostream& out, + const ExceptionMessage& msg); +}; + +inline std::ostream& operator<<( + std::ostream& out, + const ExceptionMessage& msg) { + auto c10_error = dynamic_cast(&msg.e_); + if (c10_error) { + out << c10_error->what_without_backtrace(); + } else { + out << msg.e_.what(); + } + return out; +} + +} // namespace torch::jit diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/graph_executor.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/graph_executor.h new file mode 100644 index 0000000000000000000000000000000000000000..90021a6154295672178b0f9b35c3d44f20f51287 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/graph_executor.h @@ -0,0 +1,142 @@ +#pragma once + +#include +#include + +#include +#include +#include +#include +#include + +C10_DECLARE_bool(torch_jit_enable_new_executor); + +namespace torch::jit { +struct GraphExecutorState; +struct Code; + +enum ExecutorExecutionMode { + SIMPLE, + PROFILING, +}; + +struct ExecutionPlan { + ExecutionPlan() = default; + ExecutionPlan(std::shared_ptr graph, std::string function_name) + : code(graph, std::move(function_name)), graph(std::move(graph)) {} + + operator bool() const { + return static_cast(graph); + } + + Code code; + 
std::shared_ptr graph; +}; + +// Notice that those structs don't manage lifetime of their members. +// They are only valid only right after you call getDebugState() and should +// never be used again once another GraphExecutor function is called. + +// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) +struct GraphExecutorState { + const Graph* graph = nullptr; + ExecutionPlan fallback; // XXX: members of this field are optional + std::unordered_map execution_plans; +}; + +struct TORCH_API EnableProfilingGuard { + EnableProfilingGuard(); + ~EnableProfilingGuard(); + + private: + bool old_executor_mode = false; + bool old_get_optimize = false; +}; + +struct GraphExecutorImplBase; +struct TORCH_API GraphExecutor { + GraphExecutor() = default; + GraphExecutor(const std::shared_ptr& graph, std::string function_name); + + GraphExecutor( + const std::shared_ptr& graph, + std::string function_name, + ExecutorExecutionMode executor_mode); + + void run(Stack& inputs); + c10::intrusive_ptr runAsync( + Stack& stack, + TaskLauncher taskLauncher = at::launch); + + // `remaining_bailout_depth` stands for the maximum number of profiled and + // specialized recompilations allowed for the current `GraphExecutor`. if + // remaining_bailout_depth is equal to 0, `GraphExecutor` won't perform any + // profiling and specialization. This is also equivalent to the + // SIMPLE_EXECUTOR mode. if remaining_bailout_depth is greater than 0, + // `GraphExecutor` will profile and specialize its input graph based on the + // profiled information whenever a bailout check is failed/triggered, a new + // `GraphExecutor` will be created. This new `GraphExecutor`'s + // remaining_bailout_depth will be reduced by 1. + // If no bailout depth is passed, the depth will be initialized from the + // current global fusion strategy settings. + const ExecutionPlan& getPlanFor( + Stack& inputs, + c10::optional remaining_bailout_depth = c10::nullopt); + GraphExecutorState getDebugState(); + + void debugFlushCompilationCache(); + + bool isOptimized() const; + + private: + std::shared_ptr pImpl; +}; + +TORCH_API Node* replaceBlockWithFallbackGraph( + Block* b, + ArrayRef inputs); + +// These passes need to run before it is valid to pass to the interpreter +// regardless of whether sizes have been specialized or not. +TORCH_API void runRequiredPasses(const std::shared_ptr& g); + +TORCH_API void debugSetFusionGroupInlining(bool state); +TORCH_API bool getFusionGroupInlining(); + +TORCH_API void debugSetAutodiffSubgraphInlining(bool state); +TORCH_API std::shared_ptr lastExecutedOptimizedGraph(); + +TORCH_API std::atomic& getProfilingMode(); +TORCH_API std::atomic& getExecutorMode(); +TORCH_API std::atomic& getNumProfiledRuns(); +TORCH_API size_t getBailoutDepth(); +TORCH_API bool IsNewExecutorEnabled(); + +struct TORCH_API GraphOptimizerEnabledGuard { + GraphOptimizerEnabledGuard(bool state) + : old_state_(getGraphExecutorOptimize()) { + setGraphExecutorOptimize(state); + } + + ~GraphOptimizerEnabledGuard() { + setGraphExecutorOptimize(old_state_); + } + + bool old_state_; +}; + +namespace detail { + +GraphExecutor* getGradExecutor(Operation& op); + +GraphExecutor* getDifferentiableGraphOpExecutor(Operation& op); + +// for debugging information we expose a way to get the last actually +// run graph. Previous approaches allowed querying the GraphExecutor +// for what graph it would run in certain circumstances (graphFor), but +// this is fragile because we sometimes change how these decisions are made. 
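For example (an editorial sketch, not from the header; `graph` and `stack` are assumed test fixtures), a test can run an executor and then fetch the graph that actually executed:

#include <torch/csrc/jit/runtime/graph_executor.h>

std::shared_ptr<torch::jit::Graph> runAndGetOptimizedGraph(
    const std::shared_ptr<torch::jit::Graph>& graph,
    torch::jit::Stack& stack) {
  torch::jit::GraphExecutor executor(graph, "forward");
  executor.run(stack);  // outputs replace the inputs on the stack
  // The graph that was actually executed for this run, for assertions and
  // debugging only.
  return torch::jit::lastExecutedOptimizedGraph();
}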
+// This interface still allows our tests to look at optimized graphs, but +// with less plumbing. +} // namespace detail + +} // namespace torch::jit diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/graph_iterator.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/graph_iterator.h new file mode 100644 index 0000000000000000000000000000000000000000..c008902a2e8f71f1cb9eb2ce58b250971f488b50 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/graph_iterator.h @@ -0,0 +1,147 @@ +#include + +namespace torch::jit { + +// This class facilitates depth-first iteration over all nodes in a graph. +class DepthFirstGraphNodeIterator { + Node* current_; + + public: + // Constructor. + explicit DepthFirstGraphNodeIterator(std::shared_ptr& graph) + : current_(*(graph->block()->nodes().begin())) {} + + // Moves up and to the next node (may move up recursively). + void move_up() { + if (current_ == nullptr) { + return; + } + // Basically we start from the child block (which is current_) + // and we try to find the block that owns it. Now we need to check + // if that block is the graph root block, or if it is an If/Loop/etc + // block. + // + // If it's the graph root block we can stop because there is no "up" + // but if it is a node (e.g. If/Loop/etc) we need to apply logic + // based on where we are coming from to move to the next block. + // This might mean that we need to traverse up again (e.g. if we've + // reached the end of the else clause in an if block we need to go) + // up to the parent block that contains the if. + // + // Similarly if we've reached the end of the parent block containing + // the else clause we might need to go up again so this is a recursive + // function. + // + // BlockNode (if/loop/with) + // | + // [Block1] ... [Block2] + // | + // [ Node1, Node2, Node3, FromNode] + // + auto parent_block = current_->owningBlock(); + TORCH_INTERNAL_ASSERT(parent_block, "Every node must be owned by a block"); + + // Get the node that owns the parent block. This node has to be an if, + // loop, or with. + auto parent_node = parent_block->owningNode(); + if (parent_node == nullptr) { + // If there's no node that owns this current block then we're at the + // top of the graph and since we're trying to move up we have reached + // the end of the traversal. + current_ = nullptr; + return; + } + + // Check the type of node this root is. + if (parent_node->kind() == prim::If) { + // Need to check if we came from the `then` branch or the `else` branch. + auto* then_block = parent_node->blocks().at(0); + auto* else_block = parent_node->blocks().at(1); + + if (parent_block == else_block) { + // If else block then we move to the next node in the parent block. + current_ = parent_node->next(); + if (current_->kind() == prim::Return) { + move_up(); + } + } else { + // If then block then move to the else block if it is not empty. + TORCH_INTERNAL_ASSERT(parent_block == then_block); + bool else_block_empty = + else_block->nodes().begin() == else_block->nodes().end(); + + if (!else_block_empty) { + current_ = *(else_block->nodes().begin()); + } else { + // Since it's empty we move to the next node. 
+ current_ = parent_node->next(); + if (current_->kind() == prim::Return) { + move_up(); + } + } + } + } else if ( + parent_node->kind() == prim::Loop || + parent_node->kind() == prim::With) { + current_ = parent_node->next(); + if (current_->kind() == prim::Return) { + move_up(); + } + } else { + TORCH_INTERNAL_ASSERT( + false, "Only if/loop/with nodes should have child blocks"); + } + } + + // Moves to the next adjacent node or up in to the parent if that is not + // possible. + void move_next() { + if (current_ == nullptr) { + return; + } + + // Increment to the next node in the current block. + current_ = current_->next(); + + // Check if we're at the end of the block. If so we need + // to move upwards (if it makes sense to). + if (current_->kind() == prim::Return) { + move_up(); + } + } + + // Moves to the next node in the graph into children if it can. + void move_into() { + if (current_ == nullptr) { + return; + } + + // Check if we're currently on a node that contains sub-nodes. + if (current_->kind() == prim::If || current_->kind() == prim::Loop || + current_->kind() == prim::With) { + auto* first_block = current_->blocks().at(0); + current_ = first_block->param_node(); + // Move next will move up and out of the current node if the block is + // empty. `move_up` which is called by `move_next` will handle the + // difference between If, Loop, and With blocks appropriately. + move_next(); + } else { + move_next(); + } + } + + // Get the next Node in the graph. \returns nullptr if there are no nodes + // left. + Node* next() { + auto result = current_; + + // Try move into the existing node to set the next node to be returned. + // This will move to the next node if not possible, or move upwards and + // to the next. + move_into(); + + return result; + } +}; + +} // namespace torch::jit diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/instruction.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/instruction.h new file mode 100644 index 0000000000000000000000000000000000000000..73c78adbda03e5bd307979a28abcc648050c8a03 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/instruction.h @@ -0,0 +1,100 @@ +#pragma once + +#include +#include +#include + +namespace torch::jit { +// instruction look like: +// op_code X, N +// meaning of X, N depend on the op: +// O - index into operator table +// R - index into register table +// I - literal integer +// C - index into constant table +// P - jump offset relative to beginning of current instruction +// F - index into function table +// T - index into the type table, used for guard instructions +// S - index into object slots +// C - index into code table + +#define FORALL_OPCODES(_) \ + _(OP, "O") /* invoke operator X */ \ + _(OPN, "OI") /* invoke vararg operator X with N arguments */ \ + _(LOAD, "R") /* push a value from a register X */ \ + _(MOVE, "R") /* push a value from register X, clearing the register */ \ + _(STOREN, "RI") /* store N values to registers [X, X+N) */ \ + _(STORE, "R") /* store 1 value to registers X */ \ + _(DROP, "") /* drop 1 value from the top of the stack */ \ + _(DROPR, "R") /* clear register X */ \ + _(LOADC, "C") /* push the constant X */ \ + _(JF, "P") /* pop the top of the stack, if false, branch to P */ \ + _(JMP, "P") /* unconditional branch to X */ \ + _(LOOP, "PI") /* perform a loop, X is where to branch if cond is false */ \ + _(RET, "") /* exit execution */ \ + _(WAIT, "") /* wait for a 
future to be complete */ \ + _(CALL, "F") /* call function X */ \ + _(GUARD, "T") /* check a guard against type_table, true if passes */ \ + _(TYPECHECK, "TN") /* check each type of input[i] against type_table[X+N] */ \ + _(FAIL_GUARD, "T") /* fail a guard, patch back to GUARD */ \ + _(PROFILE_OP, "F") /* get a callback from profile_function_table at X */ \ + _(TAIL_CALL, "F") /* replace current frame with function F */ \ + _(INTERFACE_CALL, "CI") /* call method X on the first argument (of N) */ \ + _(GET_ATTR, "S") /* get attribute from slot X in an Object */ \ + _(SET_ATTR, "S") /* set attribute to slot X in an Object */ \ + _(LIST_UNPACK, "I") /* unpack list expecting length I */ \ + _(TUPLE_CONSTRUCT, "I") /* construct a tuple using X inputs */ \ + _(NAMED_TUPLE_CONSTRUCT, \ + "TI") /* construct a tuple of type X, using N inputs */ \ + _(LIST_CONSTRUCT, "TI") /* construct a list of type X, using N inputs */ \ + _(DICT_CONSTRUCT, "TI") /* construct a dict of type X, using N inputs */ \ + _(CREATE_OBJECT, "T") /* create an object of type X */ \ + _(ISINSTANCE, "TI") /* check object is one of types[X:X+N] */ \ + _(TUPLE_SLICE, "II") /* slice tup[X:(X+N)] */ \ + _(TUPLE_INDEX, "") /* get the value from a tuple at that index */ \ + _(RAISE_EXCEPTION, "") /* throws the exception from Python */ \ + _(DICT_INDEX, "") /* gets the value from the dict for given key */ \ + _(UNCHECKED_CAST, "") /* perform an unchecked cast operation */ \ + _(__IS__, "") /* performs `is` operator from Python */ \ + _(UN_INITIALIZED, \ + "") /* sets default values to variables that are uninitialized */ \ + _(__ISNOT__, "") /* performs `is not` operator from Python */ \ + _(FORMAT, "I") /* performs string format function `f strings` or `{}.format` \ + the number of inputs in stored in X */ \ + _(DEVICE, "") /* invokes aten::device for a Tensor */ \ + _(DTYPE, "") /* invokes aten::dtype for a Tensor */ \ + _(DIM, "") /* invokes aten::dim for a Tensor */ \ + _(__NOT__, "") /* performs `not` operator from Python */ \ + _(TO_LIST, "") /* convert the input to a list */ \ + _(NUM_TO_TENSOR, \ + "") /* performs the conversion of a number/scalar to Tensor */ \ + _(IS_CUDA, "") /* invokes aten::is_cuda for a Tensor */ \ + _(FORK, "CN") /* launch a thread to run code entry x with N inputs */ \ + _(WARN, "I") /* emit a warning with line information */ \ + _(ENTER, "EN") /* enter scope of a contextmanager */ \ + _(EXIT, "EX") /* exit the last entered contextmanager */ \ + _(AWAITABLE, "CN") /* initialize await for code entry x with N inputs */ + +enum OpCode : uint8_t { +#define DEFINE_OP(op, _) op, + FORALL_OPCODES(DEFINE_OP) +#undef DEFINE_OP +}; + +struct Instruction { + OpCode op; + uint8_t unused; + uint16_t N; + int32_t X; + // TODO: check for overflow + Instruction(OpCode op, int32_t X, uint16_t N) + : op(op), unused(0), N(N), X(X) {} +}; +std::ostream& operator<<(std::ostream& out, Instruction inst); + +bool isOpSupportedInMobile(OpCode op); +char const* toString(OpCode op); +OpCode parseOpCode(const char* str); +std::ostream& operator<<(std::ostream& out, Instruction inst); + +} // namespace torch::jit diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/interpreter.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/interpreter.h new file mode 100644 index 0000000000000000000000000000000000000000..75e4ebc370b3c97cefce3ea7cb2486de05325919 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/interpreter.h @@ 
-0,0 +1,158 @@ +#pragma once +#include +#include +#include + +#include +#include +#include +#include +#include + +C10_DECLARE_bool(torch_jit_disable_warning_prints); +C10_DECLARE_bool(torch_jit_enable_rethrow_caught_exception); + +namespace at { +class Tensor; +TORCH_API void launch(std::function func); +} // namespace at +namespace c10 { +struct IValue; +struct OperatorName; +} // namespace c10 + +namespace torch::jit { + +// The interpreter run Graphs with Tensor inputs and Tensor outputs +// a separate component in the autograd handles unwrapping and wrapping +// variable objects for use in the interpreter. +namespace interpreter { +struct CodeImpl; +} + +struct Node; +struct GraphExecutor; +struct InterpreterStateImpl; +struct Graph; +struct Node; +struct Instruction; +using Stack = std::vector; +using c10::ivalue::Future; +using TaskLauncher = std::function)>; + +struct TORCH_API Code { + Code() = default; + explicit Code(interpreter::CodeImpl* pImpl); + // remaining_bailout_depth is irrelevant in a `Code` object unless the `Code` + // is directly created by `GraphExecutor` in which case it's likely to contain + // `prim::BailOut`s to control the maximum depth of bailout chains + explicit Code( + const std::shared_ptr& graph, + std::string function_name, + size_t remaining_bailout_depth = 0); + + const std::vector& grad_executors(); + const std::vector& diff_graph_op_executors(); + + explicit operator bool() const { + return pImpl != nullptr; + } + size_t num_inputs() const; + size_t num_outputs() const; + size_t num_bailouts() const; + const std::vector& constant_table() const; + const std::vector& type_table() const; + const std::vector& instructions() const; + const std::unordered_map& op_to_num_specified_args() + const; + const std::vector& instructions_source() const; + void request_bailout(size_t index); + size_t register_size() const; + + private: + std::shared_ptr pImpl; + friend struct InterpreterStateImpl; + friend std::ostream& operator<<(std::ostream& out, const Code& code); +}; + +struct TORCH_API MobileCode : Code { + explicit MobileCode( + const std::shared_ptr& graph, + std::string function_name, + bool emit_default_input_instructions = true, + bool support_default_args_before_out = true, + bool emit_promoted_ops = true, + size_t remaining_bailout_depth = 0); +}; + +struct InterpreterState { + TORCH_API InterpreterState( + const Code& code, + TaskLauncher taskLauncher = at::launch); + TORCH_API void run(Stack& stack); + TORCH_API c10::intrusive_ptr runAsync(Stack& stack); + c10::intrusive_ptr getFuture(); + + private: + InterpreterState(c10::intrusive_ptr pImpl); + // Ideally we should use c10::intrusive_ptr for pImpl; + // but intrusive_ptr requires full definition of InterpreterStateImpl, + // which we need to hide in the header. 
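An editorial aside, not part of the header: the smallest end-to-end use of the Code and InterpreterState declared above; `graph` and `stack` are assumed to be prepared by the caller:

#include <torch/csrc/jit/runtime/interpreter.h>

void interpretOnce(
    const std::shared_ptr<torch::jit::Graph>& graph,
    torch::jit::Stack& stack) {
  // Lower the graph to interpreter bytecode once...
  torch::jit::Code code(graph, /*function_name=*/"forward");
  // ...then run it; results are left on the stack.
  torch::jit::InterpreterState(code).run(stack);
}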
+ c10::intrusive_ptr pImpl; + friend struct InterpreterStateImpl; +}; + +// Created by wait() +struct Suspend : public std::exception { + const char* what() const noexcept override { + return "Suspend"; + } + + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + explicit Suspend(c10::intrusive_ptr future_) + : future(std::move(future_)) {} + + c10::intrusive_ptr future; +}; + +// InterpreterContinuation propagates dist_autograd_context_id +// through (and only through) the forward pass manually, other +// thread local settings are propagated with ThreadLocalState +struct InterpreterContinuation { + InterpreterContinuation( + InterpreterState state_, + Stack stack_, + int64_t dist_autograd_context_id = 0, + c10::optional tls_state = c10::nullopt) + : state(std::move(state_)), + stack(std::move(stack_)), + tls_state_(std::move(tls_state)) +#ifdef USE_DISTRIBUTED + , + dist_autograd_context_id_(dist_autograd_context_id) +#endif + { + } + + void operator()(); + + private: + InterpreterState state; + Stack stack; + c10::optional tls_state_ = c10::nullopt; +#ifdef USE_DISTRIBUTED + int64_t dist_autograd_context_id_; +#endif +}; + +// what is the tensors type, including state from the current execution context +// that modifies how the tensor behaves. For instance if no_grad is enabled +// this will cause the TensorType to have requires_grad=False. +TORCH_API at::TensorTypePtr tensorTypeInCurrentExecutionContext( + const at::Tensor& t); + +// current (TLS) TorchScript interpreter callstack +TORCH_API std::vector currentCallstack(); +TORCH_API std::vector currentModuleHierarchy(); + +} // namespace torch::jit diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/jit_exception.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/jit_exception.h new file mode 100644 index 0000000000000000000000000000000000000000..728675ed7841835aba35771b4b1ba5e827d2c5d5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/jit_exception.h @@ -0,0 +1,38 @@ +#pragma once + +#include + +#include +#include +#include + +namespace torch::jit { + +struct TORCH_API JITException : public std::runtime_error { + explicit JITException( + const std::string& msg, + c10::optional python_class_name = c10::nullopt, + c10::optional original_msg = c10::nullopt); + + c10::optional getPythonClassName() const { + return python_class_name_; + } + + // the original msg if this is from a python exception. The interpretor has + // changed the original message by adding "The following operation failed in + // the TorchScript interpreter." in front of it in the handleError function. 
+ c10::optional getOriginalMsg() const { + return original_msg_; + } + + static const std::string& getCaughtOriginalMsg(); + static const std::string& getCaughtPythonClassName(); + static void setCaughtOriginalMsg(const std::string& msg); + static void setCaughtPythonClassName(const std::string& pythonClassName); + + private: + c10::optional python_class_name_; + c10::optional original_msg_; +}; + +} // namespace torch::jit diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/jit_trace.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/jit_trace.h new file mode 100644 index 0000000000000000000000000000000000000000..12be844e35a91a8ca9e775bd030764b58452a172 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/jit_trace.h @@ -0,0 +1,8 @@ +#include +#include + +namespace torch::jit { +TORCH_API std::shared_ptr TraceGraph( + std::shared_ptr graph, + Stack& stack); +} // namespace torch::jit diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/logging.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/logging.h new file mode 100644 index 0000000000000000000000000000000000000000..b0b67c68088389bdd35e72c00cd7d1005399cb1c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/logging.h @@ -0,0 +1,86 @@ +#pragma once + +#include +#include +#include +#include + +#include + +namespace torch::jit::logging { + +class LoggerBase { + public: + TORCH_API virtual void addStatValue( + const std::string& stat_name, + int64_t val) = 0; + virtual ~LoggerBase() = default; +}; + +TORCH_API LoggerBase* getLogger(); +TORCH_API LoggerBase* setLogger(LoggerBase* logger); + +// No-op logger. This is the default and is meant to incur almost no runtime +// overhead. + +class NoopLogger : public LoggerBase { + public: + void addStatValue(const std::string& stat_name, int64_t val) override {} + ~NoopLogger() override = default; +}; + +// Trivial locking logger. Pass in an instance of this to setLogger() to use it. +// This keeps track of the sum of all statistics. +// +// NOTE: this is not written in a scalable way and should probably only be used +// in the single-threaded case or for testing. +class TORCH_API LockingLogger : public LoggerBase { + public: + void addStatValue(const std::string& stat_name, int64_t val) override; + virtual int64_t getCounterValue(const std::string& name) const; + enum class AggregationType { SUM = 0, AVG = 1 }; + void setAggregationType(const std::string& stat_name, AggregationType type); + ~LockingLogger() override = default; + + private: + mutable std::mutex m; + struct RawCounter { + RawCounter() : sum(0), count(0) {} + int64_t sum; + size_t count; + }; + std::unordered_map raw_counters; + std::unordered_map agg_types; +}; + +// Make this struct so the timer internals are opaque to the user. 
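To ground the logging API above and the timer struct that follows, an editorial sketch; the counter increment and the "my_pass.duration" stat name are illustrative assumptions:

#include <torch/csrc/jit/runtime/logging.h>

void countAndTime() {
  using namespace torch::jit::logging;
  static LockingLogger logger;  // keeps per-stat sums/averages under a mutex
  setLogger(&logger);
  getLogger()->addStatValue(runtime_counters::EXECUTION_PLAN_CACHE_HIT, 1);

  JITTimePoint start = timePoint();
  // ... timed work ...
  recordDurationSince("my_pass.duration", start);  // assumed stat name
}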
+struct JITTimePoint { + std::chrono::time_point point; +}; + +TORCH_API JITTimePoint timePoint(); +TORCH_API void recordDurationSince( + const std::string& name, + const JITTimePoint& tp); + +namespace runtime_counters { +constexpr const char* GRAPH_EXECUTORS_CONSTRUCTED = + "pytorch_runtime.graph_executors_constructed"; +constexpr const char* GRAPH_EXECUTOR_INVOCATIONS = + "pytorch_runtime.graph_executor_invocations"; +constexpr const char* EXECUTION_PLAN_CACHE_HIT = + "pytorch_runtime.execution_plan_cache_hit"; +constexpr const char* EXECUTION_PLAN_CACHE_MISS = + "pytorch_runtime.execution_plan_cache_miss"; + +inline std::vector allRuntimeCounters() { + return { + GRAPH_EXECUTORS_CONSTRUCTED, + GRAPH_EXECUTOR_INVOCATIONS, + EXECUTION_PLAN_CACHE_HIT, + EXECUTION_PLAN_CACHE_MISS}; +} + +} // namespace runtime_counters + +} // namespace torch::jit::logging diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/operator.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/operator.h new file mode 100644 index 0000000000000000000000000000000000000000..681a5a8e3415b3b1bac9f42bc2a2f4d2718f7764 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/operator.h @@ -0,0 +1,346 @@ +// in memory description of all ATen Ops similar to Caffe2 schema +// once C10 exists this can be removed, or stubbed out, but we need +// it now to implement correct semantic checking for script +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace torch::jit { + +struct Node; +using ::c10::Argument; +using ::c10::FunctionSchema; +using ::c10::Symbol; + +using OperationCreator = Operation (*)(const Node*); + +namespace { +const std::array kJitOnlyOperatorTags = { + at::Tag::pt2_compliant_tag}; +} + +/* + * Note: JIT relies on Operator instances having static lifetime, because + * it for example stores a non-owning FunctionSchema* pointer in the Node class, + * which points to the function schema stored in the Operator instance. + * Also, jit::Operator is meant to store more operator related information like + * symbolic derivatives, which also requires them to have static lifetime + * so that changes to symbolic derivatives are remembered. + * + * Currently, the JIT operator library contains a jit::Operator instance + * with a wrapper for each c10 operator. The c10 operator library registers + * those wrappers using listeners in register_c10_ops.cpp. + * TODO Instead of doing it this way, we should only have pure-jit ops in + * the jit library but have the JIT operator lookup look into the c10 library + * too. + */ + +// An Operator is a thin wrapper around either a pure JIT operator (e.g. prim +// ops) or a c10 operator, allowing some common operations and abstracting away +// the concrete operator nature. +struct TORCH_API Operator { + private: + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + struct C10Operator final { + c10::OperatorHandle handle_; + Operation op_; + }; + struct UnparsedFunctionSchema final { + std::string schema_string_; + mutable c10::optional alias_analysis_; + }; + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + struct JitOnlyOperator final { + // The only valid transition for schema_ is from right->left, i.e. + // when the schema gets parsed. 
+ mutable std::variant schema_; + + std::variant op_; + }; + + public: + Operator(c10::OperatorHandle opHandle, Operation operation) + : op_(C10Operator( + C10Operator{std::move(opHandle), std::move(operation)})) {} + + Operator( + std::string schema, + Operation op, + c10::AliasAnalysisKind alias_analysis) + : op_(JitOnlyOperator{ + UnparsedFunctionSchema{std::move(schema), alias_analysis}, + Operation(std::move(op))}) {} + + Operator( + std::string name, + std::string overload_name, + std::vector arguments, + std::vector returns, + Operation op, + c10::AliasAnalysisKind alias_analysis) + : op_(JitOnlyOperator{ + FunctionSchema(varArgSchemaWithName( + std::move(name), + std::move(overload_name), + std::move(arguments), + std::move(returns), + alias_analysis)), + std::move(op)}) {} + + Operator( + std::string schema, + OperationCreator op_creator, + c10::AliasAnalysisKind alias_analysis) + : op_(JitOnlyOperator{ + UnparsedFunctionSchema{std::move(schema), alias_analysis}, + op_creator}) {} + + // Helper constructor to register `op` to run + // run for _every_ IR Node where n.kind() == name, regardless of arguments. + // This is accomplished by marking the schema varargs and having no required + // arguments. + Operator( + Symbol name, + OperationCreator op_creator, + c10::AliasAnalysisKind alias_analysis) + : op_(JitOnlyOperator{ + FunctionSchema(varArgSchemaWithName(name, alias_analysis)), + op_creator}) {} + + Operation getOperation(const Node* node = nullptr) const { + return std::visit( + c10::overloaded( + [](const C10Operator& op) { return op.op_; }, + [node](const JitOnlyOperator& op) { + return std::visit( + c10::overloaded( + [](const Operation& op) { return op; }, + [node](const OperationCreator& op_creator) { + return op_creator(node); + }), + op.op_); + }), + op_); + } + + Operation getOperationForDispatchKey(c10::DispatchKey dk) const { + // TODO: some sort of caching mechanism? + return std::visit( + c10::overloaded( + [dk](const C10Operator& op) { + return Operation([op, dk](Stack& stack) { + op.handle_.callBoxedForDispatchKey(dk, stack); + }); + }, + [](const JitOnlyOperator& op) { + TORCH_CHECK( + false, + "calling a JIT operator for dispatch key is not supported"); + return Operation(nullptr); + }), + op_); + } + + const FunctionSchema& schema() const { + return std::visit( + c10::overloaded( + [](const C10Operator& op) -> const FunctionSchema& { + return op.handle_.schema(); + }, + [](const JitOnlyOperator& op) -> const FunctionSchema& { + // we lazily parse schema initialized from strings so that + // we do less work during static operator registration + if (op.schema_.index() == 1) { + auto& unmaterializedSchema = + std::get(op.schema_); + FunctionSchema schema = + parseSchema(unmaterializedSchema.schema_string_); + if (unmaterializedSchema.alias_analysis_.has_value()) { + // TODO What if it gets set later? + schema.setAliasAnalysis( + *unmaterializedSchema.alias_analysis_); + } + op.schema_ = std::move(schema); + } + return std::get(op.schema_); + }), + op_); + } + + c10::ArrayRef getTags() const { + return std::visit( + c10::overloaded( + [](const C10Operator& op) { return op.handle_.getTags(); }, + [](const JitOnlyOperator& op) { + // JitOnlyOperators don't have an c10::OperatorHandle or a way to + // specify tags. We're grandfathering them all into + // pt2_compliant_tag, but for anything else, please just stop + // using JitOnlyOperator. 
+ return c10::ArrayRef(kJitOnlyOperatorTags); + }), + op_); + } + + bool isC10Op() const { + return op_.index() == 0; + } + + c10::AliasAnalysisKind aliasAnalysisKind() const { + const FunctionSchema& schemaRef = schema(); + c10::AliasAnalysisKind alias_analysis = schemaRef.aliasAnalysis(); + + TORCH_CHECK( + alias_analysis == AliasAnalysisKind::FROM_SCHEMA || + !schemaRef.hasAnyAliasInfo(), + "In operator registration: Tried to register operator ", + schemaRef, + " with aliasing information in the schema but without AliasAnalysisKind::FROM_SCHEMA."); + return alias_analysis; + } + + bool hasOperation() const { + return std::visit( + c10::overloaded( + [](const C10Operator&) { return true; }, + [](const JitOnlyOperator& op) { return op.op_.index() == 0; }), + op_); + } + + private: + static FunctionSchema varArgSchemaWithName( + Symbol name, + AliasAnalysisKind alias_analysis) { + auto result = FunctionSchema( + name, + "", + {}, + {}, + /*is_vararg*/ true, + /*is_varret*/ true); + result.setAliasAnalysis(alias_analysis); + return result; + } + + static FunctionSchema varArgSchemaWithName( + std::string name, + std::string overload_name, + std::vector arguments, + std::vector returns, + AliasAnalysisKind alias_analysis) { + auto result = FunctionSchema( + std::move(name), + std::move(overload_name), + std::move(arguments), + std::move(returns), + /*is_vararg*/ false, + /*is_varret*/ false); + result.setAliasAnalysis(alias_analysis); + return result; + } + + std::variant op_; +}; + +TORCH_API std::string canonicalSchemaString(const FunctionSchema& schema); + +TORCH_API const std::vector> getAllOperators(); +TORCH_API const std::vector>& getAllOperatorsFor( + Symbol name); +// Returns operators in the order which OpOverloadPacket resolves them. +TORCH_API std::vector> getAllSortedOperatorsFor( + Symbol name); + +// given a operator with an overload name, find the specific operator related to +// it, may return nullptr if no operator exists. +TORCH_API std::shared_ptr findOperatorFor( + const c10::OperatorName& full_name); + +TORCH_API std::vector findSimilarOperators(Symbol input_op); + +TORCH_API void registerOperator(Operator&& op); +TORCH_API void deregisterOperator(const FunctionSchema& schema); + +// XXX: this function is meant to be used with string literals only! +TORCH_API std::shared_ptr getOperatorForLiteral( + const char* signature); + +// Ensure the thing that registers c10 ops is defined. +// Otherwise, our registry will not have c10 ops. You can run into this +// scenario if you're querying registered ops during static init. +// +// This fn is defined in register_c10_ops.cpp +TORCH_API void ensure_c10_registerer_defined(); + +// Used to assert that unschematized operators have an analysis method written +TORCH_API bool aliasAnalysisHasSpecialCaseFor(c10::Symbol sym); + +// A factory function to generate an optional operator. It has two +// instantiations depending on the template bool arg value. The arg can be a +// compile-time function for the selective op registration based on schema +// string. 
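+//
+// Usage sketch (not part of the original header): the selective-build
+// overloads below either yield an Operator wrapped in c10::optional or
+// c10::nullopt, so unselected ops compile away. The schema string and lambda
+// here are made up for illustration.
+//
+//   auto op = OperatorGenerator(
+//       "mylib::add_one(int a) -> int",
+//       [](Stack& stack) { push(stack, pop(stack).toInt() + 1); },
+//       c10::AliasAnalysisKind::FROM_SCHEMA);
+//   if (op.has_value()) {
+//     registerOperator(std::move(*op));
+//   }
+//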
+template +c10::optional OperatorGenerator( + const char* schema_str, + Func&& op, + AliasAnalysisKind alias_analysis) { + return c10::optional(Operator( + std::string(schema_str), std::forward(op), alias_analysis)); +} + +template +c10::optional OperatorGenerator( + torch::detail::SelectiveStr schema_str, + Func&& op, + AliasAnalysisKind alias_analysis) { + return OperatorGenerator( + static_cast(schema_str), + std::forward(op), + alias_analysis); +} + +template +c10::optional OperatorGenerator( + torch::detail::SelectiveStr schema_str, + Func&& op, + AliasAnalysisKind alias_analysis) { + return c10::nullopt; +} + +template +c10::optional OperatorGenerator( + const std::string name, + const std::string overload_name, + const std::vector arguments, + const std::vector returns, + Func&& op, + AliasAnalysisKind alias_analysis) { + return c10::optional(Operator( + name, + overload_name, + arguments, + returns, + std::forward(op), + alias_analysis)); +} + +} // namespace torch::jit diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/operator_options.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/operator_options.h new file mode 100644 index 0000000000000000000000000000000000000000..50c41fc3ad39d44262b4da8e54fd4b75b00d8f2d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/operator_options.h @@ -0,0 +1,9 @@ +#pragma once + +#include + +namespace torch::jit { + +using AliasAnalysisKind = c10::AliasAnalysisKind; + +} // namespace torch::jit diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/print_handler.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/print_handler.h new file mode 100644 index 0000000000000000000000000000000000000000..36feaffb200b655bd452ff822ae7af5149bc2670 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/print_handler.h @@ -0,0 +1,15 @@ +#pragma once + +#include + +#include + +namespace torch::jit { + +using PrintHandler = void (*)(const std::string&); + +TORCH_API PrintHandler getDefaultPrintHandler(); +TORCH_API PrintHandler getPrintHandler(); +TORCH_API void setPrintHandler(PrintHandler ph); + +} // namespace torch::jit diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/profiling_graph_executor_impl.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/profiling_graph_executor_impl.h new file mode 100644 index 0000000000000000000000000000000000000000..6b8493dc56d211f42c19888f5c08daeb511f48aa --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/profiling_graph_executor_impl.h @@ -0,0 +1,77 @@ +#pragma once +#include +#include +#include + +C10_DECLARE_bool(torch_jit_static_then_dynamic); + +C10_DECLARE_bool(torch_jit_always_dynamic); + +namespace torch::jit { + +TORCH_API void runNooptPassPipeline(std::shared_ptr& graph); + +struct TORCH_API ProfilingGraphExecutorImpl : public GraphExecutorImplBase { + ProfilingGraphExecutorImpl( + const std::shared_ptr& graph, + std::string function_name); + + const ExecutionPlan& getPlanFor( + Stack& stack, + c10::optional remaining_bailout_depth) override; + GraphExecutorState getDebugState() override; + ~ProfilingGraphExecutorImpl() override = default; + + void debugFlushCompilationCache() { + std::lock_guard lock(compile_mutex); + pr_.reset(); + fallback_plan_.reset(); + 
profiling_plan_.reset(); + optimized_plan_.reset(); + // prevent memory leaks + fallback_functions_.clear(); + remaining_bailout_depth_.reset(); + // TODO - would be nice to have it initialized in subsequent use + fusion_strategy_ = getFusionStrategy(); + } + + bool isOptimized() const override { + return optimized_plan_.has_value(); + } + + private: + const ExecutionPlan& getOptimizedPlanFor( + Stack& stack, + c10::optional remaining_bailout_depth); + void runProfilingInsensitiveOptimizations(std::shared_ptr& graph); + void runProfilingOptimizations( + std::shared_ptr& graph, + size_t remaining_depth); + void replaceFallbackGraphWithFallbackFunction(Block* b); + FusionBehavior getCurrentBehavior(size_t remaining_depth); + size_t getInstantiatedBailoutDepth(); + void runNoGradOptimizations( + std::shared_ptr& graph, + size_t remaining_bailout_depth); + void runFinalOptimizations(std::shared_ptr& graph); + std::unique_ptr pr_; + c10::optional + profiling_plan_; // plan to run in order to profiling the code + c10::optional optimized_plan_; + FusionStrategy fusion_strategy_; + + // this plan is used if getGraphExecutorOptimize is unset + c10::optional fallback_plan_; + // fallback functions are inserted for tensorexpr fusion groups + // and by specialize_autogradzero. Whenever, at runtime, input + // tensor don't match profiled properties, fallback functions are called + // They are the deoptimized version of the logic in fusion groups + // and/or autograd. + // The fallback functions are owned by a GraphExecutor instance + // They only exist in the optimized graph which is a private property + // of the GraphExecutor and only shared with InterpreterState + std::vector> fallback_functions_; + c10::optional remaining_bailout_depth_; +}; + +} // namespace torch::jit diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/register_ops_utils.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/register_ops_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..2a269931afbc0b83f5d423fa03c2042daed5db26 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/register_ops_utils.h @@ -0,0 +1,885 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace torch::jit { +constexpr inline c10::AliasAnalysisKind aliasAnalysisFromSchema() { + return c10::AliasAnalysisKind::FROM_SCHEMA; +} + +constexpr inline c10::AliasAnalysisKind aliasAnalysisConservative() { + return c10::AliasAnalysisKind::CONSERVATIVE; +} + +constexpr inline c10::AliasAnalysisKind aliasAnalysisSpecialCase() { + return c10::AliasAnalysisKind::INTERNAL_SPECIAL_CASE; +} + +template +c10::List make_result_list(const TypePtr& elemType) { + return c10::List(); +} + +template <> +c10::impl::GenericList make_result_list(const TypePtr& elemType); + +// As described in https://docs.python.org/3/library/functions.html#round +// When a number is exactly halfway between two integers, python builtin round +// function will round to even number. We use round(x/2)*2 to handle the +// special halfway case. 
For positive 'x', round(x/2)*2 = +// round((x_e + x_r)/2)*2 = x_e + round(x_r/2)*2, where x_e is an even integer, +// x_r is either 0.5 of 1.5, round(x_r/2)*2 results a 0 or 2, so the final +// result will always be a even number. Due to symmetricity, it also applies to +// negative cases. +inline double round_to_even(double a) { + return a - std::floor(a) == 0.5 ? (std::round(a * 0.5) * 2.0) : std::round(a); +} + +// using the rules from python_arg_parser FunctionParameter::check +// tensor cannot have grad set, tensor must be 0 dim, +// and if the dest is an int the source must be integral type +void checkImplicitTensorToNum(const at::Tensor& t, bool toInt); + +static C10_UNUSED int64_t floordiv(int64_t a, int64_t b) { + if (b == 0) { + throw std::runtime_error("division by 0"); + } + if ((a > 0) == (b > 0)) { + // simple case, both have same sign + return a / b; + } else { + // in python division rounds down, it doesn't not truncate like in c++ + auto r = lldiv(a, b); + return (r.rem) ? r.quot - 1 : r.quot; + } +} +TORCH_API void checkDoubleInRange(double a); +static C10_UNUSED int64_t floor(double a) { + checkDoubleInRange(a); + return std::floor(a); +} +static C10_UNUSED int64_t ceil(double a) { + checkDoubleInRange(a); + return std::ceil(a); +} + +static C10_UNUSED int64_t gcd(int64_t a, int64_t b) { + while (b != 0) { + int64_t r = a % b; + a = b; + b = r; + } + // in python gcd returns non-negative values + return std::abs(a); +} + +int64_t partProduct(int n, int m); + +void loop(int n, int64_t& p, int64_t& r); + +int nminussumofbits(int v); + +int64_t factorial(int n); +static const double degToRad = std::acos(-1.0) / 180.0; +static const double radToDeg = 180.0 / std::acos(-1.0); +double degrees(double x); +double radians(double x); + +// Convert an python index (which may be negative) into an index usable for a +// C++ container + +// Equivalent to list.at(idx) +template +T getItem(const c10::List& list, int64_t idx) { + const int64_t list_size = list.size(); + const int64_t normalized_idx = normalizeIndex(idx, list_size); + if (normalized_idx < 0 || normalized_idx >= list_size) { + throw std::out_of_range("list index out of range"); + } + return list.get(normalized_idx); +} + +template +void setItem(const c10::List& list, int64_t idx, T&& value) { + const int64_t list_size = list.size(); + const int64_t normalized_idx = normalizeIndex(idx, list_size); + if (normalized_idx < 0 || normalized_idx >= list_size) { + throw std::out_of_range("list index out of range"); + } + list.set(normalized_idx, std::forward(value)); +} + +void listAppend(Stack& stack); + +void listReverse(Stack& stack); + +template +void minList(Stack& stack) { + c10::List a = pop(stack).to>(); + c10::List b = pop(stack).to>(); + + size_t min_size = std::min(a.size(), b.size()); + for (const auto i : c10::irange(min_size)) { + if (a[i] == b[i]) { + continue; + } + + push(stack, a[i] < b[i] ? a : b); + return; + } + + push(stack, b.size() < a.size() ? b : a); +} + +template +void maxList(Stack& stack) { + c10::List a = pop(stack).to>(); + c10::List b = pop(stack).to>(); + + size_t min_size = std::min(a.size(), b.size()); + for (const auto i : c10::irange(min_size)) { + if (a[i] == b[i]) { + continue; + } + + push(stack, a[i] > b[i] ? a : b); + return; + } + + push(stack, b.size() > a.size() ? 
b : a); +} + +void listPopImpl(Stack& stack, const char* empty_message); + +void listPop(Stack& stack); + +void listClear(Stack& stack); + +void listDelete(Stack& stack); + +void listInsert(Stack& stack); + +template +void listRemove(Stack& stack) { + T elem = pop(stack).to(); + c10::List list = pop(stack).to>(); + + auto pos = std::find(list.begin(), list.end(), elem); + + if (pos != list.end()) { + list.erase(pos); + } else { + AT_ERROR("list.remove(x): x not in list"); + } +} + +template +void listMin(Stack& stack) { + c10::List list = pop(stack).to>(); + size_t list_size = list.size(); + if (list_size == 0) { + throw std::runtime_error("min() arg is an empty sequence"); + } + + T min_elem = list[0]; + for (const auto i : c10::irange(1, list_size)) { + T elem = list[i]; + min_elem = elem < min_elem ? elem : min_elem; + } + + stack.push_back(min_elem); +} + +template +void listMax(Stack& stack) { + c10::List list = pop(stack).to>(); + size_t list_size = list.size(); + if (list_size == 0) { + throw std::runtime_error("max() arg is an empty sequence"); + } + + T max_elem = list[0]; + for (const auto i : c10::irange(1, list_size)) { + T elem = list[i]; + max_elem = elem > max_elem ? elem : max_elem; + } + + stack.push_back(max_elem); +} + +template <> +void listRemove(Stack& stack); + +template +void listIndex(Stack& stack) { + T elem = pop(stack).to(); + c10::List list = pop(stack).to>(); + + auto pos = std::find(list.begin(), list.end(), elem); + + if (pos != list.end()) { + push(stack, static_cast(std::distance(list.begin(), pos))); + } else { + AT_ERROR("'", elem, "' is not in list"); + } +} + +template <> +void listIndex(Stack& stack); + +template +void listCount(Stack& stack) { + T elem = pop(stack).to(); + c10::List list = pop(stack).to>(); + + const int64_t count = std::count(list.begin(), list.end(), elem); + push(stack, count); +} + +template <> +void listCount(Stack& stack); + +void listExtend(Stack& stack); + +void listCopy(Stack& stack); + +void listSelect(Stack& stack); + +void listLen(Stack& stack); + +template +void listEq(Stack& stack) { + c10::List b = pop(stack).to>(); + c10::List a = pop(stack).to>(); + push(stack, a == b); +} + +template +void listNe(Stack& stack) { + c10::List b = pop(stack).to>(); + c10::List a = pop(stack).to>(); + push(stack, a != b); +} + +inline bool tensor_list_equal( + const c10::List& a, + const c10::List& b) { + if (a.size() != b.size()) { + return false; + } + + for (const auto i : c10::irange(a.size())) { + const at::Tensor& a_element = a[i]; + const at::Tensor& b_element = b[i]; + // This preserves Python's semantics, which uses eq() to compare two + // elements, then passes the result to bool(). 
+ // see: https://docs.python.org/3.4/reference/datamodel.html#object.__ge__ + const auto cmp_result = a_element.eq(b_element); + if (!at::native::is_nonzero(cmp_result)) { + return false; + } + } + + return true; +} + +// Specialization for at::Tensor, since it doesn't define operator== +template <> +void listEq(Stack& stack); + +// Specialization for at::Tensor, since it doesn't define operator== +template <> +void listNe(Stack& stack); + +void listList(Stack& stack); + +template +void listContains(Stack& stack) { + auto key = pop(stack).to(); + auto list = pop(stack).to>(); + // NOLINTNEXTLINE(performance-implicit-conversion-in-loop) + for (const T& item : list) { + if (item == key) { + push(stack, true); + return; + } + } + push(stack, false); +} + +void listAdd(Stack& stack); + +void listInplaceAdd(Stack& stack); + +void listMulIntLeftInPlace(Stack& stack); + +void listMulIntLeft(Stack& stack); + +void listMulIntRight(Stack& stack); + +void listSlice(Stack& stack); + +template +void listSort(Stack& stack) { + bool reverse = pop(stack).toBool(); + c10::List list = pop(stack).to>(); + std::sort(list.begin(), list.end(), [reverse](const T& a, const T& b) { + // FBCode errors without this check - "strict weak ordering" + // TODO: remove when possible, since it just slows down + // sorting and doesn't do anything useful + if (a == b) { + return false; + } + return (a < b) != reverse; + }); +} + +// Specialization for at::Tensor +template <> +void listSort(Stack& stack); + +template +void listCopyAndSort(Stack& stack) { + c10::List list = pop(stack).to>(); + auto list_copied = list.copy(); + std::sort(list_copied.begin(), list_copied.end(), [](const T& a, const T& b) { + // "strict weak ordering" issue - see other sort + if (a == b) { + return false; + } + return a < b; + }); + push(stack, list_copied); +} + +// Specialization for at::Tensor +template <> +void listCopyAndSort(Stack& stack); + +void listSetItem(Stack& stack); + +struct OperatorGeneratorArgs { + const char* schema_str; + bool isOperationCreator; + union { + void (*operation)(Stack&); + OperationCreator operationCreator; + }; + AliasAnalysisKind aliasAnalysis; + + explicit constexpr OperatorGeneratorArgs( + torch::detail::SelectiveStr schema_str, + void (*op)(Stack&), + AliasAnalysisKind aa) + : schema_str(schema_str), + isOperationCreator(false), + operation(op), + aliasAnalysis(aa) {} + + explicit constexpr OperatorGeneratorArgs( + torch::detail::SelectiveStr schema_str, + OperationCreator opCreator, + AliasAnalysisKind aa) + : schema_str(schema_str), + isOperationCreator(true), + operationCreator(opCreator), + aliasAnalysis(aa) {} + + template + explicit constexpr OperatorGeneratorArgs( + torch::detail::SelectiveStr, + Args...) 
+ : schema_str(nullptr), + isOperationCreator(false), + operation(nullptr), + aliasAnalysis(AliasAnalysisKind::INTERNAL_SPECIAL_CASE) {} +}; + +#define DEFINE_GENERIC_BINARY_OP( \ + aten_op, op, int_float_result, complex_result) \ + OperatorGeneratorArgs( \ + TORCH_SELECTIVE_SCHEMA(#aten_op \ + ".int_int(int a, int b) -> " #int_float_result), \ + [](Stack& stack) { \ + int64_t a, b; \ + pop(stack, a, b); \ + push(stack, op); \ + }, \ + aliasAnalysisFromSchema()), \ + OperatorGeneratorArgs( \ + TORCH_SELECTIVE_SCHEMA( \ + #aten_op \ + ".float_float(float a, float b) -> " #int_float_result), \ + [](Stack& stack) { \ + double a, b; \ + pop(stack, a, b); \ + push(stack, op); \ + }, \ + aliasAnalysisFromSchema()), \ + OperatorGeneratorArgs( \ + TORCH_SELECTIVE_SCHEMA( \ + #aten_op \ + ".complex_complex(complex a, complex b) -> " #complex_result), \ + [](Stack& stack) { \ + c10::complex a, b; \ + pop(stack, a, b); \ + push(stack, op); \ + }, \ + aliasAnalysisFromSchema()) + +// define implementations for primitive number ops +#define DEFINE_GENERIC_OP(aten_op, int_op, float_op, int_result, float_result) \ + OperatorGeneratorArgs( \ + TORCH_SELECTIVE_SCHEMA(#aten_op ".int(int a, int b) -> " #int_result), \ + [](Stack& stack) { \ + int64_t a, b; \ + pop(stack, a, b); \ + push(stack, int_op); \ + }, \ + aliasAnalysisFromSchema()), \ + OperatorGeneratorArgs( \ + TORCH_SELECTIVE_SCHEMA( \ + #aten_op ".float(float a, float b) -> " #float_result), \ + [](Stack& stack) { \ + double a, b; \ + pop(stack, a, b); \ + push(stack, float_op); \ + }, \ + aliasAnalysisFromSchema()) + +#define DEFINE_INT_FLOAT_OP(aten_op, op, result) \ + OperatorGeneratorArgs( \ + TORCH_SELECTIVE_SCHEMA(#aten_op \ + ".int_float(int a, float b) -> " #result), \ + [](Stack& stack) { \ + int64_t a; \ + double b; \ + pop(stack, a, b); \ + push(stack, op); \ + }, \ + aliasAnalysisFromSchema()), \ + OperatorGeneratorArgs( \ + TORCH_SELECTIVE_SCHEMA(#aten_op \ + ".float_int(float a, int b) -> " #result), \ + [](Stack& stack) { \ + double a; \ + int64_t b; \ + pop(stack, a, b); \ + push(stack, op); \ + }, \ + aliasAnalysisFromSchema()) + +#define DEFINE_INT_OP(aten_op, op) \ + OperatorGeneratorArgs( \ + TORCH_SELECTIVE_SCHEMA(#aten_op ".int(int a, int b) -> int"), \ + [](Stack& stack) { \ + int64_t a, b; \ + pop(stack, a, b); \ + push(stack, op); /* NOLINT(hicpp-signed-bitwise) */ \ + }, \ + aliasAnalysisFromSchema()) + +#define DEFINE_STR_CMP_OP(aten_op, op) \ + OperatorGeneratorArgs( \ + TORCH_SELECTIVE_SCHEMA(#aten_op ".str(str a, str b) -> bool"), \ + [](Stack& stack) { \ + auto b = pop(stack).toStringRef(); \ + auto a = pop(stack).toStringRef(); \ + push(stack, op); \ + }, \ + aliasAnalysisFromSchema()) + +// define a primitive op over Scalar operands. 
+// it's necessary to register this overload following +// int/float variations to avoid trapping Scalar args +// in unintended implicit conversions +#define DEFINE_SCALAR_BINARY_OP_AVOID_COLLISION_GENERIC( \ + aten_op, int_op, float_op, result, string_val) \ + OperatorGeneratorArgs( \ + TORCH_SELECTIVE_SCHEMA(#aten_op string_val \ + "(Scalar a, Scalar b) -> " #result), \ + [](Stack& stack) { \ + IValue x, y; \ + pop(stack, x, y); \ + if (x.isDouble()) { \ + if (y.isDouble()) { \ + double a = x.toDouble(); \ + double b = y.toDouble(); \ + push(stack, float_op); \ + } else { \ + double a = x.toDouble(); \ + int64_t b = y.toInt(); \ + push(stack, float_op); \ + } \ + } else { \ + if (y.isDouble()) { \ + int64_t a = x.toInt(); \ + double b = y.toDouble(); \ + push(stack, float_op); \ + } else { \ + int64_t a = x.toInt(); \ + int64_t b = y.toInt(); \ + push(stack, int_op); \ + } \ + } \ + }, \ + aliasAnalysisFromSchema()) + +#define DEFINE_SCALAR_BINARY_OP(aten_op, int_op, float_op, result) \ + DEFINE_SCALAR_BINARY_OP_AVOID_COLLISION_GENERIC( \ + aten_op, int_op, float_op, result, "") + +#define DEFINE_SCALAR_BINARY_OP_AVOID_COLLISION( \ + aten_op, int_op, float_op, result) \ + DEFINE_SCALAR_BINARY_OP_AVOID_COLLISION_GENERIC( \ + aten_op, int_op, float_op, result, ".Scalar_Scalar") + +#define DEFINE_BINARY_OP(aten_op, op) \ + DEFINE_GENERIC_OP(aten_op, op, op, int, float), \ + DEFINE_INT_FLOAT_OP(aten_op, op, float), \ + DEFINE_SCALAR_BINARY_OP(aten_op, op, op, Scalar) + +#define DEFINE_BINARY_FLOAT_OP(aten_op, op) \ + DEFINE_GENERIC_OP(aten_op, op, op, float, float), \ + DEFINE_INT_FLOAT_OP(aten_op, op, float), \ + DEFINE_SCALAR_BINARY_OP(aten_op, op, op, float) + +#define DEFINE_COMPARISON_OP(aten_op, op) \ + DEFINE_GENERIC_OP(aten_op, op, op, bool, bool), \ + DEFINE_INT_FLOAT_OP(aten_op, op, bool), \ + DEFINE_SCALAR_BINARY_OP(aten_op, op, op, bool), \ + DEFINE_STR_CMP_OP(aten_op, op) + +#define DEFINE_UNARY_INT_OP(aten_op, op, result) \ + OperatorGeneratorArgs( \ + TORCH_SELECTIVE_SCHEMA(#aten_op ".int(int a) -> " #result), \ + [](Stack& stack) { \ + int64_t a; \ + pop(stack, a); \ + push(stack, op); \ + }, \ + aliasAnalysisFromSchema()) + +#define DEFINE_UNARY_FLOAT_OP(aten_op, op, result) \ + OperatorGeneratorArgs( \ + TORCH_SELECTIVE_SCHEMA(#aten_op ".float(float a) -> " #result), \ + [](Stack& stack) { \ + double a; \ + pop(stack, a); \ + push(stack, op); \ + }, \ + aliasAnalysisFromSchema()) + +#define DEFINE_UNARY_OP(aten_op, op, int_result, float_result) \ + DEFINE_UNARY_INT_OP(aten_op, op, int_result), \ + DEFINE_UNARY_FLOAT_OP(aten_op, op, float_result), \ + OperatorGeneratorArgs( \ + TORCH_SELECTIVE_SCHEMA(#aten_op ".Scalar(Scalar a) -> Scalar"), \ + [](Stack& stack) { \ + IValue x; \ + pop(stack, x); \ + if (x.isDouble()) { \ + double a = x.toDouble(); \ + push(stack, static_cast(op)); \ + } else { \ + int64_t a = x.toInt(); \ + push(stack, static_cast(op)); \ + } \ + }, \ + aliasAnalysisFromSchema()) +#define DEFINE_BOOL_OP(aten_op, op) \ + OperatorGeneratorArgs( \ + TORCH_SELECTIVE_SCHEMA(#aten_op ".bool(bool a, bool b) -> bool"), \ + [](Stack& stack) { \ + bool a, b; \ + pop(stack, a, b); \ + push(stack, op); \ + }, \ + aliasAnalysisFromSchema()) +#define DEFINE_STRING_OP(op_name, string_op, result) \ + OperatorGeneratorArgs( \ + TORCH_SELECTIVE_SCHEMA(#op_name ".str(str a, str b) ->" #result), \ + [](Stack& stack) { \ + auto b = pop(stack).toStringRef(); \ + auto a = pop(stack).toStringRef(); \ + push(stack, string_op); \ + }, \ + aliasAnalysisFromSchema()) + 
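+//
+// Illustration (not part of the original header): the macros above stamp out
+// one OperatorGeneratorArgs entry per operand-type overload. A hypothetical
+// invocation
+//
+//   DEFINE_INT_OP(aten::__my_op__, a + b)
+//
+// expands to roughly
+//
+//   OperatorGeneratorArgs(
+//       TORCH_SELECTIVE_SCHEMA("aten::__my_op__.int(int a, int b) -> int"),
+//       [](Stack& stack) {
+//         int64_t a, b;
+//         pop(stack, a, b);
+//         push(stack, a + b);
+//       },
+//       aliasAnalysisFromSchema())
+//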
+//----------------------------------------------------------------------------- +//----------------------------------------------------------------------------- +//----------------------------------------------------------------------------- +//----------------------------------------------------------------------------- +#define DEFINE_UNARY_COMPLEX_OP(aten_op, op, result) \ + OperatorGeneratorArgs( \ + TORCH_SELECTIVE_SCHEMA(#aten_op ".complex(complex a) -> " #result), \ + [](Stack& stack) { \ + c10::complex a; \ + pop(stack, a); \ + push(stack, op); \ + }, \ + aliasAnalysisFromSchema()) + +// Some complex unary ops (like abs, angle) return real valued output, but most +// other unary ops return complex valued output. So, this macro is used in the +// former case where we can explicitly pass complex_result_cast argument, which +// is set to c10::complex in the macro `DEFINE_UNARY_OP_WITH_COMPLEX` +// defined below. +#define DEFINE_UNARY_OP_WITH_COMPLEX_CAST( \ + aten_op, \ + op, \ + int_result, \ + float_result, \ + complex_result, \ + complex_result_cast) \ + DEFINE_UNARY_INT_OP(aten_op, op, int_result), \ + DEFINE_UNARY_FLOAT_OP(aten_op, op, float_result), \ + DEFINE_UNARY_COMPLEX_OP(aten_op, op, complex_result), \ + OperatorGeneratorArgs( \ + TORCH_SELECTIVE_SCHEMA(#aten_op ".Scalar(Scalar a) -> Scalar"), \ + [](Stack& stack) { \ + IValue x; \ + pop(stack, x); \ + if (x.isDouble()) { \ + double a = x.toDouble(); \ + push(stack, static_cast(op)); \ + } else if (x.isComplexDouble()) { \ + c10::complex a = x.toComplexDouble(); \ + push(stack, static_cast(op)); \ + } else { \ + int64_t a = x.toInt(); \ + push(stack, static_cast(op)); \ + } \ + }, \ + aliasAnalysisFromSchema()) + +#define DEFINE_UNARY_OP_WITH_COMPLEX(aten_op, op, int_result, float_result) \ + DEFINE_UNARY_OP_WITH_COMPLEX_CAST( \ + aten_op, op, int_result, float_result, complex, c10::complex) + +#define DEFINE_GENERIC_OP_WITH_COMPLEX( \ + aten_op, \ + int_op, \ + float_op, \ + complex_op, \ + int_result, \ + float_result, \ + complex_result) \ + OperatorGeneratorArgs( \ + TORCH_SELECTIVE_SCHEMA(#aten_op ".int(int a, int b) -> " #int_result), \ + [](Stack& stack) { \ + int64_t a, b; \ + pop(stack, a, b); \ + push(stack, int_op); \ + }, \ + aliasAnalysisFromSchema()), \ + OperatorGeneratorArgs( \ + TORCH_SELECTIVE_SCHEMA( \ + #aten_op ".complex(complex a, complex b) -> " #complex_result), \ + [](Stack& stack) { \ + c10::complex a, b; \ + pop(stack, a, b); \ + push(stack, complex_op); \ + }, \ + aliasAnalysisFromSchema()), \ + OperatorGeneratorArgs( \ + TORCH_SELECTIVE_SCHEMA( \ + #aten_op ".float(float a, float b) -> " #float_result), \ + [](Stack& stack) { \ + double a, b; \ + pop(stack, a, b); \ + push(stack, float_op); \ + }, \ + aliasAnalysisFromSchema()) + +#define DEFINE_INT_COMPLEX_OP(aten_op, op, result) \ + OperatorGeneratorArgs( \ + TORCH_SELECTIVE_SCHEMA(#aten_op \ + ".int_complex(int a, complex b) -> " #result), \ + [](Stack& stack) { \ + int64_t a; \ + c10::complex b; \ + pop(stack, a, b); \ + push(stack, op); \ + }, \ + aliasAnalysisFromSchema()), \ + OperatorGeneratorArgs( \ + TORCH_SELECTIVE_SCHEMA( \ + #aten_op ".complex_int(complex a, int b) -> " #result), \ + [](Stack& stack) { \ + c10::complex a; \ + int64_t b; \ + pop(stack, a, b); \ + push(stack, op); \ + }, \ + aliasAnalysisFromSchema()) + +#define DEFINE_FLOAT_COMPLEX_OP(aten_op, op, result) \ + OperatorGeneratorArgs( \ + TORCH_SELECTIVE_SCHEMA( \ + #aten_op ".float_complex(float a, complex b) -> " #result), \ + [](Stack& stack) { \ + double a; \ + 
c10::complex b; \ + pop(stack, a, b); \ + push(stack, op); \ + }, \ + aliasAnalysisFromSchema()), \ + OperatorGeneratorArgs( \ + TORCH_SELECTIVE_SCHEMA( \ + #aten_op ".complex_float(complex a, float b) -> " #result), \ + [](Stack& stack) { \ + c10::complex a; \ + double b; \ + pop(stack, a, b); \ + push(stack, op); \ + }, \ + aliasAnalysisFromSchema()) + +#define DEFINE_SCALAR_BINARY_OP_WITH_COMPLEX_AVOID_COLLISION_GENERIC( \ + aten_op, int_op, float_op, complex_op, result, string_val) \ + OperatorGeneratorArgs( \ + TORCH_SELECTIVE_SCHEMA(#aten_op string_val \ + "(Scalar a, Scalar b) -> " #result), \ + [](Stack& stack) { \ + IValue x, y; \ + pop(stack, x, y); \ + if (x.isComplexDouble()) { \ + c10::complex a = x.toComplexDouble(); \ + if (y.isComplexDouble()) { \ + c10::complex b = y.toComplexDouble(); \ + push(stack, complex_op); \ + } else if (y.isDouble()) { \ + double b = y.toDouble(); \ + push(stack, complex_op); \ + } else { \ + int64_t b = y.toInt(); \ + push(stack, complex_op); \ + } \ + } else if (x.isDouble()) { \ + double a = x.toDouble(); \ + if (y.isComplexDouble()) { \ + c10::complex b = y.toComplexDouble(); \ + push(stack, complex_op); \ + } else if (y.isDouble()) { \ + double b = y.toDouble(); \ + push(stack, float_op); \ + } else { \ + int64_t b = y.toInt(); \ + push(stack, float_op); \ + } \ + } else { \ + int64_t a = x.toInt(); \ + if (y.isComplexDouble()) { \ + c10::complex b = y.toComplexDouble(); \ + push(stack, complex_op); \ + } else if (y.isDouble()) { \ + double b = y.toDouble(); \ + push(stack, float_op); \ + } else { \ + int64_t b = y.toInt(); \ + push(stack, int_op); \ + } \ + } \ + }, \ + aliasAnalysisFromSchema()) + +#define DEFINE_SCALAR_BINARY_OP_WITH_COMPLEX_WITHOUT_INT_COMPLEX_PAIR( \ + aten_op, int_op, float_op, complex_op, result) \ + OperatorGeneratorArgs( \ + TORCH_SELECTIVE_SCHEMA(#aten_op "(Scalar a, Scalar b) -> " #result), \ + [](Stack& stack) { \ + IValue x, y; \ + pop(stack, x, y); \ + if (x.isComplexDouble()) { \ + c10::complex a = x.toComplexDouble(); \ + if (y.isComplexDouble()) { \ + c10::complex b = y.toComplexDouble(); \ + push(stack, complex_op); \ + } else if (y.isDouble()) { \ + double b = y.toDouble(); \ + push(stack, complex_op); \ + } \ + } else if (x.isDouble()) { \ + double a = x.toDouble(); \ + if (y.isComplexDouble()) { \ + c10::complex b = y.toComplexDouble(); \ + push(stack, complex_op); \ + } else if (y.isDouble()) { \ + double b = y.toDouble(); \ + push(stack, float_op); \ + } else { \ + int64_t b = y.toInt(); \ + push(stack, float_op); \ + } \ + } else { \ + int64_t a = x.toInt(); \ + if (y.isDouble()) { \ + double b = y.toDouble(); \ + push(stack, float_op); \ + } else if (y.isInt()) { \ + int64_t b = y.toInt(); \ + push(stack, int_op); \ + } \ + } \ + }, \ + aliasAnalysisFromSchema()) + +#define DEFINE_SCALAR_BINARY_OP_WITH_COMPLEX( \ + aten_op, int_op, float_op, complex_op, result) \ + DEFINE_SCALAR_BINARY_OP_WITH_COMPLEX_AVOID_COLLISION_GENERIC( \ + aten_op, int_op, float_op, complex_op, result, "") + +#define DEFINE_BINARY_OP_WITH_COMPLEX(aten_op, op) \ + DEFINE_GENERIC_OP_WITH_COMPLEX(aten_op, op, op, op, int, float, complex), \ + DEFINE_INT_COMPLEX_OP(aten_op, op, complex), \ + DEFINE_FLOAT_COMPLEX_OP(aten_op, op, complex), \ + DEFINE_INT_FLOAT_OP(aten_op, op, float), \ + DEFINE_SCALAR_BINARY_OP_WITH_COMPLEX(aten_op, op, op, op, Scalar) + +#define DEFINE_COMPARISON_OP_WITH_COMPLEX(aten_op, op) \ + DEFINE_GENERIC_OP_WITH_COMPLEX(aten_op, op, op, op, bool, bool, bool), \ + DEFINE_INT_FLOAT_OP(aten_op, op, bool), \ + 
DEFINE_FLOAT_COMPLEX_OP(aten_op, op, bool), \ + DEFINE_SCALAR_BINARY_OP_WITH_COMPLEX_WITHOUT_INT_COMPLEX_PAIR( \ + aten_op, op, op, op, bool), \ + DEFINE_STR_CMP_OP(aten_op, op) + +TORCH_API at::Generator make_generator_for_device( + c10::Device device, + c10::optional seed = c10::nullopt); + +} // namespace torch::jit diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/script_profile.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/script_profile.h new file mode 100644 index 0000000000000000000000000000000000000000..8e08255687cf79109dd2247f90a01c9704415f3d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/script_profile.h @@ -0,0 +1,99 @@ +#pragma once + +#include +#include +#include + +#include +#include +#include +#include + +namespace torch::jit { +namespace profiling { + +struct Datapoint { + using Timepoint = std::chrono::time_point; + SourceRange sourceRange; + Timepoint start; + Timepoint end; + + explicit Datapoint(SourceRange sr) + : sourceRange(std::move(sr)), start(std::chrono::steady_clock::now()) {} +}; + +class TORCH_API InstructionSpan { + public: + explicit InstructionSpan(Node&); + ~InstructionSpan(); + InstructionSpan(InstructionSpan&&) = delete; + InstructionSpan& operator=(InstructionSpan&&) = delete; + + private: + std::unique_ptr datapoint_; +}; + +} // namespace profiling + +struct TORCH_API InstructionStats : public CustomClassHolder { + int64_t count{0}; + std::chrono::nanoseconds duration{0}; +}; + +class TORCH_API SourceStats : public CustomClassHolder { + public: + using LineMap = c10::Dict>; + + SourceStats(SourceRef source, LineMap lineMap) + : source_(std::move(source)), lineMap_(std::move(lineMap)) {} + + const SourceRef& getSourceRef() const { + return source_; + } + + const LineMap& getLineMap() const { + return lineMap_; + } + + private: + SourceRef source_; + LineMap lineMap_; +}; + +/** + * ScriptProfile is an underlying C++ implementation for TorchScript profiling. + * The profiling section is specified by calling enable() and disable(): + * + * ... + * scriptProfile.enable(); + * ... + * (scripts) + * ... + * scriptProfile.disable(); + * ... + * + * To retrieve collected runtime data, users may call dumpStats() and do + * arbitrary filtering on the data they want. Note that dumpStats() should + * not be called inside a profiling section. + * In general, stats are aggregated per source function body, and then by line + * number. + */ +class TORCH_API ScriptProfile : public CustomClassHolder { + // Aggregates datapoints by function source id, then by line number. 
+ using LineMap = std::map; + using SourceMap = std::map>; + + public: + void enable(); + void disable(); + const SourceMap& dumpStats(); + void addDatapoint(std::shared_ptr); + ~ScriptProfile() override; + + private: + bool enabled_{false}; + std::vector> datapoints_; + SourceMap sourceMap_; +}; + +} // namespace torch::jit diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/serialized_shape_function_registry.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/serialized_shape_function_registry.h new file mode 100644 index 0000000000000000000000000000000000000000..e822f3f93e3d29d533f27e8565d7a0de787f33b5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/serialized_shape_function_registry.h @@ -0,0 +1,15 @@ +#pragma once + +#include +#include + +namespace torch::jit { + +TORCH_API const std::string& GetSerializedShapeFunctions(); + +TORCH_API const OperatorMap& GetShapeFunctionMappings(); + +TORCH_API const OperatorMap>& +GetBoundedShapeMappings(); + +} // namespace torch::jit diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/shape_function_registry.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/shape_function_registry.h new file mode 100644 index 0000000000000000000000000000000000000000..533b1f11020763e2d6d1d05734c6a4b09bcc44aa --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/shape_function_registry.h @@ -0,0 +1,12 @@ +#pragma once + +#include +#include + +namespace torch::jit { + +TORCH_API const std::string& GetSerializedFuncs(); + +TORCH_API const OperatorMap& GetFuncMapping(); + +} // namespace torch::jit diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/simple_graph_executor_impl.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/simple_graph_executor_impl.h new file mode 100644 index 0000000000000000000000000000000000000000..34272000f0d1a3e2e808ce2bbe27ec4ab299380e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/simple_graph_executor_impl.h @@ -0,0 +1,23 @@ +#pragma once +#include +#include +#include + +namespace torch::jit { + +struct TORCH_API SimpleGraphExecutorImpl : public GraphExecutorImplBase { + SimpleGraphExecutorImpl( + const std::shared_ptr& graph, + std::string function_name); + + const ExecutionPlan& getPlanFor( + Stack& stack, + c10::optional remaining_bailout_depth) override; + GraphExecutorState getDebugState() override; + ~SimpleGraphExecutorImpl() override = default; + + private: + c10::optional execution_plan_; +}; + +} // namespace torch::jit diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/symbolic_script.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/symbolic_script.h new file mode 100644 index 0000000000000000000000000000000000000000..64e0d6661baebc3bb0c82831a8566dba3e0112f6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/symbolic_script.h @@ -0,0 +1,18 @@ +#pragma once +// This file is temporary until native_functions.yaml and derivatives.yaml are +// merged. 
Ideally this should all go into native_functions.yaml + +#include +#include +#include + +namespace torch::jit { +struct GradientPair { + std::shared_ptr forward; + std::shared_ptr backward; +}; + +TORCH_API c10::optional gradientInfoForSchema( + const FunctionSchema& schema); +TORCH_API bool hasGradientInfoForSchema(const FunctionSchema& schema); +} // namespace torch::jit diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/vararg_functions.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/vararg_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..0be53d4ffeb28b910e1c3f9d3eb1115a7e527784 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/vararg_functions.h @@ -0,0 +1,41 @@ +#pragma once +#include +#include +#include +#include +#include + +namespace torch::jit { + +void tupleUnpack(Stack& stack); + +void format(Stack& stack, size_t num_inputs); + +void einsum(Stack& stack, size_t num_inputs); + +void percentFormat(Stack& stack, size_t num_inputs); + +void listUnpack(Stack& stack, size_t num_outputs); + +void tupleConstruct(Stack& stack, size_t num_inputs); + +void namedTupleConstruct(Stack& stack, c10::TypePtr type, size_t num_inputs); + +void listConstruct(Stack& stack, const c10::Type& list_type, size_t num_inputs); + +void dictConstruct(Stack& stack, const c10::Type& type, size_t num_inputs); + +// as weak_ref will create a Object with a non-owning CompilationUnit reference, +// for use as a constant in the Graph to avoid a reference cycle +void createObject( + Stack& stack, + const at::ClassTypePtr& type, + bool as_weak_ref = false); + +void isinstance(Stack& stack, at::ArrayRef types); + +void tupleSlice(Stack& stack, size_t begin, size_t end); + +void dequantize(Stack& stack); + +} // namespace torch::jit diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/variable_tensor_list.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/variable_tensor_list.h new file mode 100644 index 0000000000000000000000000000000000000000..e8dcd4f2c5b0b95f0f727e97794ddc321fe49fc7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/variable_tensor_list.h @@ -0,0 +1,17 @@ +#pragma once +#include + +namespace torch::jit { + +// a wrapper to mark places where we expect all the at::Tensors to be +// variables +struct variable_tensor_list : public std::vector { + variable_tensor_list() = default; + template + variable_tensor_list(InputIt first, InputIt last) + : std::vector(first, last) {} + explicit variable_tensor_list(std::vector&& tensor) + : std::vector(std::move(tensor)) {} +}; + +} // namespace torch::jit diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/testing/file_check.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/testing/file_check.h new file mode 100644 index 0000000000000000000000000000000000000000..6e9290f5130baffe4c1fbbadb61e80b6c88d46d4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/testing/file_check.h @@ -0,0 +1,81 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace jit { + +struct Graph; + +namespace testing { + +struct FileCheckImpl; + +struct FileCheck { + public: + TORCH_API explicit FileCheck(); + TORCH_API ~FileCheck(); + + // Run FileCheck against test string + TORCH_API void run(const std::string& 
test_string); + + // Run FileCheck against dump of graph IR + TORCH_API void run(const Graph& graph); + + // Parsing input checks string and run against test string / dump of graph IR + TORCH_API void run( + const std::string& input_checks_string, + const std::string& test_string); + TORCH_API void run( + const std::string& input_checks_string, + const Graph& graph); + + // Checks that the string occurs, starting at the end of the most recent match + TORCH_API FileCheck* check(const std::string& str); + + // Checks that the string does not occur between the previous match and next + // match. Consecutive check_nots test against the same previous match and next + // match + TORCH_API FileCheck* check_not(const std::string& str); + + // Checks that the string occurs on the same line as the previous match + TORCH_API FileCheck* check_same(const std::string& str); + + // Checks that the string occurs on the line immediately following the + // previous match + TORCH_API FileCheck* check_next(const std::string& str); + + // Checks that the string occurs count number of times, starting at the end + // of the previous match. If exactly is true, checks that there are exactly + // count many matches + TORCH_API FileCheck* check_count( + const std::string& str, + size_t count, + bool exactly = false); + + // A series of consecutive check_dags get turned into a group of checks + // which can appear in any order relative to each other. The checks begin + // at the end of the previous match, and the match for the check_dag group + // is the minimum match of all individual checks to the maximum match of all + // individual checks. + TORCH_API FileCheck* check_dag(const std::string& str); + + // Checks that source token is highlighted in str (usually an error message). + TORCH_API FileCheck* check_source_highlighted(const std::string& str); + + // Checks that the regex matched string occurs, starting at the end of the + // most recent match + TORCH_API FileCheck* check_regex(const std::string& str); + + // reset checks + TORCH_API void reset(); + + private: + bool has_run = false; + std::unique_ptr fcImpl; +}; +} // namespace testing +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/testing/hooks_for_testing.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/testing/hooks_for_testing.h new file mode 100644 index 0000000000000000000000000000000000000000..108dea3f1f72d79433faf1b9ddb56f54727ac6e3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/testing/hooks_for_testing.h @@ -0,0 +1,21 @@ +#pragma once +#include +#include +#include +#include + +namespace torch { +namespace jit { +struct Module; + +using ModuleHook = std::function; +using FunctionHook = std::function; + +TORCH_API void didFinishEmitModule(Module module); +TORCH_API void didFinishEmitFunction(StrongFunctionPtr defined); +TORCH_API void setEmitHooks(ModuleHook for_module, FunctionHook for_fn); + +TORCH_API std::pair getEmitHooks(); + +} // namespace jit +} // namespace torch
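+
+// Appended usage sketch (not part of the original header): a test can install
+// emit hooks to observe every module and function the compiler finishes
+// emitting. The lambdas and the captured counter are made up for illustration.
+//
+//   int emitted_functions = 0;
+//   torch::jit::setEmitHooks(
+//       [](torch::jit::Module m) { /* inspect the emitted module */ },
+//       [&](torch::jit::StrongFunctionPtr fn) { ++emitted_functions; });
+//   // ... compile some TorchScript ...
+//   auto hooks = torch::jit::getEmitHooks(); // pair of {ModuleHook, FunctionHook}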