diff --git a/ckpts/universal/global_step20/zero/14.mlp.dense_4h_to_h.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/14.mlp.dense_4h_to_h.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..7efaa37e60ea055e89deab3cc769cbf4f98803fa --- /dev/null +++ b/ckpts/universal/global_step20/zero/14.mlp.dense_4h_to_h.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1a8575ec2d83c8ea458a1f0ae4819061f99fb199d013258aab3988a68cdd7783 +size 33555627 diff --git a/ckpts/universal/global_step20/zero/14.mlp.dense_4h_to_h.weight/fp32.pt b/ckpts/universal/global_step20/zero/14.mlp.dense_4h_to_h.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..99175ab533d90cf89ebd344385c8533d8f36e99d --- /dev/null +++ b/ckpts/universal/global_step20/zero/14.mlp.dense_4h_to_h.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b713ee72f75b4debab054e71a88b7a269c0a95a44ba53ec3022093be4a45b478 +size 33555533 diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/annotate_warns.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/annotate_warns.h new file mode 100644 index 0000000000000000000000000000000000000000..18e9f67641e048fa78865716237c385dc5ba2321 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/annotate_warns.h @@ -0,0 +1,11 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +TORCH_API void AnnotateWarns(const std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/autocast.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/autocast.h new file mode 100644 index 0000000000000000000000000000000000000000..ca21f2c60d0315a356df82791a1ba60f9cfc0123 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/autocast.h @@ -0,0 +1,15 @@ + +#pragma once + +#include + +namespace torch { +namespace jit { + +TORCH_API void Autocast(const std::shared_ptr& graph); + +TORCH_API bool setAutocastMode(bool value); +TORCH_API bool autocastEnabled(); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/bailout_graph.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/bailout_graph.h new file mode 100644 index 0000000000000000000000000000000000000000..266f54023e1a153492f6f37160c04c7e013e588d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/bailout_graph.h @@ -0,0 +1,34 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include +#include + +namespace torch { +namespace jit { + +// Replaces prim::Guard nodes with prim::BailOut nodes and +// computes sets of inputs needed to resume execution at +// bailout points +TORCH_API void InsertBailOuts(std::shared_ptr graph); + +// Builds a bailout graph into `target` (which is an empty graph) +// for a given bailout point `bailout_index` +// from the original graph `orig` (the original unoptimized graph) +// BailOut graphs allow Interpreter to resume +// execution of the (un/de)optimized graph (i.e. +// a graph that doesn't rely on any assumptions derived from +// on profiling information) from a given BailOut point +// should any of the assumptions fail for an actual input. 
+TORCH_API std::shared_ptr BuildBailOutGraphFrom( + int64_t bailout_index, + const std::shared_ptr& orig, + const std::shared_ptr& target); +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/batch_mm.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/batch_mm.h new file mode 100644 index 0000000000000000000000000000000000000000..643134750cc483439685c4dde566ead0d5bc7a3a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/batch_mm.h @@ -0,0 +1,11 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +TORCH_API void BatchMM(std::shared_ptr& graph); + +} +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/canonicalize_graph_fuser_ops.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/canonicalize_graph_fuser_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..38ae569dbec31541b4ac032fb1637aead4e43204 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/canonicalize_graph_fuser_ops.h @@ -0,0 +1,11 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +TORCH_API void CanonicalizeOps(const std::shared_ptr& graph); + +} +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/check_strict_fusion.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/check_strict_fusion.h new file mode 100644 index 0000000000000000000000000000000000000000..87e86d8a7e4b2965a16422cd952418aef7e7db5e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/check_strict_fusion.h @@ -0,0 +1,12 @@ + +#pragma once + +#include + +namespace torch { +namespace jit { + +TORCH_API void CheckStrictFusion(std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/clear_profiling.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/clear_profiling.h new file mode 100644 index 0000000000000000000000000000000000000000..7dee9bdb52ad6c460366953f696480140f219fb6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/clear_profiling.h @@ -0,0 +1,19 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { + +TORCH_API void unprofileGraphInputs(const std::shared_ptr& graph); +TORCH_API void unprofileBlock(Block* start_block); +// Unprofiles all the node outputs in a block. + +TORCH_API void ClearProfilingInformation(const std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/clear_undefinedness.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/clear_undefinedness.h new file mode 100644 index 0000000000000000000000000000000000000000..24add48764c58a143ed755c41ec10f315a1b3207 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/clear_undefinedness.h @@ -0,0 +1,24 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { + +// Undefinedness makes argument matching fail for regular tensor operations +// if 1+ arguments are undefined or possibly undefined tensors. 
+// Technically, undefined tensors are **not** tensors as the regular tensor +// operations do not know how to handle them. +// However, in practice, there are guards and conversion operators that +// **always** gate regular operations if undefined tensors may be present +// Eventually, we would love to move to the world where we use optionals +// in lieu of undefined tensors. +// When this happens, this pass will be removed +TORCH_API void ClearUndefinedness(const std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/common_subexpression_elimination.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/common_subexpression_elimination.h new file mode 100644 index 0000000000000000000000000000000000000000..9c8956029dace1988981fa98e8a05f9498c25f81 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/common_subexpression_elimination.h @@ -0,0 +1,11 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +TORCH_API bool EliminateCommonSubexpression( + const std::shared_ptr& graph); +} +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/constant_pooling.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/constant_pooling.h new file mode 100644 index 0000000000000000000000000000000000000000..6eb4ca16077eb41287c5009ebc52744eaf235a62 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/constant_pooling.h @@ -0,0 +1,11 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +TORCH_API void ConstantPooling(const std::shared_ptr& graph); + +} +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/create_autodiff_subgraphs.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/create_autodiff_subgraphs.h new file mode 100644 index 0000000000000000000000000000000000000000..481b2aa352107bc74f776b7bcd3bb24251b80c0b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/create_autodiff_subgraphs.h @@ -0,0 +1,19 @@ +#pragma once + +#include +#include + +#include + +namespace torch { +namespace jit { + +// insert GraphExecutor nodes that group together +// subgraphs that are differentiable by the jit's autodiff passes +// threshold - minimum number of nodes that will appear in a block +// returns all differentiable blocks that have been found +TORCH_API std::vector CreateAutodiffSubgraphs( + const std::shared_ptr& graph, + size_t threshold = 2); +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/device_type_analysis.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/device_type_analysis.h new file mode 100644 index 0000000000000000000000000000000000000000..159592a6c6672dd2fccf0768496aea1de44f1ff9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/device_type_analysis.h @@ -0,0 +1,13 @@ +#pragma once + +#include + +namespace torch { +namespace jit { +struct Graph; + +// Propagates Device type info throughout the given graph. 
+TORCH_API bool DeviceTypePropagation(std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/dtype_analysis.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/dtype_analysis.h new file mode 100644 index 0000000000000000000000000000000000000000..b4bcdcdc7ae7ea32abf2bfb49a07be8ec48dd82d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/dtype_analysis.h @@ -0,0 +1,17 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace jit { +struct Graph; + +// Propagate tensor properties (e.g., dtype, device, is_contiguous, layout) +// propagation on all tensor objects. Currently, we only support dtype +// propagation +TORCH_API bool DtypePropagation(std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/eliminate_no_ops.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/eliminate_no_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..0087d306c23bc9b4580d669ec8009ec8b83b9e79 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/eliminate_no_ops.h @@ -0,0 +1,17 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Remove ops that do nothing on the forward pass (like aten::detach). +// This pass is invoked as a part of freeze_module. +// This function also takes a set of custom ops to eliminate. All ops in this +// set must take their output as their first input, i.e. x = f(x, ...) +TORCH_API bool EliminateNoOps( + std::shared_ptr& graph, + std::unordered_set custom_ops = {}); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/erase_number_types.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/erase_number_types.h new file mode 100644 index 0000000000000000000000000000000000000000..4aef1f5570694d20141ecc2e04a37eaf2ef0d3b6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/erase_number_types.h @@ -0,0 +1,23 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Erase NumberType information. This is necessary for and only used in +// exporting to ONNX. This pass ensures that no remaining Values have +// NumberType types, replacing them with tensors. +// The following things are done to erase NumberType info: +// - NumberType outputs are changed to DynamicType. +// - prim::Constant nodes which are numbers get changed into 0-dim tensors of +// the corresponding type +// - prim::TensorToNum, aten::Float, aten::Int and prim::NumToTensor nodes +// are erased. +// +// The pass assumes that DCE will be called sometime after. 
+TORCH_API void EraseNumberTypes(const std::shared_ptr& graph); +TORCH_API void EraseNumberTypesOnBlock(Block* block); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fixup_trace_scope_blocks.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fixup_trace_scope_blocks.h new file mode 100644 index 0000000000000000000000000000000000000000..472d95843a1c6dc921b072b59ea013fcbb6d57ed --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fixup_trace_scope_blocks.h @@ -0,0 +1,47 @@ +#pragma once + +#include +#include + +namespace torch { +namespace jit { + +// Directly after tracing, we have an ill-formed graph with blocks inserted. +// Example: +// +// graph(%self : ClassType, +// %input.1 : Float(3, 4)): +// %1 : ClassType = prim::GetAttr[name="relu1"](%self) +// %2 : ClassType = prim::GetAttr[name="relu2"](%self) +// %3 : ClassType = prim::GetAttr[name="rrr"](%2) +// = prim::TracedModuleForward[scope="__module.relu1"]() +// block0(): +// %input : Float(3, 4) = aten::relu(%input.1), +// -> () +// = prim::TracedModuleForward[scope="__module.relu2"](), +// block0(): +// = prim::TracedModuleForward[scope="__module.relu2.rrr"](), +// block0(): +// %6 : Float(3, 4) = aten::relu(%input), +// -> () +// -> () +// return (%6) +// +// In this pass, we: +// 1) Lift Value defs to as high of a scope as needed to ensure that +// they dominate all their uses. For example, `input` in the above +// graph needs to be lifted to the top-level block so that its use +// in the second `relu` operator is dominated. +// 2) Lambda lift the blocks. This ensures that all values used within +// each scope have their defs captured. +// 3) Convert the scope blocks into methods on their respective Modules, +// and convert TracedModuleForward nodes to CallMethod nodes into those +// methods. +// +// Then, we'll have a well-formed graph with proper method calls. +TORCH_API void FixupTraceScopeBlocks( + std::shared_ptr& graph, + Module* self); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fold_conv_bn.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fold_conv_bn.h new file mode 100644 index 0000000000000000000000000000000000000000..4032d22f2bc13f667cec5025148879cd7117cf83 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fold_conv_bn.h @@ -0,0 +1,37 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +/** \brief Fold Conv2d-BatchNorm2d into Conv2d in all methods of this + * module and all its submodules, forward is included by default. + * + * The weight and bias of the Conv2d are correspondingly updated. Should only be + * used on modules in eval mode. + */ +TORCH_API Module FoldConvBatchNorm(const Module& module); + +struct TORCH_API ConvBNParameters { + at::Tensor conv_w; + at::Tensor conv_b; + at::Tensor bn_rm; + at::Tensor bn_rv; + double bn_eps = 0.0; + at::Tensor bn_w; + at::Tensor bn_b; +}; + +/** + * Given the current weight and bias tensors of a Conv module and parameters + * of the BatchNorm module we're folding with, compute the updated values + * for the weight and bias. 
+ * + * The function is basically copied from torch/nn/utils/fusion.py + */ +TORCH_API std::tuple computeUpdatedConvWeightAndBias( + const ConvBNParameters& p); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/freeze_module.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/freeze_module.h new file mode 100644 index 0000000000000000000000000000000000000000..7a50519cd92d5b536d76c851e87d31a2c5911cf8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/freeze_module.h @@ -0,0 +1,36 @@ +/** \brief This file defines freezing Torchscript module API. + * + * This API has python-binding and can be invoked directly or as a part of + * general optimization pipeline. + */ +#pragma once + +#include +#include + +/** \brief Freeze Module, i.e., Assume all attributes are constants. + * + * Freezing module is a functionality that allows the JIT to internalize + * immutable attributes. Combined with inlining, the module is aggressively + * optimized and significant overhead is optimized away. The freezeModule API + * produces a cloned frozen module. + */ + +namespace torch { +namespace jit { + +TORCH_API Module freeze_module( + const Module& module, + std::vector preservedAttrs = std::vector(), + bool freezeInterfaces = true, + bool preserveParameters = false); + +// Clone-free version of freeze_module. This modifies the module inplace. +// Use this version to avoid extra memory usage incurred by cloning the module. +TORCH_API void freeze_module_inplace( + Module* module, + std::vector preservedAttrs = std::vector(), + bool freezeInterfaces = true, + bool preserveParameters = false); +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_concat_linear.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_concat_linear.h new file mode 100644 index 0000000000000000000000000000000000000000..d64cda0a88f2bbc0ebe585bf8b63e5bd94744743 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_concat_linear.h @@ -0,0 +1,13 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Concats multiple linear ops with the same Tensor input +// into a single linear op. 
+TORCH_API bool FrozenConcatLinear(std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_graph_optimizations.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_graph_optimizations.h new file mode 100644 index 0000000000000000000000000000000000000000..87c610781f1126602123bb951f365bc365791f9d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_graph_optimizations.h @@ -0,0 +1,22 @@ +#pragma once + +#include + +/** \brief Runs a set of Optimizations that Optimize Frozen Graphs + * + * Currently this set of optimizations is: + * - FoldFrozenConvBatchnorm + * - FoldFrozenConvAddOrSub + * - FoldFrozenConvMulOrDiv + * - FoldFrozenLinearBatchnorm + */ + +namespace torch { +namespace jit { + +TORCH_API void OptimizeFrozenGraph( + std::shared_ptr& graph, + bool optimize_numerics = true); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_linear_folding.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_linear_folding.h new file mode 100644 index 0000000000000000000000000000000000000000..bac4bedd53a6bb45999d5d276986d0946e1a2f0b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_linear_folding.h @@ -0,0 +1,14 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Fuses Linear -> BatchNormNd into a single Linear by +// folding batchnorm weights into linear weights. +// This pass only works on Frozen Graphs; otherwise it is a No-Op. +TORCH_API bool FoldFrozenLinearBatchnorm(std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_ops_to_mkldnn.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_ops_to_mkldnn.h new file mode 100644 index 0000000000000000000000000000000000000000..d6ffc36906ad7e51b3fb1bc940cac69e8fb1b433 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_ops_to_mkldnn.h @@ -0,0 +1,15 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Converts operators & their parameters to mkldnn if it is profitable +// Currently encompassing Conv2d and Conv3d, and Linear +// Op must be in float32 and mkldnn must be built +// This pass only works on frozen graph +TORCH_API void ConvertFrozenOpsToMKLDNN(std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fuse_relu.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fuse_relu.h new file mode 100644 index 0000000000000000000000000000000000000000..6577431368e9b9b8d99dd5995fc805d6b4d2d742 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fuse_relu.h @@ -0,0 +1,11 @@ +#pragma once + +#include +#include + +namespace torch { +namespace jit { +TORCH_API void FuseAddRelu(script::Module& module); +TORCH_API void FuseAddRelu(std::shared_ptr& graph); +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/graph_fuser.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/graph_fuser.h new file mode 100644 index 
0000000000000000000000000000000000000000..aafb442eafb6f5e1b1e506c06627e9e9a03a5eed --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/graph_fuser.h @@ -0,0 +1,37 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +TORCH_API bool canFuseOnCPULegacy(); +TORCH_API void overrideCanFuseOnCPULegacy(bool value); + +// NB: Be sure to run DCE before fusion, because dead instructions +// can prevent fusion opportunities from being exploited. +// On Windows will noop, NYI +TORCH_API void FuseGraph( + std::shared_ptr& graph, + bool strict_fuser_check = false); + +// \brief Custom fusion pass using a node-level callback to +// determine the inclusion of nodes in a subgraph. +// +// This helper omits aliased inputs and fusion across control flow +// boundaries. +// +// \arg graph The graph to be modified in-place +// \arg is_fusable A callback run on each fusable node in the graph. +// \arg kind The label given to the resultant fused subgraph +// \arg arg_limit The maximum number of args the resultant fused subgraph +// should have. Note: This will likely develop into a general +// post condition on the fused subgraph. +TORCH_API void CustomFuseGraph( + std::shared_ptr& graph, + const std::function& is_fusable, + Symbol kind, + size_t arg_limit = std::numeric_limits::max()); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/guard_elimination.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/guard_elimination.h new file mode 100644 index 0000000000000000000000000000000000000000..03f13140e370df55903bd9ce00ea04b624bce795 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/guard_elimination.h @@ -0,0 +1,19 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include +#include + +namespace torch { +namespace jit { + +TORCH_API void EliminateRedundantGuards(std::shared_ptr graph); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/hoist_conv_packed_params.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/hoist_conv_packed_params.h new file mode 100644 index 0000000000000000000000000000000000000000..37bfa07a1267023bcd6d2227b03dac75d0f233b1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/hoist_conv_packed_params.h @@ -0,0 +1,12 @@ +#pragma once + +#include +#include + +namespace torch { +namespace jit { + +void HoistConvPackedParams(script::Module& m); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inline_autodiff_subgraphs.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inline_autodiff_subgraphs.h new file mode 100644 index 0000000000000000000000000000000000000000..8edc81224a07321786937bdebde0d19c5c119c22 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inline_autodiff_subgraphs.h @@ -0,0 +1,15 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +TORCH_API bool canRunWithAutograd(Node* node); + +TORCH_API void InlineAutodiffSubgraphs( + std::shared_ptr& graph, + size_t threshold = 5); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inline_fork_wait.h 
b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inline_fork_wait.h new file mode 100644 index 0000000000000000000000000000000000000000..c2dbacdc4ddab7a18f495cfc9f8dc34640f65902 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inline_fork_wait.h @@ -0,0 +1,16 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Inline Fork and Wait calls. This is used, for example, in ONNX export, where +// we do not support the explicit parallelism structures and would rather +// just have a flat graph. This inlines the forked section in the fork() +// callsite and replaces uses of the result of wait() calls with the values +// produced from the (now-inlined) forked section. +TORCH_API void InlineForkWait(const std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inline_forked_closures.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inline_forked_closures.h new file mode 100644 index 0000000000000000000000000000000000000000..164c29f8b6557f50b02401704c2622dc187d86aa --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inline_forked_closures.h @@ -0,0 +1,12 @@ +#pragma once + +#include +#include + +namespace torch { +namespace jit { + +TORCH_API void inlineForkedClosures(std::shared_ptr& to_clean); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inliner.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inliner.h new file mode 100644 index 0000000000000000000000000000000000000000..b4db0ad189282d83a2e184993e1c790a41527bf3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inliner.h @@ -0,0 +1,14 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Inline function and method calls. 
+TORCH_API void Inline(Graph& graph); + +TORCH_API GraphFunction* tryToGraphFunction(Node* n); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inplace_check.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inplace_check.h new file mode 100644 index 0000000000000000000000000000000000000000..6d22d173002f5201a80a839d6d6923d713ac951e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inplace_check.h @@ -0,0 +1,11 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +TORCH_API void CheckInplace(std::shared_ptr& graph); + +} +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/insert_guards.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/insert_guards.h new file mode 100644 index 0000000000000000000000000000000000000000..28d9f168bf3d559ad434883954004797ae96e690 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/insert_guards.h @@ -0,0 +1,21 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include +#include + +namespace torch { +namespace jit { + +TORCH_API void InsertGuards(std::shared_ptr graph); + +TORCH_API void RemoveProfilingNodes(const std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/integer_value_refinement.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/integer_value_refinement.h new file mode 100644 index 0000000000000000000000000000000000000000..5614e96c141f4b611418fec08ce917868728eef1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/integer_value_refinement.h @@ -0,0 +1,12 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// return true if graph is modified +TORCH_API bool RefineIntegerValues(const std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/liveness.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/liveness.h new file mode 100644 index 0000000000000000000000000000000000000000..7b612dee9622304b5f9279215da7c798b5958b4b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/liveness.h @@ -0,0 +1,23 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +namespace torch { +namespace jit { + +using SparseBitVector = ::c10::SparseBitVector<256>; + +// BuildLivenessSets computes "bailout" liveness which is equivalent to +// "{LIVE_IN} or {GEN}" or "{LIVE_OUT} - {KILL}" +TORCH_API std::unordered_map> BuildLivenessSets( + std::shared_ptr graph); +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/loop_unrolling.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/loop_unrolling.h new file mode 100644 index 0000000000000000000000000000000000000000..5895f2fcee7462b8f9627651e58590d96299ac93 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/loop_unrolling.h @@ -0,0 +1,36 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// return true if graph is modified +TORCH_API bool UnrollLoops(std::shared_ptr& graph); + +// Only unrolls 
constant loops. Will unroll them regardless of loop block size +TORCH_API bool UnrollConstantLoops(std::shared_ptr& graph); + +TORCH_API Node* PeelLoop(Node* n, size_t times); + +// return true if graph is modified +TORCH_API bool PeelProfilingLoops(const std::shared_ptr& graph); + +struct TORCH_API LoopsPeeler { + LoopsPeeler(std::function callback, size_t num_iterations = 1) + : callback_(std::move(callback)), num_iterations_(num_iterations) {} + + bool run(const std::shared_ptr& graph); + + private: + void collectLoop(Node* n); + void collectLoops(Block* block); + void peelLoops(); + + std::function callback_ = nullptr; + Node* in_loop_ = nullptr; + std::list loops_to_peel_; + size_t num_iterations_ = 1; +}; +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_graph.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_graph.h new file mode 100644 index 0000000000000000000000000000000000000000..6c9ea6666835a26e39ddf82830d1c43b7cd45748 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_graph.h @@ -0,0 +1,22 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +using ModulePtr = c10::intrusive_ptr; + +// Given a graph with of a method which first argument is %self, lower it to a +// graph where all attributes accesses are replaced with explicit inputs of the +// graph (rather than results of prim::GetAttr executed on %self). +// +// Returns a tuple (graph, parameters) where the last module.parameters.size() +// inputs to the graph are the trainable parameters used in this method. The +// remaining inputs are the true inputs to the function. +TORCH_API std::pair, std::vector> LowerGraph( + Graph& graph, + const ModulePtr& self); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_tuples.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_tuples.h new file mode 100644 index 0000000000000000000000000000000000000000..3ac9127b29fb084bb0c1d01d7684ac429e176453 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_tuples.h @@ -0,0 +1,20 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// removes tuples where TupleConstruct and TupleUnpack are matched +// but leaves tuples in place across if statements, loops, and as inputs/outputs +TORCH_API void LowerSimpleTuples(const std::shared_ptr& graph); + +// removes _all_ tuples and raises an error if some cannot be removed +// this is used by ONNX to ensure there are not tuples before conversion, +// but will not work on graphs whose inputs contain tuples. 
+TORCH_API void LowerAllTuples(const std::shared_ptr& graph); + +TORCH_API void LowerSimpleTuples(Block* block); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/metal_rewrite.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/metal_rewrite.h new file mode 100644 index 0000000000000000000000000000000000000000..30e4825cedd17962a7f90dfa93e6a7b5dba319cf --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/metal_rewrite.h @@ -0,0 +1,17 @@ +#pragma once +#include +#include +#include +#include + +namespace torch { +namespace jit { +TORCH_API void metalInsertPrePackedOps(std::shared_ptr& graph); +TORCH_API void metalInsertPrePackedOps(script::Module& module); +TORCH_API void metalFusePrePackedConvWithClamp(script::Module& module); +TORCH_API void metalFoldPrePackingOps(script::Module& module); +TORCH_API script::Module metalOptimizeForMobile( + const script::Module& module, + const std::vector& preserved_methods); +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/mkldnn_rewrite.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/mkldnn_rewrite.h new file mode 100644 index 0000000000000000000000000000000000000000..30d02332429a81ec50c6dc3fae8ab7cddff88714 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/mkldnn_rewrite.h @@ -0,0 +1,34 @@ +#pragma once + +#include +#include +#include +#include + +#if AT_MKLDNN_ENABLED() + +#include + +#endif // AT_MKLDNN_ENABLED() + +namespace torch { +namespace jit { + +#if AT_MKLDNN_ENABLED() + +namespace mkldnn { + +const static std::map> + fusion_rewrite_map = { + {"none", {}}, + {"relu", {}}, +}; + +} // namespace mkldnn + +#endif // AT_MKLDNN_ENABLED() + +void FuseConvWithEltwise(std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/mobile_optimizer_type.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/mobile_optimizer_type.h new file mode 100644 index 0000000000000000000000000000000000000000..d11f288dca343308bf2167c89a3d6b2d0792a569 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/mobile_optimizer_type.h @@ -0,0 +1,13 @@ +#pragma once + +#include + +enum class MobileOptimizerType : int8_t { + CONV_BN_FUSION, + INSERT_FOLD_PREPACK_OPS, + REMOVE_DROPOUT, + FUSE_ADD_RELU, + HOIST_CONV_PACKED_PARAMS, + CONV_1D_TO_2D, + VULKAN_AUTOMATIC_GPU_TRANSFER, +}; diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/onednn_graph_fuser.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/onednn_graph_fuser.h new file mode 100644 index 0000000000000000000000000000000000000000..aeb79470b01ae60e38282b4d29b6942af4189ac5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/onednn_graph_fuser.h @@ -0,0 +1,64 @@ +#pragma once + +#include +#include + +#include + +namespace torch { +namespace jit { +namespace fuser { +namespace onednn { + +static std::atomic onednn_enabled{true}; + +static std::atomic& getLlgaEnabled() { + return onednn_enabled; +} + +TORCH_API void fuseGraph(std::shared_ptr& g); + +} // namespace onednn +} // namespace fuser + +struct C10_EXPORT RegisterLlgaFuseGraph + : public PassManager { + static bool setEnabled(bool enabled) { + TORCH_CHECK( + 
AT_MKLDNN_ENABLED(), + "Running oneDNN Graph fuser is only supported with MKLDNN builds."); + bool oldState = fuser::onednn::getLlgaEnabled(); + fuser::onednn::getLlgaEnabled() = enabled; + if (enabled) { + registerPass(fuser::onednn::fuseGraph); + } else { + clearPass(); + } + return oldState; + } + + static bool isEnabled() { + return fuser::onednn::getLlgaEnabled(); + } + + // override PassManager::registerPass to register pre-pass + static bool registerPass(GraphPass p) { + if (!isRegistered()) { + passID(registerPrePass(std::move(p)), true); + isRegistered(true); + return false; + } + return true; + } + + // override PassManager::clearPass to clear pre-pass + static void clearPass() { + if (isRegistered()) { + clearPrePass(passID()); + isRegistered(true); + } + } +}; + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/pass_manager.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/pass_manager.h new file mode 100644 index 0000000000000000000000000000000000000000..8585c6ecdb3de5c0ce1a3a72be9d722d2b423f0d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/pass_manager.h @@ -0,0 +1,136 @@ +#pragma once + +#include + +/* `getCustomPrePasses()` returns a vector of passes that will be executed + * after differentiation but before any fusion. This is the de-facto location + * for compiler backends to insert passes. + * + * `getCustomPostPasses()` returns a vector of passes that will be + * executed after differentiation and after fusion (if any). This is the + * location for fusion cleanup passes if they are needed. + * + * Static registration of a pass can be done by creating a global + * `Register{Pre,Post}Pass r(Pass)` variable in a compilation unit. + * + * pass_manager.h uses a Meyer's singleton to store a vector of `Pass`es, which + * modify the IR graph in place. + */ + +namespace torch { +namespace jit { + +// A pass modifies a Graph in place. +using GraphPass = std::function&)>; + +// Since Passes are std::functions, we associate a UUID to each pass, this way +// if we want to deregister a pass, we have something to reference it by. +using GraphPassNameType = unsigned int; + +// Graph pass entries have a name associated with them +using GraphPassEntry = std::pair; + +// Return currently registered passes. Passes are stored in a static vector +TORCH_API std::vector>& +getCustomPostPasses(); +TORCH_API std::vector>& +getCustomPrePasses(); + +TORCH_API GraphPassNameType registerPostPass(GraphPass p); +TORCH_API GraphPassNameType registerPrePass(GraphPass p); + +// Look up pass by name passed in, remove it from registered passes +TORCH_API void clearPostPass(GraphPassNameType p); +TORCH_API void clearPrePass(GraphPassNameType p); + +// Remove all passes +TORCH_API void clearAllPostPasses(); +TORCH_API void clearAllPrePasses(); + +// LEGACY CALL +struct TORCH_API RegisterPostPass { + RegisterPostPass(GraphPass p); +}; + +using RegisterPass = RegisterPostPass; + +/* + * PassManager is a wrapper on the register/clear PostPass functions above. It + * will register the pass provided in "registerPass" and will hold on to its + * associated name that way clearPass can be later called and will delete the + * pass used to register when called. + * + * PassManager is templated because we want static variables based on a + * particular GraphPass. 
When deriving from PassManager, you should send as the + * template parameter your derived class as you would for the curiously + * recurring template pattern. This template parameter isn't actually used and + * is simply done to prevent static members from being shared across derived + * types. + */ +template +struct C10_EXPORT PassManager { + private: + // We want this class to be abstract because it's + virtual void abstract() = 0; + + protected: + /* + * isRegistered() will return if a pass has been registered + * isRegistered(true) will change the value of the internal static bool + * + * There's an internal static bool to this function to keep track of the + * state, this is so when functions are derived from this class, they don't + * have to worry about initializing the static members. + */ + static bool isRegistered(bool flip_bit = false) { + static bool val = false; + if (flip_bit) + val = !val; + return val; + } + + /* + * name() will return the name of the registered pass + * name(pass_name, true) will set the name of the pass + * Similarly to isRegistered we use an internal static variable to hold the + * name. + */ + static GraphPassNameType passID( + GraphPassNameType PassID = 0, + bool set = false) { + static GraphPassNameType pass_id = 0; + if (set) + pass_id = PassID; + return pass_id; + } + + public: + // registerPass(pass) will register the pass provided and set the + // name/isRegistered functions appropriately, it returns a bool value + // indicating whether the given pass is already registered previously. + static bool registerPass(GraphPass p) { + if (!isRegistered()) { + // If we don't already have a registered pass, register pass + // hold on to its name, change isRegistered to true + passID(registerPostPass(std::move(p)), true); + isRegistered(true); + return false; + } + return true; + } + + // Calls ClearPostPass(passID()) + static void clearPass() { + // If the pass is registered, clear it and change isRegistered to false. 
+ if (isRegistered()) { + clearPostPass(passID()); + isRegistered(true); + } + } + + // clang-tidy requires virtual destructor; + virtual ~PassManager() = default; +}; + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole.h new file mode 100644 index 0000000000000000000000000000000000000000..e2d8d5f9a9f2082fe0eb94bb5aee4a7605dc7042 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole.h @@ -0,0 +1,20 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// return true if graph is modified +TORCH_API bool PeepholeOptimize( + const std::shared_ptr& graph, + bool disable_shape_peepholes = false); +// return true if graph is modified +TORCH_API bool PeepholeOptimize( + Block* block, + bool disable_shape_peepholes = false); +// return true if graph is modified +TORCH_API bool FuseAddMM(const std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_alias_sensitive.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_alias_sensitive.h new file mode 100644 index 0000000000000000000000000000000000000000..d61a0a4ec0d8be8e1c3e49e352b288a4767b9ed0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_alias_sensitive.h @@ -0,0 +1,17 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Peephole Optimizes alias sensitive peepholes +// Currently this is invoked as part of PeepholeOptimize +// return true if graph is modified +// Optimizes on TensorType if shape_peepholes is true +TORCH_API bool PeepholeOptimizeAliasSensitive( + const std::shared_ptr& graph, + bool shape_peepholes); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_list_idioms.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_list_idioms.h new file mode 100644 index 0000000000000000000000000000000000000000..d20df9571db01e0e0a0a3991b410621bfcb346ba --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_list_idioms.h @@ -0,0 +1,72 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Peephole Optimizes List ops such as len(li) and li[1]. +// 1. Construct/Unpack optimizations +// Given a function like this: +// def foo(a, b): +// li = [a, b] +// x, y = li +// return x, y +// This pass produces (after dead code elimination): +// def foo(a, b): +// return a, b +// +// This is only applied to lists that are not modified. +// +// 2. getitem optimizations +// Given a function like this: +// def foo(a, b): +// li = [a, b] +// x = li[0] +// return x +// This pass produces (after dead code elimination): +// def foo(a, b): +// return a +// +// This optimization can only happen if the list is not modified. +// +// 3. len optimizations +// Given a function like this: +// def foo(): +// li = [1, 2] +// return len(li) +// This pass produces (after dead code elimination): +// def foo(): +// return 2 +// +// This has the same requirements as the getitem optimizations. +// +// 4. 
ListConstruct + ListConstruct +// Given a function like this: +// def foo(): +// return [1, 2] + [3, 4] +// This pass produces (after dead code elimination): +// def foo(): +// return [1, 2, 3, 4] +// +// This is only applied to lists that are not modified. +// +// 5. Slice +// Given a function like this: +// def foo(): +// return [1, 2, 3, 4, 5][0:2] +// This pass produces (after deadcode elimination): +// def foo(): +// return [1, 2] +// +// Currently this is invoked as part of PeepholeOptimize +// return true if graph is modified. +// If `refine_list_len` is true will attempt to refine the len of lists through +// len comparisons and assertions. This does not generally optimize pytorch +// programs so it is not called by default in PeepholeOptimize. +TORCH_API bool PeepholeOptimizeListIdioms( + const std::shared_ptr& graph, + bool refine_list_len = false); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/prepack_folding.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/prepack_folding.h new file mode 100644 index 0000000000000000000000000000000000000000..13761dc5473efd83c02339a8205cb4cfeb8038f4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/prepack_folding.h @@ -0,0 +1,17 @@ +#pragma once + +#include +#include + +namespace torch { +namespace jit { + +using PrePackingOpsFilterFn = std::function; + +void PrePackingOpsFolder( + script::Module& m, + const PrePackingOpsFilterFn& is_foldable_op, + const std::string& attr_prefix); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/dedup_module_uses.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/dedup_module_uses.h new file mode 100644 index 0000000000000000000000000000000000000000..0204d5f73f04f3420f8d822c782739417b31b7e6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/dedup_module_uses.h @@ -0,0 +1,28 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +/** Recursively deduplicate multiple uses of the same module by + * creating an instance clone for each use of the module, which means + * the type will be the same as before and all the attributes will be + * copied, then we'll change the use of the original module to the use + * of cloned module in the Graph. + * + * This is done to ensure that modules can survive destructive passes + * without changing model behavior. For example, here: + * + * x = self.conv1(x) + * x = self.relu(x) + * x = self.conv2(x) + * x = self.relu(x) + * + * self.relu needs to be deduplicated for potential future destructive passes + * to work properly. + */ +TORCH_API void DedupModuleUses(Module& module); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/finalize.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/finalize.h new file mode 100644 index 0000000000000000000000000000000000000000..d73addbc387f6b4d55360480b3c20fc1e0b84d3c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/finalize.h @@ -0,0 +1,63 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace jit { + +/** \brief Backend specific pass to fuse dequantize - op - quantize calls + * as quantized_op calls. 
+ * + * Right now this is a fusion for fbgemm backend and only works for quantized + * conv op, we'll extend to more ops and more backends in the future. + * + * Currently supported fusion: + * q(conv2d(dq(a), dq(w), dq(b))) --> to_nchw(fbgemm_conv2d(prepack(to_nhwc(a)), + * prepack(to_nhwc(w)), + * prepack(to_nhwc(b)))) + * + * q(linear(dq(a), dq(w), dq(b))) --> to_nchw(fbgemm_linear(prepack(to_nhwc(a)), + * prepack(to_nhwc(w)), + * prepack(to_nhwc(b)))) + * + * \param graph the graph we want to apply fusion + */ +TORCH_API void QuantFusion( + std::shared_ptr& graph, + QuantType quant_type = QuantType::STATIC); + +/** \brief Insert prepack and unpack function in graph + * We want add pack/unpack functions for quantized weight because later we want + * to fold the packed weight as an attribute of the module, in order to reduce + * the cost of packing the weight on the fly in quantized models. + * + * Each quantized op has it's corresponding prepack/unpack function, + * right now, we only need to do prepack/unpack for quantized::linear + * and quantized::conv2d. + */ +TORCH_API void InsertPrepackUnpack(std::shared_ptr& graph); + +/** \brief Insert pack and unpack function in all graphs + * of module + * + * Go through graphs of all the methods of all child modules + * and call InsertPrepackUnpack on the graph. + */ +TORCH_API void InsertPrepackUnpack(Module& module); + +TORCH_API script::Module Finalize( + script::Module& module, + QuantType quant_type = QuantType::STATIC, + const std::vector& preserved_attrs = + std::vector()); + +TORCH_API void FoldQuantizedPrepackingOps(Module& module); + +TORCH_API Module FinalizeOnDevicePTQ( + Module& module, + QuantType quant_type, + const std::string& method_name); +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/fusion_passes.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/fusion_passes.h new file mode 100644 index 0000000000000000000000000000000000000000..b316fe2adab92911b91ae7c7cf2bad050fc2afc4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/fusion_passes.h @@ -0,0 +1,9 @@ +#pragma once + +#include + +namespace torch { +namespace jit { +TORCH_API void FuseQuantizedAddRelu(std::shared_ptr& graph); +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/helper.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/helper.h new file mode 100644 index 0000000000000000000000000000000000000000..b5a5adf40b65c6838b23d5f2999b8058d7e825cd --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/helper.h @@ -0,0 +1,216 @@ +#pragma once +#include +#include +#include +#include +#include + +#include +#include + +namespace torch { +namespace jit { + +using graph_rewrite_helper::getFuncName; + +// Vector of a module and the name of its method +using ModuleMethodVector = std::vector>; +// Map of quantization parameter name and value +// for example _scale, _zero_point, +// _scalar_type and _axis(for per channel quantization) +using QParamVector = std::vector>; + +// =========== helper functions for Value ========= +// Check if a value is weight, since we need to use weight observer +// for weight +TORCH_API bool isWeight(Value* v); + +// Check if a value is bias for conv and linear, which we do not +// quantize +TORCH_API bool 
isBiasOfConvOrLinear(Value* v); + +TORCH_API bool isEmbeddingBagNonInput(Value* v); + +// Get the use as scalar input of clamp ops for the input value +c10::optional getClampScalarInputUse(Value* v); + +// For a given value `v`, get the list of values that we need to check +// if they are observed/quantized or not, if so, we can say the +// `v` is also observed/quantized, since we can derive +// the quantization parameters for `v` given the list of values +TORCH_API std::vector getPassThroughInputs(Value* v); + +// Clones the method by the name of orig_method_name into new_method_name method +TORCH_API void cloneMethod( + Module& module, + const std::string& orig_method_name, + const std::string& new_method_name); + +// Check if a value in the graph is a Scalar value +TORCH_API bool isScalar(Value* v); + +// Check if value is the input of the graph +TORCH_API bool hitGraphInput(Value* value); + +// Converts a mangled name, such as +// __torch__.torch.ao.nn.quantized.modules.conv.___torch_mangle_7.Conv2d +// into an unmangled name, such as +// __torch__.torch.ao.nn.quantized.modules.conv.Conv2d +TORCH_API std::string removeTorchMangle(const std::string& orig_name); + +// Return the module name that corresponds to the value. +TORCH_API c10::optional getModuleName(Value* value); + +// =========== helper functions for Node ========= +TORCH_API bool isSingleInputGeneralShapeAtenFunction(Node* n); + +TORCH_API bool isSingleInputGeneralValueAtenFunction(Node* n); + +TORCH_API bool isSingleInputGeneralCallFunction(Node* n); + +TORCH_API bool isSingleInputGeneralAtenFunction(Node* n); + +TORCH_API bool isClamp(Node* n); + +// Check if the node will produce the same result regardless of whether +// the input tensor is quantized or not, example: aten::size +TORCH_API bool isTensorInfoNode(Node* n); + +// Check if this the propagate op that has single input, e.g. aten::cat +TORCH_API bool isPropagateQuantSingleInputOp(Node* n); + +// Check if this is the propagate op that has two inputs, e.g. aten::add +TORCH_API bool isPropagateQuantBinaryOp(Node* n); + +// Check if this is the node that we'll quantize or not quantize depending on +// whether the input of the node is quantized, example: aten::cat +TORCH_API bool isPropagateQuantOp(Node* n); + +// Check if the node is a binary op like aten::add and aten::mul and +// if the input 1 is a scalar, these ops will be quantized to +// quantized::{op}_scalar +TORCH_API bool isBinaryOpWithScalarInput(Node* n); + +TORCH_API c10::optional> getFixedQParams( + Node* n); + +// We don't want to analyze the graph for some `builtin` CallFunctions +// like `linear` because we want to preserve the op boundary +TORCH_API bool userDefinedCallFunction(Node* n); + +// Check if the node has scalar input +TORCH_API bool hasScalarInput(Node* n); + +// Check if a node is quantizable +TORCH_API bool nodeQuantizable( + Node* n, + QuantType quant_type = QuantType::STATIC); + +// Nodes which only require quantization of weight value, eg. 
embedding_bag +bool isWeightOnlyStaticQuantOp(Node* n); + +// Check if a use of the value is quantizable, this depends on +// both the use node and the offset +TORCH_API bool useQuantizable(const Use& use, QuantType quant_type); + +// Given a CallFunction node, extract the graph of the called function +TORCH_API std::shared_ptr getCallFunctionGraph(Node* n); + +// Check if `use` is a CallFunction of name `func_name` and if value +// `v` is the nth argument (if provided) of the function +bool matchCallFuncToUse( + const Use& use, + const std::string& func_name, + c10::optional nth_arg); + +// Check if `use` is a AtenFunction of name `func_name` and if value +// `v` is the nth argument (if provided) of the function +bool matchAtenFuncToUse( + const Use& use, + const std::string& func_name, + c10::optional nth_arg); + +// =========== helper functions for Block ========= +// checks if a block will always raise an Exception +TORCH_API bool alwaysRaisesException(Block* block); + +// =========== helper functions for Module ========== +// TODO: remove +TORCH_API std::vector getModuleAccessPath( + Value* instance, + Value* self); +// TODO: remove +TORCH_API Module +findChildModule(const Module& module, const std::vector& path); + +// Given an CallMethod node, get the module instance corresponding +// to the instance Value +// TODO: refactor all current uses of this function to the Opt one +TORCH_API Module getInvokedModule(Module& module, Node* n, Value* self); + +// Given an CallMethod node, get the module instance corresponding +// to the instance Value if the instance is a module, otherwise return +// c10::nullopt +c10::optional getInvokedModuleOpt( + const Module& module, + Node* n, + Value* self); + +// ==================== filter functions for matches ============== +// filter to check Value `vname` is a constant of int value `value` +bool is_int_constant( + const Match& match, + const std::unordered_map& vmap, + const std::string& vname, + int value); + +// filter to check if the %alpha argument of aten::add is constant 1 +bool aten_add_alpha_is_one( + const Match& match, + const std::unordered_map& vmap); + +// filter to check if the functional in CallFunction is relu +bool is_functional_relu( + const Match& match, + const std::unordered_map& vmap); + +// filter to check if the module is torch.nn.ReLU +bool is_relu_module( + const Match& match, + const std::unordered_map& vmap); + +bool is_linear_module( + const Match& match, + const std::unordered_map& vmap); + +// TODO: add a macro to declare the filters +bool is_conv1d_module( + const Match& match, + const std::unordered_map& vmap); + +bool is_conv2d_module( + const Match& match, + const std::unordered_map& vmap); + +bool is_conv3d_module( + const Match& match, + const std::unordered_map& vmap); + +bool is_conv_transpose1d_module( + const Match& match, + const std::unordered_map& vmap); + +bool is_conv_transpose2d_module( + const Match& match, + const std::unordered_map& vmap); + +bool is_batchnorm2d_module( + const Match& match, + const std::unordered_map& vmap); + +bool is_batchnorm3d_module( + const Match& match, + const std::unordered_map& vmap); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/insert_observers.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/insert_observers.h new file mode 100644 index 0000000000000000000000000000000000000000..6fa7fe04491122fc40ed5c2309c6c08cf01d57d7 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/insert_observers.h @@ -0,0 +1,68 @@ +#pragma once + +#include +#include + +namespace std { + +template <> +struct hash { + inline size_t operator()(const torch::jit::Module& arg) const { + return std::hash>()(arg._ivalue()); + } +}; + +} // namespace std + +namespace torch { +namespace jit { + +using QConfig = std::tuple; +using QConfigDict = std::unordered_map>; + +/** \brief Insert observer module and observer function call for + * the Tensors that needs to be observed. + * + * For each Tensor that needs to be observed in the method, insert observer + * module to the input module and add forward calls of observer to the specified + * method. + * + * \param module the input module + * \param method_name the method we want to insert observers for + * \param qconfig_dict the qconfig dictionary that specifies how + * each module is going to be quantized + * \param inplace whether we want to do inplace modification to the input module + * or clone the module + * \param is_dynamic whether the dynamic quantization script is being used. + */ +TORCH_API Module InsertObservers( + Module& module, + const std::string& method_name, + const QConfigDict& qconfig_dict, + bool inplace, + QuantType quant_type = QuantType::STATIC); + +/** \brief Insert observer module and observer method for + * the Tensors that needs to be observed. + * + * For each Tensor that needs to be observed in the method, insert observer + * module to the input module and observe_ methods to the module. + * This method is clone of mehtod_name with forward calls of observer added. + * + * \param module the input module + * \param method_name the method we want to insert observers for + * \param qconfig_dict the qconfig dictionary that specifies how + * each module is going to be quantized + * \param inplace whether we want to do inplace modification to the input module + * or clone the module + * \param is_dynamic whether the dynamic quantization script is being used. 
+ */ +TORCH_API Module InsertObserversForOnDevicePTQ( + Module& module, + const std::string& method_name, + const QConfigDict& qconfig_dict, + bool inplace, + QuantType quant_type = QuantType::STATIC); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/insert_quant_dequant.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/insert_quant_dequant.h new file mode 100644 index 0000000000000000000000000000000000000000..de2b31fdba7ca80223bad52847b78060868ba9ce --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/insert_quant_dequant.h @@ -0,0 +1,46 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace jit { + +/** Replicate quantize node for prim::If blocks, so that we can match + * quantization patterns in prim::If blocks + */ +TORCH_API void ReplicateQuant(std::shared_ptr& graph); + +/** Replicate dequantize node for each use, so that we can match + * quantization patterns + */ +TORCH_API void ReplicateDeQuant(std::shared_ptr& graph); + +/** \brief Insert quantize - dequantize calls to the Tensors + * that are observed in insert_observers pass + * + * For each Tensor that is observed, get the observer module and call + * calculate_qparam on the observer module to get quantization parameters + * and add quantize - int_repr - dequantize function calls using these + * parameters we also have special handling for quantizing "bias" right now. + * + * \param module the input module + * \param method_name the method we want to insert quantization calls for + */ +TORCH_API Module InsertQuantDeQuant( + Module& module, + const std::string& method_name, + bool inplace, + bool debug, + QuantType quant_type = QuantType::STATIC); + +TORCH_API Module InsertQuantDeQuantOnDevicePTQ( + Module& module, + const std::string& method_name, + bool inplace, + bool debug, + QuantType quant_type = QuantType::STATIC); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/quantization_patterns.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/quantization_patterns.h new file mode 100644 index 0000000000000000000000000000000000000000..851548862dfc4779bf03e40c6291847c0bbe1eed --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/quantization_patterns.h @@ -0,0 +1,1272 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { + +struct QuantFusionInfo { + std::string quantized_op_name; + std::string pattern; + std::string replacement; + std::vector filters = {}; +}; + +namespace { +std::string getExtraArgList(std::vector extra_args) { + return std::accumulate( + extra_args.begin(), + extra_args.end(), + std::string(), + [](std::string acc, const std::string& arg) { return acc + ", " + arg; }); +} + +// Get the pattern we want to replace the match with +std::string getAtenOpPattern( + const std::string& graph_header, + const std::string& op_name, + const std::vector& extra_op_args, + bool scalar_args = false) { + std::vector _extra_op_args = extra_op_args; + std::string aten_op_pattern = graph_header; + if (scalar_args) { + for (const auto& extra_arg : _extra_op_args) { + aten_op_pattern + .append(R"( + )") + .append(extra_arg) + .append("_scalar = aten::item(") + 
.append(extra_arg) + .append(")"); + } + + for (auto& _extra_op_arg : _extra_op_args) { + _extra_op_arg.append("_scalar"); + } + } + const auto& extra_op_arg_list = getExtraArgList(std::move(_extra_op_args)); + aten_op_pattern += R"( + %r = )"; + aten_op_pattern += op_name + "(" + "%a_quant" + extra_op_arg_list + ")"; + aten_op_pattern += R"( + return (%r) )"; + return aten_op_pattern; +} + +// generate ops for quantize pattern for a scalar value +std::string getQuantizeForScalar(const std::string& value) { + // 6 is `torch.float` ScalarType, we are creating a float scalar + // tensor from a scalar value + std::string quantize_pattern = R"( + )" + + value + "_float_scalar_type : int = prim::Constant[value=6]()"; + quantize_pattern += R"( + )" + + value + "_none : None = prim::Constant()"; + quantize_pattern += R"( + )" + + value + "_tensor : Tensor = aten::scalar_tensor(" + value + ", " + value + + "_float_scalar_type"; + for (const auto i : c10::irange(3)) { + (void)i; // Suppress unused variable warning + quantize_pattern += ", " + value + "_none"; + } + quantize_pattern += ")"; + quantize_pattern += + R"( + )" + + value + "_quant = aten::quantize_per_tensor(" + value + "_tensor" + + getExtraArgList( + {value + "_scale", value + "_zero_point", value + "_dtype"}) + + ")"; + return quantize_pattern; +} + +std::string getDequantize(const std::string& value) { + return R"( + )" + + value + "_dequant = aten::dequantize(" + value + "_quant)"; +} + +std::string getItem(const std::string& value) { + return R"( + )" + + value + "_scalar : float = aten::item(" + value + "_dequant)"; +} + +// Patterns for the ops that inherit parameters from input +std::string getInputTensorQParamOpPattern( + const std::string& op_name, + const std::vector& extra_op_args) { + const auto& extra_op_arg_list = getExtraArgList(extra_op_args); + std::string op_pattern = "graph(%a_quant" + extra_op_arg_list + "):" + R"( + %a_dequant = aten::dequantize(%a_quant) + %r = )" + + op_name + "(" + "%a_dequant" + extra_op_arg_list + ")" + R"( + %r_scale : float = aten::q_scale(%a_quant) + %r_zero_point : int = aten::q_zero_point(%a_quant) + %r_dtype : int = prim::dtype(%a_quant) + %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype) + return (%r_quant) )"; + return op_pattern; +} + +// QuantFusionInfo for the ops that inherit parameters from input +QuantFusionInfo getInputTensorQParamOpFusionInfo( + const std::string& op_name, + const std::vector& extra_op_args) { + std::string op_pattern = + getInputTensorQParamOpPattern(op_name, extra_op_args); + const auto& extra_op_arg_list = getExtraArgList(extra_op_args); + std::string graph_header = "graph(%a_quant" + extra_op_arg_list + "):"; + std::string op_replacement = + getAtenOpPattern(graph_header, op_name, extra_op_args); + + return {op_name, std::move(op_pattern), std::move(op_replacement)}; +} + +// quant fusion for ops like `quantized::add_scalar`, `quantized::mul_scalar` +QuantFusionInfo getBinaryOpScalarFusionInfo( + const std::string& op_name, + const std::vector& extra_op_args, + const std::string& quantized_op_name, + const std::vector& extra_quantized_op_args, + const std::vector& filters = {}) { + std::string op_pattern = + getInputTensorQParamOpPattern(op_name, extra_op_args); + + const auto& extra_op_arg_list = getExtraArgList(extra_op_args); + std::string graph_header = "graph(%a_quant" + extra_op_arg_list + "):"; + std::string op_replacement = getAtenOpPattern( + graph_header, quantized_op_name, extra_quantized_op_args); + + return {op_name, 
std::move(op_pattern), std::move(op_replacement), filters}; +} + +QuantFusionInfo getClampOpFusionInfo( + const std::string& op_name, + const std::vector& extra_op_args) { + std::vector header_args = extra_op_args; + std::vector input_qparams = {"_scale", "_zero_point", "_dtype"}; + for (const auto& arg : extra_op_args) { + for (const auto& qparam : input_qparams) { + header_args.push_back(arg + qparam); + } + } + for (const auto& qparam : input_qparams) { + header_args.push_back("%r" + qparam); + } + const auto& extra_header_arg_list = getExtraArgList(std::move(header_args)); + std::string graph_header = "graph(%a_quant" + extra_header_arg_list + "):"; + std::string op_pattern = graph_header; + for (const auto& arg : extra_op_args) { + op_pattern += getQuantizeForScalar(arg); + op_pattern += getDequantize(arg); + op_pattern += getItem(arg); + } + op_pattern += getDequantize("%a"); + op_pattern += R"( + %r = )"; + std::vector scalar_extra_args; + scalar_extra_args.reserve(extra_op_args.size()); + for (const auto& arg : extra_op_args) { + scalar_extra_args.push_back(arg + "_scalar"); + } + op_pattern += op_name + "(" + "%a_dequant" + + getExtraArgList(std::move(scalar_extra_args)) + ")"; + // IR pattern common to all ops that inherit qparam from input + op_pattern += R"( + %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype) + return (%r_quant) )"; + + std::string aten_op_pattern = + getAtenOpPattern(graph_header, op_name, extra_op_args); + + return {op_name, std::move(op_pattern), std::move(aten_op_pattern)}; +} + +// Patterns for the ops that has fixed quantization parameters +QuantFusionInfo getFixedQParamOpFusionInfo( + const std::string& op_name, + const std::vector& extra_op_args, + bool is_symmetric) { + const auto& extra_op_arg_list = getExtraArgList(extra_op_args); + std::string graph_header = "graph(%a_quant" + extra_op_arg_list + "):"; + std::string op_pattern = graph_header; + op_pattern += R"( + %a_dequant = aten::dequantize(%a_quant) + %r = )"; + op_pattern += op_name + "(" + "%a_dequant" + extra_op_arg_list + ")"; + // IR pattern common to all ops with fixed quantization parameters for + // asymetric quantization + std::string asym_fixed_qparam_op_suffix = R"( + %r_scale : float = prim::Constant[value=0.00390625]() + %r_zero_point : int = prim::Constant[value=0]() + %r_dtype : int = prim::Constant[value=13]() + %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype) + return (%r_quant) )"; + + std::string sym_fixed_qparam_op_suffix = R"( + %r_scale : float = prim::Constant[value=0.0078125]() + %r_zero_point : int = prim::Constant[value=128]() + %r_dtype : int = prim::Constant[value=13]() + %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype) + return (%r_quant) )"; + op_pattern += + is_symmetric ? 
sym_fixed_qparam_op_suffix : asym_fixed_qparam_op_suffix; + + std::string aten_op_pattern = + getAtenOpPattern(graph_header, op_name, extra_op_args); + + return {op_name, std::move(op_pattern), std::move(aten_op_pattern)}; +} + +// filter that checks %b_scalar is a scalar +bool input_b_is_scalar( + const Match& match, + const std::unordered_map& vmap) { + const auto& match_vmap = match.values_map; + auto b_scalar = match_vmap.at(vmap.at("b_scalar")); + return isScalar(b_scalar); +} + +// Patterns for ops that require observation for output quantization parameters +// Example: +// +// before fusion: +// +// graph(%a_quant, %r_scale, %r_zero_point, %r_dtype): +// %a_dequant = aten::dequantize(%a_quant) +// %r = {op_name}(%a_dequant, {extra_args}) +// %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, +// %r_dtype) return (%r_quant) +// +// after fusion: +// +// graph(%a_quant, %r_scale, %r_zero_point, %r_dtype): +// %r_quant = {quantized_op_name}(%a_quant, {extra_args}, %r_scale, +// %r_zero_point) return (%r_quant) +QuantFusionInfo getObservedQParamOpFusionInfo( + const std::string& fp_op_name, + const std::string& q_op_name, + const std::vector& fp_extra_args, + const std::vector& q_extra_args) { + const auto& fp_extra_arg_list = getExtraArgList(fp_extra_args); + const auto& q_extra_arg_list = getExtraArgList(q_extra_args); + + std::string op_pattern = "graph(%a_quant" + fp_extra_arg_list + + ", %r_scale, %r_zero_point, %r_dtype):" + R"( + %a_dequant = aten::dequantize(%a_quant) + %r = )" + + fp_op_name + "(" + "%a_dequant" + fp_extra_arg_list + ")" + R"( + %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype) + return (%r_quant) )"; + + std::string aten_op_pattern = "graph(%a_quant" + fp_extra_arg_list + + ", %r_scale, %r_zero_point, %r_dtype):" + R"( + %r_quant = )" + + q_op_name + "(%a_quant" + q_extra_arg_list + + ", %r_scale, %r_zero_point)" + R"( + return (%r_quant) )"; + + return {q_op_name, std::move(op_pattern), std::move(aten_op_pattern)}; +} + +} // namespace + +static std::vector quant_fusion_pattern_and_replacements() { + // aten::conv1d + std::string conv1d = R"( +graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups): + %a_dequant = aten::dequantize(%a_quant) + %w_quant : Tensor, %b : Tensor? = quantized::conv1d_unpack(%packed_params) + %w_dequant = aten::dequantize(%w_quant) + %r = aten::conv1d(%a_dequant, %w_dequant, %b, %stride, %padding, %dilation, %groups) + %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype) + return (%r_quant) )"; + + // aten::conv1d - aten::relu + std::string conv1d_relu = R"( +graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups): + %a_dequant = aten::dequantize(%a_quant) + %w_quant : Tensor, %b : Tensor? = quantized::conv1d_unpack(%packed_params) + %w_dequant = aten::dequantize(%w_quant) + %conv_out = aten::conv1d(%a_dequant, %w_dequant, %b, %stride, %padding, %dilation, %groups) + %r = aten::relu(%conv_out) + %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype) + return (%r_quant) )"; + + // aten::conv1d - aten::relu_ + std::string conv1d_inplace_relu = R"( +graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups): + %a_dequant = aten::dequantize(%a_quant) + %w_quant : Tensor, %b : Tensor? 
= quantized::conv1d_unpack(%packed_params) + %w_dequant = aten::dequantize(%w_quant) + %conv_out = aten::conv1d(%a_dequant, %w_dequant, %b, %stride, %padding, %dilation, %groups) + %r = aten::relu_(%conv_out) + %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype) + return (%r_quant) )"; + + // quantized::conv1d + std::string quantized_conv1d = R"( +graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups): + %r_quant = quantized::conv1d(%a_quant, %packed_params, %r_scale, %r_zero_point) + return (%r_quant) )"; + + // quantized::conv1d_relu + std::string quantized_conv1d_relu = R"( +graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups): + %r_quant = quantized::conv1d_relu(%a_quant, %packed_params, %r_scale, %r_zero_point) + return (%r_quant) )"; + + // aten::conv2d + std::string conv2d = R"( +graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups): + %a_dequant = aten::dequantize(%a_quant) + %w_quant : Tensor, %b : Tensor? = quantized::conv2d_unpack(%packed_params) + %w_dequant = aten::dequantize(%w_quant) + %r = aten::conv2d(%a_dequant, %w_dequant, %b, %stride, %padding, %dilation, %groups) + %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype) + return (%r_quant) )"; + + // aten::conv2d - aten::relu + std::string conv2d_relu = R"( +graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups): + %a_dequant = aten::dequantize(%a_quant) + %w_quant : Tensor, %b : Tensor? = quantized::conv2d_unpack(%packed_params) + %w_dequant = aten::dequantize(%w_quant) + %conv_out = aten::conv2d(%a_dequant, %w_dequant, %b, %stride, %padding, %dilation, %groups) + %r = aten::relu(%conv_out) + %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype) + return (%r_quant) )"; + + // aten::conv2d - aten::relu_ + std::string conv2d_inplace_relu = R"( +graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups): + %a_dequant = aten::dequantize(%a_quant) + %w_quant : Tensor, %b : Tensor? = quantized::conv2d_unpack(%packed_params) + %w_dequant = aten::dequantize(%w_quant) + %conv_out = aten::conv2d(%a_dequant, %w_dequant, %b, %stride, %padding, %dilation, %groups) + %r = aten::relu_(%conv_out) + %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype) + return (%r_quant) )"; + + // quantized::conv2d + std::string quantized_conv2d = R"( +graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups): + %r_quant = quantized::conv2d(%a_quant, %packed_params, %r_scale, %r_zero_point) + return (%r_quant) )"; + + // quantized::conv2d_relu + std::string quantized_conv2d_relu = R"( +graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups): + %r_quant = quantized::conv2d_relu(%a_quant, %packed_params, %r_scale, %r_zero_point) + return (%r_quant) )"; + + // aten::conv3d + std::string conv3d = R"( +graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups): + %a_dequant = aten::dequantize(%a_quant) + %w_quant : Tensor, %b : Tensor? 
= quantized::conv3d_unpack(%packed_params) + %w_dequant = aten::dequantize(%w_quant) + %r = aten::conv3d(%a_dequant, %w_dequant, %b, %stride, %padding, %dilation, %groups) + %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype) + return (%r_quant) )"; + + // aten::conv3d - aten::relu + std::string conv3d_relu = R"( +graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups): + %a_dequant = aten::dequantize(%a_quant) + %w_quant : Tensor, %b : Tensor? = quantized::conv3d_unpack(%packed_params) + %w_dequant = aten::dequantize(%w_quant) + %conv_out = aten::conv3d(%a_dequant, %w_dequant, %b, %stride, %padding, %dilation, %groups) + %r = aten::relu(%conv_out) + %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype) + return (%r_quant) )"; + + // aten::conv3d - aten::relu_ + std::string conv3d_inplace_relu = R"( +graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups): + %a_dequant = aten::dequantize(%a_quant) + %w_quant : Tensor, %b : Tensor? = quantized::conv3d_unpack(%packed_params) + %w_dequant = aten::dequantize(%w_quant) + %conv_out = aten::conv3d(%a_dequant, %w_dequant, %b, %stride, %padding, %dilation, %groups) + %r = aten::relu_(%conv_out) + %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype) + return (%r_quant) )"; + + // quantized::conv3d + std::string quantized_conv3d = R"( +graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups): + %r_quant = quantized::conv3d(%a_quant, %packed_params, %r_scale, %r_zero_point) + return (%r_quant) )"; + + // quantized::conv3d_relu + std::string quantized_conv3d_relu = R"( +graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups): + %r_quant = quantized::conv3d_relu(%a_quant, %packed_params, %r_scale, %r_zero_point) + return (%r_quant) )"; + + // aten::conv_transpose1d + std::string conv_transpose1d = R"( +graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %output_padding, %groups, %dilation): + %a_dequant = aten::dequantize(%a_quant) + %w_quant : Tensor, %b : Tensor? = quantized::conv_transpose1d_unpack(%packed_params) + %w_dequant = aten::dequantize(%w_quant) + %r = aten::conv_transpose1d(%a_dequant, %w_dequant, %b, %stride, %padding, %output_padding, %groups, %dilation) + %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype) + return (%r_quant) )"; + + // quantized::conv_transpose1d + std::string quantized_conv_transpose1d = R"( +graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %output_padding, %groups, %dilation): + %r_quant = quantized::conv_transpose1d(%a_quant, %packed_params, %r_scale, %r_zero_point) + return (%r_quant) )"; + + // aten::conv_transpose2d + std::string conv_transpose2d = R"( +graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %output_padding, %groups, %dilation): + %a_dequant = aten::dequantize(%a_quant) + %w_quant : Tensor, %b : Tensor? 
= quantized::conv_transpose2d_unpack(%packed_params) + %w_dequant = aten::dequantize(%w_quant) + %r = aten::conv_transpose2d(%a_dequant, %w_dequant, %b, %stride, %padding, %output_padding, %groups, %dilation) + %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype) + return (%r_quant) )"; + + // quantized::conv_transpose1d + std::string quantized_conv_transpose2d = R"( +graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %output_padding, %groups, %dilation): + %r_quant = quantized::conv_transpose2d(%a_quant, %packed_params, %r_scale, %r_zero_point) + return (%r_quant) )"; + + std::string add_relu = R"( +graph(%a_quant, %b_quant, %alpha, %scale, %zero_point, %dtype): + %a_dequant = aten::dequantize(%a_quant) + %b_dequant = aten::dequantize(%b_quant) + %r_add = aten::add(%a_dequant, %b_dequant, %alpha) + %r_relu = aten::relu(%r_add) + %r = aten::quantize_per_tensor(%r_relu, %scale, %zero_point, %dtype) + return (%r) )"; + + std::string add_inplace_relu = R"( +graph(%a_quant, %b_quant, %alpha, %scale, %zero_point, %dtype): + %a_dequant = aten::dequantize(%a_quant) + %b_dequant = aten::dequantize(%b_quant) + %r_add = aten::add(%a_dequant, %b_dequant, %alpha) + %r_relu = aten::relu_(%r_add) + %r = aten::quantize_per_tensor(%r_relu, %scale, %zero_point, %dtype) + return (%r) )"; + + std::string inplace_add_relu = R"( +graph(%a_quant, %b_quant, %alpha, %scale, %zero_point, %dtype): + %a_dequant = aten::dequantize(%a_quant) + %b_dequant = aten::dequantize(%b_quant) + %r_add = aten::add_(%a_dequant, %b_dequant, %alpha) + %r_relu = aten::relu(%r_add) + %r = aten::quantize_per_tensor(%r_relu, %scale, %zero_point, %dtype) + return (%r) )"; + + std::string inplace_add_inplace_relu = R"( +graph(%a_quant, %b_quant, %alpha, %scale, %zero_point, %dtype): + %a_dequant = aten::dequantize(%a_quant) + %b_dequant = aten::dequantize(%b_quant) + %r_add = aten::add_(%a_dequant, %b_dequant, %alpha) + %r_relu = aten::relu_(%r_add) + %r = aten::quantize_per_tensor(%r_relu, %scale, %zero_point, %dtype) + return (%r) )"; + + std::string quantized_add_relu = R"( +graph(%a_quant, %b_quant, %alpha, %scale, %zero_point, %dtype): + %r = quantized::add_relu(%a_quant, %b_quant, %scale, %zero_point) + return (%r) )"; + + // aten::linear + std::string linear = R"( +graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype): + %a_dequant = aten::dequantize(%a_quant) + %w_quant : Tensor, %b : Tensor? = quantized::linear_unpack(%packed_params) + %w_dequant = aten::dequantize(%w_quant) + %r = aten::linear(%a_dequant, %w_dequant, %b) + %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype) + return (%r_quant) )"; + + std::string linear_relu = R"( +graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype): + %a_dequant = aten::dequantize(%a_quant) + %w_quant : Tensor, %b : Tensor? = quantized::linear_unpack(%packed_params) + %w_dequant = aten::dequantize(%w_quant) + %linear_out = aten::linear(%a_dequant, %w_dequant, %b) + %r = aten::relu(%linear_out) + %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype) + return (%r_quant) )"; + + std::string linear_inplace_relu = R"( +graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype): + %a_dequant = aten::dequantize(%a_quant) + %w_quant : Tensor, %b : Tensor? 
= quantized::linear_unpack(%packed_params) + %w_dequant = aten::dequantize(%w_quant) + %linear_out = aten::linear(%a_dequant, %w_dequant, %b) + %r = aten::relu_(%linear_out) + %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype) + return (%r_quant) )"; + + // quantized::linear + std::string quantized_linear = R"( +graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype): + %r = quantized::linear(%a_quant, %packed_params, %r_scale, %r_zero_point) + return (%r) )"; + + std::string quantized_linear_relu = R"( +graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype): + %r = quantized::linear_relu(%a_quant, %packed_params, %r_scale, %r_zero_point) + return (%r) )"; + + std::string cat = R"( +graph(%input_quant, %dim, %r_scale, %r_zero_point, %r_dtype): + %input_dequant = aten::dequantize(%input_quant) + %r = aten::cat(%input_dequant, %dim) + %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype) + return (%r_quant) )"; + + std::string quantized_cat = R"( +graph(%input_quant, %dim, %r_scale, %r_zero_point, %r_dtype): + %r_quant = quantized::cat(%input_quant, %dim, %r_scale, %r_zero_point) + return (%r_quant) )"; + + // aten::add + std::string add = R"( +graph(%a_quant, %b_quant, %alpha, %scale, %zero_point, %dtype): + %a_dequant = aten::dequantize(%a_quant) + %b_dequant = aten::dequantize(%b_quant) + %r_add = aten::add(%a_dequant, %b_dequant, %alpha) + %r = aten::quantize_per_tensor(%r_add, %scale, %zero_point, %dtype) + return (%r) )"; + + // TODO: add %dtype after when https://github.com/pytorch/pytorch/issues/34351 + // is fixed + // quantized::add + std::string quantized_add = R"( +graph(%a_quant, %b_quant, %alpha, %scale, %zero_point, %dtype): + %r = quantized::add(%a_quant, %b_quant, %scale, %zero_point) + return (%r) )"; + + // aten::add_ + std::string inplace_add = R"( +graph(%a_quant, %b_quant, %alpha, %scale, %zero_point, %dtype): + %a_dequant = aten::dequantize(%a_quant) + %b_dequant = aten::dequantize(%b_quant) + %r_add = aten::add_(%a_dequant, %b_dequant, %alpha) + %r = aten::quantize_per_tensor(%r_add, %scale, %zero_point, %dtype) + return (%r) )"; + + auto add_scalar = getBinaryOpScalarFusionInfo( + "aten::add", + {"%b_scalar", "%alpha"}, + "quantized::add_scalar", + {"%b_scalar"}, + {aten_add_alpha_is_one, input_b_is_scalar}); + + auto add_scalar_out = getBinaryOpScalarFusionInfo( + "aten::add_", + {"%b_scalar", "%alpha"}, + "quantized::add_scalar_out", + {"%b_scalar", "%a_quant"}, + {aten_add_alpha_is_one, input_b_is_scalar}); + + // quantized::add_scalar_relu -- fusing quantized::add_scalar + // and aten::relu + auto quantized_add_scalar_relu_pattern = R"( +graph(%a_quant, %b_scalar): + %r_add = quantized::add_scalar(%a_quant, %b_scalar) + %r = aten::relu(%r_add) + return (%r) )"; + + auto quantized_add_scalar_inplace_relu_pattern = R"( +graph(%a_quant, %b_scalar): + %r_add = quantized::add_scalar(%a_quant, %b_scalar) + %r = aten::relu_(%r_add) + return (%r) )"; + + auto quantized_add_scalar_relu_replacement = R"( +graph(%a_quant, %b_scalar): + %r = quantized::add_scalar_relu(%a_quant, %b_scalar) + return (%r) )"; + + // quantized::add_scalar_relu_out -- fusing quantized::add_scalarOut + // and aten::relu + auto quantized_add_scalar_relu_out_pattern = R"( +graph(%a_quant, %b_scalar): + %r_add = quantized::add_scalar_out(%a_quant, %b_scalar, %a_quant) + %r = aten::relu(%r_add) + return (%r) )"; + + auto quantized_add_scalar_inplace_relu_out_pattern = R"( +graph(%a_quant, %b_scalar): + %r_add = 
quantized::add_scalar_out(%a_quant, %b_scalar, %a_quant) + %r = aten::relu_(%r_add) + return (%r) )"; + + auto quantized_add_scalar_relu_out_replacement = R"( +graph(%a_quant, %b_scalar): + %r = quantized::add_scalar_relu_out(%a_quant, %b_scalar, %a_quant) + return (%r) )"; + + // quantized::batch_norm + std::string batch_norm = R"( +graph(%a_quant, %weight, %bias, %mean, %var, %training, %eaf, %eps, %7, %scale, %zero_point, %scalar_type): + %a_dequant = aten::dequantize(%a_quant) + %r_bn = aten::batch_norm(%a_dequant, %weight, %bias, %mean, %var, %training, %eaf, %eps, %7) + %r = aten::quantize_per_tensor(%r_bn, %scale, %zero_point, %scalar_type) + return (%r) )"; + std::string quantized_batch_norm = R"( +graph(%a_quant, %weight, %bias, %mean, %var, %training, %eaf, %eps, %7, %scale, %zero_point, %scalar_type): + %r = quantized::batch_norm(%a_quant, %weight, %bias, %mean, %var, %eps, %scale, %zero_point) + return (%r) )"; + + std::string batch_norm_relu = R"( +graph(%a_quant, %weight, %bias, %mean, %var, %training, %eaf, %eps, %7, %scale, %zero_point, %scalar_type): + %a_dequant = aten::dequantize(%a_quant) + %bn_out = aten::batch_norm(%a_dequant, %weight, %bias, %mean, %var, %training, %eaf, %eps, %7) + %relu = aten::relu(%bn_out) + %r = aten::quantize_per_tensor(%relu, %scale, %zero_point, %scalar_type) + return (%r) )"; + std::string batch_norm_inplace_relu = R"( +graph(%a_quant, %weight, %bias, %mean, %var, %training, %eaf, %eps, %7, %scale, %zero_point, %scalar_type): + %a_dequant = aten::dequantize(%a_quant) + %bn_out = aten::batch_norm(%a_dequant, %weight, %bias, %mean, %var, %training, %eaf, %eps, %7) + %relu = aten::relu_(%bn_out) + %r = aten::quantize_per_tensor(%relu, %scale, %zero_point, %scalar_type) + return (%r) )"; + + std::string quantized_batch_norm_relu = R"( +graph(%a_quant, %weight, %bias, %mean, %var, %training, %eaf, %eps, %7, %scale, %zero_point, %scalar_type): + %r = quantized::batch_norm_relu(%a_quant, %weight, %bias, %mean, %var, %eps, %scale, %zero_point) + return (%r) )"; + + // aten::mul + std::string mul = R"( +graph(%a_quant, %b_quant, %scale, %zero_point, %dtype): + %a_dequant = aten::dequantize(%a_quant) + %b_dequant = aten::dequantize(%b_quant) + %r_mul = aten::mul(%a_dequant, %b_dequant) + %r = aten::quantize_per_tensor(%r_mul, %scale, %zero_point, %dtype) + return (%r) )"; + + // aten::mul_ + std::string inplace_mul = R"( +graph(%a_quant, %b_quant, %scale, %zero_point, %dtype): + %a_dequant = aten::dequantize(%a_quant) + %b_dequant = aten::dequantize(%b_quant) + %r_mul = aten::mul_(%a_dequant, %b_dequant) + %r = aten::quantize_per_tensor(%r_mul, %scale, %zero_point, %dtype) + return (%r) )"; + + // quantized::mul + std::string quantized_mul = R"( +graph(%a_quant, %b_quant, %scale, %zero_point, %dtype): + %r = quantized::mul(%a_quant, %b_quant, %scale, %zero_point) + return (%r) )"; + + auto mul_scalar = getBinaryOpScalarFusionInfo( + "aten::mul", + {"%b_scalar"}, + "quantized::mul_scalar", + {"%b_scalar"}, + {input_b_is_scalar}); + + auto mul_scalar_out = getBinaryOpScalarFusionInfo( + "aten::mul_", + {"%b_scalar"}, + "quantized::mul_scalar_out", + {"%b_scalar", "%a_quant"}, + {input_b_is_scalar}); + + // quantized::mul_relu + std::string mul_relu = R"( +graph(%a_quant, %b_quant, %scale, %zero_point, %dtype): + %a_dequant = aten::dequantize(%a_quant) + %b_dequant = aten::dequantize(%b_quant) + %r_mul = aten::mul(%a_dequant, %b_dequant) + %r_relu = aten::relu(%r_mul) + %r = aten::quantize_per_tensor(%r_relu, %scale, %zero_point, %dtype) + return (%r) 
)"; + + std::string mul_inplace_relu = R"( +graph(%a_quant, %b_quant, %scale, %zero_point, %dtype): + %a_dequant = aten::dequantize(%a_quant) + %b_dequant = aten::dequantize(%b_quant) + %r_mul = aten::mul(%a_dequant, %b_dequant) + %r_relu = aten::relu_(%r_mul) + %r = aten::quantize_per_tensor(%r_relu, %scale, %zero_point, %dtype) + return (%r) )"; + + std::string inplace_mul_relu = R"( +graph(%a_quant, %b_quant, %scale, %zero_point, %dtype): + %a_dequant = aten::dequantize(%a_quant) + %b_dequant = aten::dequantize(%b_quant) + %r_mul = aten::mul_(%a_dequant, %b_dequant) + %r_relu = aten::relu(%r_mul) + %r = aten::quantize_per_tensor(%r_relu, %scale, %zero_point, %dtype) + return (%r) )"; + + std::string inplace_mul_inplace_relu = R"( +graph(%a_quant, %b_quant, %scale, %zero_point, %dtype): + %a_dequant = aten::dequantize(%a_quant) + %b_dequant = aten::dequantize(%b_quant) + %r_mul = aten::mul_(%a_dequant, %b_dequant) + %r_relu = aten::relu_(%r_mul) + %r = aten::quantize_per_tensor(%r_relu, %scale, %zero_point, %dtype) + return (%r) )"; + + std::string quantized_mul_relu = R"( +graph(%a_quant, %b_quant, %scale, %zero_point, %dtype): + %r = quantized::mul_relu(%a_quant, %b_quant, %scale, %zero_point) + return (%r) )"; + + // quantized::mul_scalar_relu -- fusing quantized::mul_scalar + // and aten::relu + auto quantized_mul_scalar_relu_pattern = R"( +graph(%a_quant, %b_scalar): + %r_mul = quantized::mul_scalar(%a_quant, %b_scalar) + %r = aten::relu(%r_mul) + return (%r) )"; + + auto quantized_mul_scalar_inplace_relu_pattern = R"( +graph(%a_quant, %b_scalar): + %r_mul = quantized::mul_scalar(%a_quant, %b_scalar) + %r = aten::relu_(%r_mul) + return (%r) )"; + + auto quantized_mul_scalar_relu_replacement = R"( +graph(%a_quant, %b_scalar): + %r = quantized::mul_scalar_relu(%a_quant, %b_scalar) + return (%r) )"; + + // quantized::mul_scalar_relu_out -- fusing quantized::mul_scalarOut + // and aten::relu + auto quantized_mul_scalar_relu_out_pattern = R"( +graph(%a_quant, %b_scalar): + %r_mul = quantized::mul_scalar_out(%a_quant, %b_scalar, %a_quant) + %r = aten::relu(%r_mul) + return (%r) )"; + + auto quantized_mul_scalar_inplace_relu_out_pattern = R"( +graph(%a_quant, %b_scalar): + %r_mul = quantized::mul_scalar_out(%a_quant, %b_scalar, %a_quant) + %r = aten::relu_(%r_mul) + return (%r) )"; + + auto quantized_mul_scalar_relu_out_replacement = R"( +graph(%a_quant, %b_scalar): + %r = quantized::mul_scalar_relu_out(%a_quant, %b_scalar, %a_quant) + return (%r) )"; + + // quantized::elu + std::string elu = R"( +graph(%a_quant, %alpha, %scale, %input_scale, %r_scale, %r_zero_point, %r_dtype): + %a_dequant = aten::dequantize(%a_quant) + %r = aten::elu(%a_dequant, %alpha, %scale, %input_scale) + %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype) + return (%r_quant) )"; + + std::string quantized_elu = R"( +graph(%a_quant, %alpha, %scale, %input_scale, %r_scale, %r_zero_point, %r_dtype): + %r_quant = quantized::elu(%a_quant, %r_scale, %r_zero_point, %alpha, %scale, %input_scale) + return (%r_quant) )"; + + std::string elu_ = R"( +graph(%a_quant, %alpha, %scale, %input_scale, %r_scale, %r_zero_point, %r_dtype): + %a_dequant = aten::dequantize(%a_quant) + %r = aten::elu_(%a_dequant, %alpha, %scale, %input_scale) + %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype) + return (%r_quant) )"; + + // ============= General Ops that inherit quantization parameters from input + // tensor ============= + auto avg_pool1d = getInputTensorQParamOpFusionInfo( + 
"aten::avg_pool1d", + {"%kernel_size", + "%stride", + "%padding", + "%ceil_mode", + "%count_include_pad"}); + + auto avg_pool2d = getInputTensorQParamOpFusionInfo( + "aten::avg_pool2d", + {"%kernel_size", + "%stride", + "%padding", + "%ceil_mode", + "%count_include_pad", + "%divisor_override"}); + + std::string common_general_value_op = R"( + %r_scale : float = aten::q_scale(%a_quant) + %r_zero_point : int = aten::q_zero_point(%a_quant) + %r_dtype : int = prim::dtype(%a_quant) + %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype) + return (%r_quant) )"; + + auto avg_pool3d = getInputTensorQParamOpFusionInfo( + "aten::avg_pool3d", + {"%kernel_size", + "%stride", + "%padding", + "%ceil_mode", + "%count_include_pad", + "%divisor_override"}); + + auto adaptive_avg_pool1d = getInputTensorQParamOpFusionInfo( + "aten::adaptive_avg_pool1d", {"%output_size"}); + + auto adaptive_avg_pool2d = getInputTensorQParamOpFusionInfo( + "aten::adaptive_avg_pool2d", {"%output_size"}); + + auto adaptive_avg_pool3d = getInputTensorQParamOpFusionInfo( + "aten::adaptive_avg_pool3d", {"%output_size"}); + + auto mean1 = getInputTensorQParamOpFusionInfo("aten::mean", {"%dim"}); + + auto mean2 = getInputTensorQParamOpFusionInfo( + "aten::mean", {"%dim", "%keepdim", "%out"}); + + auto upsample_nearest1d_vec = getInputTensorQParamOpFusionInfo( + "aten::upsample_nearest1d", {"%output_size", "%scale_factors"}); + + auto upsample_nearest2d_vec = getInputTensorQParamOpFusionInfo( + "aten::upsample_nearest2d", {"%output_size", "%scale_factors"}); + + auto upsample_nearest3d_vec = getInputTensorQParamOpFusionInfo( + "aten::upsample_nearest3d", {"%output_size", "%scale_factors"}); + + auto upsample_linear1d_vec = getInputTensorQParamOpFusionInfo( + "aten::upsample_linear1d", + {"%output_size", "%align_corners", "%scale_factors"}); + + auto upsample_bilinear2d_vec = getInputTensorQParamOpFusionInfo( + "aten::upsample_bilinear2d", + {"%output_size", "%align_corners", "%scale_factors"}); + + auto upsample_trilinear3d_vec = getInputTensorQParamOpFusionInfo( + "aten::upsample_trilinear3d", + {"%output_size", "%align_corners", "%scale_factors"}); + + auto upsample_nearest1d = getInputTensorQParamOpFusionInfo( + "aten::upsample_nearest1d", {"%output_size", "%scales"}); + + auto upsample_nearest2d = getInputTensorQParamOpFusionInfo( + "aten::upsample_nearest2d", {"%output_size", "%scale_h", "%scale_w"}); + + auto upsample_nearest3d = getInputTensorQParamOpFusionInfo( + "aten::upsample_nearest3d", + {"%output_size", "%scale_d", "%scale_h", "%scale_w"}); + + auto upsample_linear1d = getInputTensorQParamOpFusionInfo( + "aten::upsample_linear1d", {"%output_size", "%align_corners", "%scales"}); + + auto upsample_bilinear2d = getInputTensorQParamOpFusionInfo( + "aten::upsample_bilinear2d", + {"%output_size", "%align_corners", "%scale_h", "%scale_w"}); + + auto upsample_trilinear3d = getInputTensorQParamOpFusionInfo( + "aten::upsample_trilinear3d", + {"%output_size", "%align_corners", "%scale_d", "%scale_h", "%scale_w"}); + + auto clamp = getClampOpFusionInfo("aten::clamp", {"%min", "%max"}); + + auto hardtanh = getClampOpFusionInfo("aten::hardtanh", {"%min", "%max"}); + + auto hardtanh_ = getClampOpFusionInfo("aten::hardtanh_", {"%min", "%max"}); + + auto leaky_relu = + getInputTensorQParamOpFusionInfo("aten::leaky_relu", {"%negative_slope"}); + + auto leaky_relu_ = getInputTensorQParamOpFusionInfo( + "aten::leaky_relu_", {"%negative_slope"}); + + // Ops with fixed quantization parameters + auto hardsigmoid = 
getFixedQParamOpFusionInfo("aten::hardsigmoid", {}, false); + + auto hardsigmoid_ = + getFixedQParamOpFusionInfo("aten::hardsigmoid_", {}, false); + + auto sigmoid = getFixedQParamOpFusionInfo("aten::sigmoid", {}, false); + + auto sigmoid_ = getFixedQParamOpFusionInfo("aten::sigmoid_", {}, false); + + auto tanh = getFixedQParamOpFusionInfo("aten::tanh", {}, true); + + auto tanh_ = getFixedQParamOpFusionInfo("aten::tanh_", {}, true); + + auto hardswish = getObservedQParamOpFusionInfo( + "aten::hardswish", "quantized::hardswish", {}, {}); + + auto hardswish_ = getObservedQParamOpFusionInfo( + "aten::hardswish_", "quantized::hardswish", {}, {}); + + auto layer_norm = getObservedQParamOpFusionInfo( + "aten::layer_norm", + "quantized::layer_norm", + {"%normalized_shape", "%weight", "%bias", "%eps", "%cudnn_enabled"}, + {"%normalized_shape", "%weight", "%bias", "%eps"}); + + auto group_norm = getObservedQParamOpFusionInfo( + "aten::group_norm", + "quantized::group_norm", + {"%num_groups", "%weight", "%bias", "%eps", "%cudnn_enabled"}, + {"%num_groups", "%weight", "%bias", "%eps"}); + + auto instance_norm = getObservedQParamOpFusionInfo( + "aten::instance_norm", + "quantized::instance_norm", + {"%weight", + "%bias", + "%running_mean", + "%running_var", + "%use_input_stats", + "%momentum", + "%eps", + "%cudnn_enabled"}, + {"%weight", "%bias", "%eps"}); + + return { + {"quantized::conv1d", std::move(conv1d), std::move(quantized_conv1d)}, + {"quantized::conv1d_relu", std::move(conv1d_relu), quantized_conv1d_relu}, + {"quantized::conv1d_relu", + std::move(conv1d_inplace_relu), + std::move(quantized_conv1d_relu)}, + {"quantized::conv2d", std::move(conv2d), std::move(quantized_conv2d)}, + {"quantized::conv2d_relu", std::move(conv2d_relu), quantized_conv2d_relu}, + {"quantized::conv2d_relu", + std::move(conv2d_inplace_relu), + std::move(quantized_conv2d_relu)}, + {"quantized::conv3d", std::move(conv3d), std::move(quantized_conv3d)}, + {"quantized::conv3d_relu", std::move(conv3d_relu), quantized_conv3d_relu}, + {"quantized::conv3d_relu", + std::move(conv3d_inplace_relu), + std::move(quantized_conv3d_relu)}, + {"quantized::conv_transpose1d", + std::move(conv_transpose1d), + std::move(quantized_conv_transpose1d)}, + {"quantized::conv_transpose2d", + std::move(conv_transpose2d), + std::move(quantized_conv_transpose2d)}, + {"quantized::linear", std::move(linear), std::move(quantized_linear)}, + {"quantized::linear_relu", std::move(linear_relu), quantized_linear_relu}, + {"quantized::linear_relu", + std::move(linear_inplace_relu), + std::move(quantized_linear_relu)}, + {"quantized::add_relu", + std::move(add_relu), + quantized_add_relu, + {aten_add_alpha_is_one}}, + {"quantized::add_relu", + std::move(add_inplace_relu), + quantized_add_relu, + {aten_add_alpha_is_one}}, + {"quantized::add_relu", + std::move(inplace_add_relu), + quantized_add_relu, + {aten_add_alpha_is_one}}, + {"quantized::add_relu", + std::move(inplace_add_inplace_relu), + std::move(quantized_add_relu), + {aten_add_alpha_is_one}}, + std::move(add_scalar), + std::move(add_scalar_out), + // note that these must come after quantized::add_scalar and + // quantized::add_scalar_out patterns + {"quantized::add_scalar_relu", + quantized_add_scalar_relu_pattern, + quantized_add_scalar_relu_replacement}, + {"quantized::add_scalar_relu", + quantized_add_scalar_inplace_relu_pattern, + quantized_add_scalar_relu_replacement}, + {"quantized::add_scalar_relu_out", + quantized_add_scalar_relu_out_pattern, + quantized_add_scalar_relu_out_replacement}, + 
{"quantized::add_scalar_relu_out", + quantized_add_scalar_inplace_relu_out_pattern, + quantized_add_scalar_relu_out_replacement}, + {"quantized::add", + std::move(add), + quantized_add, + {aten_add_alpha_is_one}}, + {"quantized::add", + std::move(inplace_add), + std::move(quantized_add), + {aten_add_alpha_is_one}}, + {"quantized::cat", std::move(cat), std::move(quantized_cat)}, + {"quantized::batch_norm", + std::move(batch_norm), + std::move(quantized_batch_norm)}, + {"quantized::batch_norm_relu", + std::move(batch_norm_relu), + quantized_batch_norm_relu}, + {"quantized::batch_norm_relu", + std::move(batch_norm_inplace_relu), + std::move(quantized_batch_norm_relu)}, + std::move(mul_scalar), + std::move(mul_scalar_out), + // note that these must come after quantized::mul_scalar and + // quantized::mul_scalar_out patterns + {"quantized::mul_scalar_relu", + quantized_mul_scalar_relu_pattern, + quantized_mul_scalar_relu_replacement}, + {"quantized::mul_scalar_relu", + quantized_mul_scalar_inplace_relu_pattern, + quantized_mul_scalar_relu_replacement}, + {"quantized::mul_scalar_relu_out", + quantized_mul_scalar_relu_out_pattern, + quantized_mul_scalar_relu_out_replacement}, + {"quantized::mul_scalar_relu_out", + quantized_mul_scalar_inplace_relu_out_pattern, + quantized_mul_scalar_relu_out_replacement}, + {"quantized::mul_relu", std::move(mul_relu), quantized_mul_relu}, + {"quantized::mul_relu", std::move(mul_inplace_relu), quantized_mul_relu}, + {"quantized::mul_relu", std::move(inplace_mul_relu), quantized_mul_relu}, + {"quantized::mul_relu", + std::move(inplace_mul_inplace_relu), + std::move(quantized_mul_relu)}, + {"quantized::mul", std::move(mul), quantized_mul}, + {"quantized::mul", std::move(inplace_mul), std::move(quantized_mul)}, + std::move(hardswish), + std::move(hardswish_), + std::move(layer_norm), + std::move(group_norm), + std::move(instance_norm), + {"quantized::elu", std::move(elu), quantized_elu}, + {"quantized::elu_", std::move(elu_), std::move(quantized_elu)}, + std::move(avg_pool1d), + std::move(avg_pool2d), + std::move(avg_pool3d), + std::move(adaptive_avg_pool1d), + std::move(adaptive_avg_pool2d), + std::move(adaptive_avg_pool3d), + std::move(mean1), + std::move(mean2), + std::move(upsample_nearest1d), + std::move(upsample_nearest2d), + std::move(upsample_nearest3d), + std::move(upsample_linear1d), + std::move(upsample_bilinear2d), + std::move(upsample_trilinear3d), + std::move(upsample_nearest1d_vec), + std::move(upsample_nearest2d_vec), + std::move(upsample_nearest3d_vec), + std::move(upsample_linear1d_vec), + std::move(upsample_bilinear2d_vec), + std::move(upsample_trilinear3d_vec), + std::move(clamp), + std::move(hardtanh), + std::move(hardtanh_), + std::move(leaky_relu), + std::move(leaky_relu_), + // fixed qparam ops + std::move(hardsigmoid), + std::move(hardsigmoid_), + std::move(sigmoid), + std::move(sigmoid_), + std::move(tanh), + std::move(tanh_), + }; +} + +inline std::vector +dynamic_quantized_linear_pattern_and_replacements() { + std::string linear_dynamic = R"( +graph(%packed_params, %a): + %w_quant : Tensor, %b : Tensor? = quantized::linear_unpack(%packed_params) + %w_dequant = aten::dequantize(%w_quant) + %r = aten::linear(%a, %w_dequant, %b) + return (%r) )"; + + // This pattern ignores reduce range + // Set the reduce range to default to true, since qnnpack backend ignores this + // argument. 
+ std::string quantized_linear_dynamic = R"( +graph(%packed_params, %a): + %reduce_range : bool = prim::Constant[value=1]() + %r = quantized::linear_dynamic(%a, %packed_params, %reduce_range) + return (%r) )"; + + return { + {"quantized::linear_dynamic", + std::move(linear_dynamic), + std::move(quantized_linear_dynamic)}, + }; +} + +static std::vector +dynamic_quant_fusion_pattern_and_replacements() { + std::string linear_dynamic = R"( +graph(%packed_params, %a, %reduce_range, %a_dtype): + %a_scale : float, %a_zero_point : int = aten::_choose_qparams_per_tensor(%a, %reduce_range) + %a_quant = aten::quantize_per_tensor(%a, %a_scale, %a_zero_point, %a_dtype) + %a_dequant = aten::dequantize(%a_quant) + %w_quant : Tensor, %b : Tensor? = quantized::linear_unpack(%packed_params) + %w_dequant = aten::dequantize(%w_quant) + %r = aten::linear(%a_dequant, %w_dequant, %b) + return (%r) )"; + + std::string quantized_linear_dynamic = R"( +graph(%packed_params, %a, %reduce_range, %a_dtype): + %r = quantized::linear_dynamic(%a, %packed_params, %reduce_range) + return (%r) )"; + + std::string linear_dynamic_fp16 = R"( +graph(%packed_params, %a): + %w_unpacked : Tensor, %b : Tensor? = quantized::linear_unpack_fp16(%packed_params) + %r = aten::linear(%a, %w_unpacked, %b) + return (%r) )"; + + std::string quantized_linear_dynamic_fp16 = R"( +graph(%packed_params, %a): + %r = quantized::linear_dynamic_fp16(%a, %packed_params) + return (%r) )"; + + return { + {"quantized::linear_dynamic", + std::move(linear_dynamic), + std::move(quantized_linear_dynamic)}, + {"quantized::linear_dynamic_fp16", + std::move(linear_dynamic_fp16), + std::move(quantized_linear_dynamic_fp16)}, + }; +} + +static std::vector linear_prepack_unpack_patterns() { + std::string linear_with_quant = R"( +graph(%a_dequant, %w_quant, %b): + %w_dequant = aten::dequantize(%w_quant) + %r = aten::linear(%a_dequant, %w_dequant, %b) + return (%r) )"; + + std::string linear_with_quant_prepack = R"( +graph(%a_dequant, %w_quant, %b): + %packed_params = quantized::linear_prepack(%w_quant, %b) + %w_quant_unpacked : Tensor, %b_unpacked : Tensor? = quantized::linear_unpack(%packed_params) + %w_dequant = aten::dequantize(%w_quant_unpacked) + %r = aten::linear(%a_dequant, %w_dequant, %b_unpacked) + return (%r) )"; + std::string linear_fp16_with_cast = R"( +graph(%w, %a_dq, %b): + %fp16_tensor = aten::_saturate_weight_to_fp16(%w) + %r = aten::linear(%a_dq, %fp16_tensor, %b) + return (%r) )"; + std::string linear_fp16_with_prepack = R"( +graph(%w, %a_dq, %b): + %packed_params = quantized::linear_prepack_fp16(%w, %b) + %w_unpacked : Tensor, %b_unpacked : Tensor? 
= quantized::linear_unpack_fp16(%packed_params) + %r = aten::linear(%a_dq, %w_unpacked, %b_unpacked) + return (%r) )"; + + return { + {"linear_prepack_unpack", + std::move(linear_with_quant), + std::move(linear_with_quant_prepack)}, + {"linear_fp16_prepack_unpack", + std::move(linear_fp16_with_cast), + std::move(linear_fp16_with_prepack)}, + }; +} + +static std::vector conv_prepack_unpack_patterns() { + std::string conv1d_with_quant = R"( +graph(%a_dequant, %w_quant, %b, %stride, %padding, %dilation, %groups): + %w_dequant = aten::dequantize(%w_quant) + %r = aten::conv1d(%a_dequant, %w_dequant, %b, %stride, %padding, %dilation, %groups) + return (%r) )"; + + std::string conv1d_with_quant_prepack = R"( +graph(%a_dequant, %w_quant, %b, %stride, %padding, %dilation, %groups): + %packed_params : __torch__.torch.classes.quantized.Conv2dPackedParamsBase = quantized::conv1d_prepack(%w_quant, %b, %stride, %padding, %dilation, %groups) + %w_quant_unpacked : Tensor, %b_unpacked : Tensor? = quantized::conv1d_unpack(%packed_params) + %w_dequant = aten::dequantize(%w_quant_unpacked) + %r = aten::conv1d(%a_dequant, %w_dequant, %b_unpacked, %stride, %padding, %dilation, %groups) + return (%r) )"; + + std::string conv2d_with_quant = R"( +graph(%a_dequant, %w_quant, %b, %stride, %padding, %dilation, %groups): + %w_dequant = aten::dequantize(%w_quant) + %r = aten::conv2d(%a_dequant, %w_dequant, %b, %stride, %padding, %dilation, %groups) + return (%r) )"; + + std::string conv2d_with_quant_prepack = R"( +graph(%a_dequant, %w_quant, %b, %stride, %padding, %dilation, %groups): + %packed_params : __torch__.torch.classes.quantized.Conv2dPackedParamsBase = quantized::conv2d_prepack(%w_quant, %b, %stride, %padding, %dilation, %groups) + %w_quant_unpacked : Tensor, %b_unpacked : Tensor? = quantized::conv2d_unpack(%packed_params) + %w_dequant = aten::dequantize(%w_quant_unpacked) + %r = aten::conv2d(%a_dequant, %w_dequant, %b_unpacked, %stride, %padding, %dilation, %groups) + return (%r) )"; + + std::string conv3d_with_quant = R"( +graph(%a_dequant, %w_quant, %b, %stride, %padding, %dilation, %groups): + %w_dequant = aten::dequantize(%w_quant) + %r = aten::conv3d(%a_dequant, %w_dequant, %b, %stride, %padding, %dilation, %groups) + return (%r) )"; + + std::string conv3d_with_quant_prepack = R"( +graph(%a_dequant, %w_quant, %b, %stride, %padding, %dilation, %groups): + %packed_params : __torch__.torch.classes.quantized.Conv3dPackedParamsBase = quantized::conv3d_prepack(%w_quant, %b, %stride, %padding, %dilation, %groups) + %w_quant_unpacked : Tensor, %b_unpacked : Tensor? = quantized::conv3d_unpack(%packed_params) + %w_dequant = aten::dequantize(%w_quant_unpacked) + %r = aten::conv3d(%a_dequant, %w_dequant, %b_unpacked, %stride, %padding, %dilation, %groups) + return (%r) )"; + + std::string conv_transpose1d_with_quant = R"( +graph(%a_dequant, %w_quant, %b, %stride, %padding, %output_padding, %groups, %dilation): + %w_dequant = aten::dequantize(%w_quant) + %r = aten::conv_transpose1d(%a_dequant, %w_dequant, %b, %stride, %padding, %output_padding, %groups, %dilation) + return (%r) )"; + + std::string conv_transpose1d_with_quant_prepack = R"( +graph(%a_dequant, %w_quant, %b, %stride, %padding, %output_padding, %groups, %dilation): + %packed_params : __torch__.torch.classes.quantized.Conv2dPackedParamsBase = quantized::conv_transpose1d_prepack(%w_quant, %b, %stride, %padding, %output_padding, %dilation, %groups) + %w_quant_unpacked : Tensor, %b_unpacked : Tensor? 
= quantized::conv_transpose1d_unpack(%packed_params) + %w_dequant = aten::dequantize(%w_quant_unpacked) + %r = aten::conv_transpose1d(%a_dequant, %w_dequant, %b_unpacked, %stride, %padding, %output_padding, %groups, %dilation) + return (%r) )"; + + std::string conv_transpose2d_with_quant = R"( +graph(%a_dequant, %w_quant, %b, %stride, %padding, %output_padding, %groups, %dilation): + %w_dequant = aten::dequantize(%w_quant) + %r = aten::conv_transpose2d(%a_dequant, %w_dequant, %b, %stride, %padding, %output_padding, %groups, %dilation) + return (%r) )"; + + std::string conv_transpose2d_with_quant_prepack = R"( +graph(%a_dequant, %w_quant, %b, %stride, %padding, %output_padding, %groups, %dilation): + %packed_params : __torch__.torch.classes.quantized.Conv2dPackedParamsBase = quantized::conv_transpose2d_prepack(%w_quant, %b, %stride, %padding, %output_padding, %dilation, %groups) + %w_quant_unpacked : Tensor, %b_unpacked : Tensor? = quantized::conv_transpose2d_unpack(%packed_params) + %w_dequant = aten::dequantize(%w_quant_unpacked) + %r = aten::conv_transpose2d(%a_dequant, %w_dequant, %b_unpacked, %stride, %padding, %output_padding, %groups, %dilation) + return (%r) )"; + + return { + {"conv1d_prepack_unpack", + std::move(conv1d_with_quant), + std::move(conv1d_with_quant_prepack)}, + {"conv2d_prepack_unpack", + std::move(conv2d_with_quant), + std::move(conv2d_with_quant_prepack)}, + {"conv3d_prepack_unpack", + std::move(conv3d_with_quant), + std::move(conv3d_with_quant_prepack)}, + {"conv_transpose1d_prepack_unpack", + std::move(conv_transpose1d_with_quant), + std::move(conv_transpose1d_with_quant_prepack)}, + {"conv_transpose2d_prepack_unpack", + std::move(conv_transpose2d_with_quant), + std::move(conv_transpose2d_with_quant_prepack)}}; +} + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/quantization_type.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/quantization_type.h new file mode 100644 index 0000000000000000000000000000000000000000..ac4afe90ed9ea577a760becf2a2d760a6bd74d60 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/quantization_type.h @@ -0,0 +1,15 @@ +#pragma once +#include +#include + +namespace torch { +namespace jit { + +// Quantization type (dynamic quantization, static quantization). 
+// Should match the Python enum in quantize_jit.py +enum QuantType : std::uint8_t { DYNAMIC = 0, STATIC }; + +std::ostream& operator<<(std::ostream& os, QuantType t); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/register_packed_params.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/register_packed_params.h new file mode 100644 index 0000000000000000000000000000000000000000..c1cbf1b27bb32296d70cc7b2a943750665772daa --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/register_packed_params.h @@ -0,0 +1,20 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace jit { + +using PrePackParamFilterFn = std::function; + +TORCH_API std::unordered_set RegisterPrePackParams( + Module& m, + const std::string& method_name, + const PrePackParamFilterFn& is_packed_param, + const std::string& attr_prefix); + +TORCH_API std::string joinPaths(const std::vector& paths); +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_dropout.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_dropout.h new file mode 100644 index 0000000000000000000000000000000000000000..19cf29d5de290be1d1b73fffef0b2d2aaadb5f38 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_dropout.h @@ -0,0 +1,14 @@ +#pragma once + +#include +#include + +namespace torch { +namespace jit { + +TORCH_API void removeDropout(std::shared_ptr& graph); + +TORCH_API void removeDropout(script::Module& module); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_expands.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_expands.h new file mode 100644 index 0000000000000000000000000000000000000000..8a484a839e552dc02a4a04e660d9d173e473f2c5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_expands.h @@ -0,0 +1,11 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +TORCH_API void RemoveExpands(const std::shared_ptr& graph); + +} +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_mutation.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_mutation.h new file mode 100644 index 0000000000000000000000000000000000000000..eb8cf195ee4ca19ce399435e8586d4eecb8b3397 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_mutation.h @@ -0,0 +1,81 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch { +namespace jit { + +struct TORCH_API MutationRemover { + MutationRemover( + std::shared_ptr graph, + c10::optional> mutation_filter = c10::nullopt) + : mutation_filter_(mutation_filter), + aliasDb_(nullptr), + graph_(std::move(graph)) {} + + // return true if graph is modified + bool removeListMutation(); + + // return true if graph is modified + bool removeTensorMutation(); + + bool isSpecialMappedOp(Node* n) { + return n->matches("aten::zero_(Tensor(a!) self) -> Tensor(a!)") || + n->matches( + "aten::fill_.Scalar(Tensor(a!) self, Scalar value) -> Tensor(a!)") || + n->matches( + "aten::normal_(Tensor(a!) self, float mean=0, float std=1, *, Generator? 
generator=None) -> Tensor(a!)"); + } + + bool inplaceOpVariant(Node* n); + + static bool hasSideEffectOrAlias(Value* v, AliasDb* aliasDb); + + private: + Node* createSpecialMappedOp(Node* n); + bool listMutationFollowingListConstruct(Node* n); + bool tryMakeCreationAndMutationAtomic( + Value* mutated_value, + Node* mutating_op); + bool tryMakeUnaliasedIfOutputAndMutationAtomic( + Value* mutated_value, + Node* mutating_op); + // return true if graph is modified + bool RemoveListMutation(Block* block); + // return true if graph is modified + bool RemoveTensorMutation(Block* block); + + AliasDb* getOrCreateAliasDb() { + if (!aliasDb_) { + aliasDb_ = std::make_unique(graph_); + } + return aliasDb_.get(); + } + + c10::optional> mutation_filter_; + std::unique_ptr aliasDb_ = nullptr; + std::shared_ptr graph_; +}; + +// Removes list mutation with functional equivalents +// return true if graph is modified +TORCH_API bool RemoveListMutation(const std::shared_ptr& graph); + +// Replaces in-place aten ops with their functional equivalents +// when it can be proven that this does not change graph semantics +// if `mutation_filter` is present, the pass will only attempt to +// remove mutation on nodes which return true for the filter +// return true if graph is modified +TORCH_API bool RemoveTensorMutation( + const std::shared_ptr& graph, + c10::optional> mutation_filter = c10::nullopt); + +// Replaces in-place aten activation ops with their functional equivalence +TORCH_API bool InplaceToFunctionalActivation( + const std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_redundant_profiles.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_redundant_profiles.h new file mode 100644 index 0000000000000000000000000000000000000000..b574786c0bb1cf1269816edec2ef5d13980bd5e8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_redundant_profiles.h @@ -0,0 +1,11 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +TORCH_API void RemoveRedundantProfiles(std::shared_ptr& graph); +TORCH_API void RemoveRedundantProfiles(Block* block, AliasDb& db); +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/replacement_of_old_operators.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/replacement_of_old_operators.h new file mode 100644 index 0000000000000000000000000000000000000000..1f3fbf6cac88d082a1a55ae0b9b85667f8d83561 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/replacement_of_old_operators.h @@ -0,0 +1,16 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Find the valid upgrader graph for the upgrader and cache the result +// for later lookups. Will error out if there is no valid upgrader graph +// provided for the upgrader name. 
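+//
+// Hedged usage sketch (not part of the upstream header): the setup below,
+// including obtaining the graph from a loaded module, is an assumption made
+// for illustration only.
+//
+//   std::shared_ptr<Graph> graph = module.get_method("forward").graph();
+//   ReplaceOldOperatorsWithUpgraders(graph);
+//   // Ops serialized against an older schema are now routed through the
+//   // registered upgrader graphs resolved via getUpgraderGraph().
+//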
+std::shared_ptr getUpgraderGraph(const std::string& upgrader_name); + +TORCH_API void ReplaceOldOperatorsWithUpgraders(std::shared_ptr graph); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/restore_mutation.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/restore_mutation.h new file mode 100644 index 0000000000000000000000000000000000000000..48ce9fdb9ed208441959974b015026bda98f7f06 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/restore_mutation.h @@ -0,0 +1,63 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { + +// A map which stores if an activation operator can perform type promotion +const std::unordered_map activation_type_promotion_mapping = { + {aten::sigmoid, true}, + {aten::tanh, true}, + {aten::celu, false}, + {aten::elu, false}, + {aten::gelu, false}, + {aten::glu, false}, + {aten::hardshrink, false}, + {aten::hardsigmoid, false}, + {aten::hardswish, false}, + {aten::hardtanh, false}, + {aten::leaky_relu, false}, + {aten::prelu, false}, + {aten::relu6, false}, + {aten::relu, false}, + {aten::rrelu, false}, + {aten::selu, false}, + {aten::silu, false}}; + +class FunctionalToInplaceRewriter { + public: + FunctionalToInplaceRewriter(std::shared_ptr graph); + + bool FunctionalToInplace(Block* block); + + private: + AliasDb* getOrCreateAliasDb() { + if (!aliasDb_) { + aliasDb_ = std::make_unique(graph_); + } + return aliasDb_.get(); + } + + bool CanBeInplace(Node* node); + + std::unique_ptr aliasDb_ = nullptr; + std::shared_ptr graph_; +}; + +// A common application scenario is to apply InplaceToFunctionalActivation +// before some JIT optimization passes, so that those passes are less +// constrained by in-place ops. After those passes are done, we can call +// FunctionalToInplaceActivation to recover in-place activation ops, +// so that we won't lose the performance benefit coming from memory reduction. 
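+//
+// A minimal sketch of the round-trip described above; the pass in the middle
+// is only a stand-in for whatever optimizations benefit from a mutation-free
+// graph:
+//
+//   InplaceToFunctionalActivation(graph);  // e.g. aten::relu_ -> aten::relu
+//   EliminateCommonSubexpression(graph);   // any mutation-sensitive pass
+//   FunctionalToInplaceActivation(graph);  // restore in-place activations
+//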
+ +// Replaces functional aten activation ops with their in-place equivalents +TORCH_API bool FunctionalToInplaceActivation( + const std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/shape_analysis.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/shape_analysis.h new file mode 100644 index 0000000000000000000000000000000000000000..670072a0b09b45337cd8bd80eb5bd9e12ee7f0dc --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/shape_analysis.h @@ -0,0 +1,43 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace jit { + +struct Graph; + +struct propagation_error : std::exception {}; + +class PropertyPropBase { + // Used for both Shape Propagation and Dtype/Device Propagation + public: + explicit PropertyPropBase(std::shared_ptr graph) + : graph_(std::move(graph)) {} + virtual ~PropertyPropBase() = default; + + void propagateBlock(Block* block, bool insert_expands = true); + // insert_expands is used for shape inference + + void processIf(Node* node); + void processLoop(Node* node); + + protected: + virtual void propagateNode(Node* node, bool insert_expands = true) = 0; + void setUnshapedType(Value* o); + void setUnshapedType(Node* node); + std::shared_ptr graph_; +}; + +TORCH_API void EraseShapeInformation(const std::shared_ptr& graph); +TORCH_API void PropagateInputShapes(const std::shared_ptr& graph); + +TORCH_API bool mergeTypes( + ArrayRef lhs, + ArrayRef rhs, + ArrayRef outputs); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/symbolic_shape_runtime_fusion.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/symbolic_shape_runtime_fusion.h new file mode 100644 index 0000000000000000000000000000000000000000..414d699d2e4cb762d8e759081b761345f5cd55aa --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/symbolic_shape_runtime_fusion.h @@ -0,0 +1,55 @@ +#pragma once + +#include +#include +#include + +#include + +namespace torch { +namespace jit { + +// Takes in a TensorExprGraph of static shapes and generalizes the input shapes +// to symbolic dimensions. Dimensions of value 1 will be preserved, otherwise +// dimensions with the same value will be bucketed to the same symbolic shape. +// E.g. Tensor(5, 3), Tensor(3, 1) -> Tensor(SS(-1), SS(-2)), Tensor(SS(-2), 1) +// From there, runs symbolic shape inference on the graph, and creates a +// versioning if in the graph with prim::TensorExprDynamicGuard checking if +// the inputs at runtime match the Generalized Symbolic Shapes that are inputs +// to the TE Kernel. The computate to calculate all symbolic dimensions is +// inlined in to the if block with the TE Kernel. All Sym Dim Value* are +// appended to the end of the TE Kernel Graph/Node inputs, and the Node is +// augmented with a integer list attr `symbolic_shape_inputs` that gives the +// mapping from Value * -> Symbolic Shape int64_t value. 
For more lengthy IR +// examples and walkthrough look at ShapeAnalysisTest.DynamicShapesFusion in +// `test_shape_analysis` Returns True on Success, False on Failure, can fail if +// shape propagation fails to propagate # of dims or if complete shapes on +// inputs not set + +TORCH_API bool GenerateGuard( + Node* tensorexpr_graph_node, + bool add_composed_op = false); + +TORCH_API void runTensorExprDynamicGroup(const Code& code, Stack& stack); + +enum class StrideInput { + // Tensors natively store whether they are contiguous or not as a property + // this makes it faster to query `is_contiguous` or + // `is_contiguous(memory_format=channels_last)` + // than looping through the sizes/strides yourself + // For tensors with these properties, we only store one value: + TENSOR_CONT, + TENSOR_CONT_CHANNELS_LAST, + // now, we describe other cases, where there is one stride enum + // per dimension + S_ONE, // STRIDE_ONE: packed + S_CONT, // STRIDE_CONTIGUOUS: stride[i + 1] * sizes[i + 1] + S_TRAN_CONT, // STRIDE_TRANSPOSED_CONTIGUOUS: stride[i-1] * sizes[i-1] + S_AS_ARG, // STRIDE_AS_ARG: stride passed in as runtime value +}; + +TORCH_API std::string toString(StrideInput si); +TORCH_API StrideInput strideInputFromString(const std::string& si); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/tensorexpr_fuser.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/tensorexpr_fuser.h new file mode 100644 index 0000000000000000000000000000000000000000..d951982fde2990f335ed26e688767b349ac1cb5b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/tensorexpr_fuser.h @@ -0,0 +1,75 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace jit { + +// Run TensorExpressions-based fuser. +// If add_composed_op is true, creates a single operation that +// performs both the runtime check that types align +// and then the dispatch to the kernel/unoptimized graph +TORCH_API void FuseTensorExprs( + std::shared_ptr& graph, + size_t min_group_size = 2, + bool add_composed_op = false, + bool fuse_to_dynamic_shapes = false); + +TORCH_API void setTensorExprFuserEnabled(bool val); +TORCH_API bool tensorExprFuserEnabled(); +TORCH_API void setTensorExprDynamicShapeFusionEnabled(bool val); +TORCH_API bool tensorExprDynamicShapeFusionEnabled(); +TORCH_API bool setTexprReductionsEnabled(bool value); +TORCH_API bool texprReductionsEnabled(); + +TORCH_API void RemoveProfileNodesAndSpecializeTypes( + std::shared_ptr& graph); +TORCH_API bool hasTensorTypeSpecialization(Value* v); +TORCH_API void RemoveTensorTypeSpecializations(std::shared_ptr& graph); +TORCH_API void removeTensorTypeSpecializations(Block* block); + +using tensor_type_converter_t = + c10::function_ref; + +// inserts a TypeCheck pattern +// +// around the guarded node that has a Subgraph attribute, this inserts a pattern +// +// if TypeCheck(...): +// guarded_node +// else: +// FallbackGraph(...) +// +// The TypeCheck includes the types of all Tensor inputs to the guarded_node, +// as processed by the type_converter, a lambda +// TensorTypePtr(const TensorTypePtr& t). This allows to erase irrelevant +// aspects of the type. +// +// The Fallback graph will have the same subgraph as the guarded node (with the +// expectation that the guarded_node's subgraph will then be optimized. 
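+//
+// Hedged usage sketch (the node and symbol below are illustrative, not
+// mandated by this header): a fuser that has outlined work into a node with
+// a Subgraph attribute can guard it on the profiled input types; an identity
+// converter keeps the complete profiled type, while real callers typically
+// erase details such as strides or gradient info:
+//
+//   insertTypeGuard(
+//       fusion_group_node,
+//       [](const TensorTypePtr& t) { return t; },  // identity converter
+//       prim::TypeCheck);
+//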
+TORCH_API void insertTypeGuard( + Node* guarded_node, + tensor_type_converter_t type_converter, + c10::Symbol kind); + +TORCH_API bool usedOnlyInSize(Value* v); +TORCH_API Value* broadcastSizes(at::ArrayRef sizes, AliasDb* db); + +namespace tensorexpr { +TORCH_API bool isSupported(Node* node); + +/// Get the modifiable custom operator set object. +/// +/// For static shapes, if a custom operator has been added to the custom +/// operator set, it will be pulled into the NNC fusion group. But it doesn't +/// work with dynamic shapes unless explicitly register the shape function via +/// `torch::jit::RegisterShapeComputeGraphForSchema` for the custom operator. +/// +/// @return Reference of the custome operator set +/// +TORCH_API OperatorSet& getCustomOperatorSet(); +} // namespace tensorexpr +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/update_differentiable_graph_requires_grad.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/update_differentiable_graph_requires_grad.h new file mode 100644 index 0000000000000000000000000000000000000000..eb51ba00c4c9f8d2ca07dd96def6f5e168160e35 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/update_differentiable_graph_requires_grad.h @@ -0,0 +1,20 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Because differentiable graphs detach the gradients of input Tensors, +// creating and inlining differentiable graphs changes the requires_grad +// property of tensors in the graph. This pass updates prim::profiles +// requires_grad to keep profiled properties up to date, it does not update +// grad properties of other nodes like graph inputs bc the only downstream +// user of the grad property is the profiling executor, which just uses +// the types of prim::profiles +TORCH_API void UpdateDifferentiableGraphRequiresGrad( + std::shared_ptr& diff_forward_graph, + c10::optional new_requires_grad); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/utils/check_alias_annotation.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/utils/check_alias_annotation.h new file mode 100644 index 0000000000000000000000000000000000000000..df491c8ea3d5a85fd53cc9fe9edffce9d76f7910 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/utils/check_alias_annotation.h @@ -0,0 +1,22 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { + +// Verify that alias annotations are correct. See impl for definition of +// "correct". +// +// This function expects a graph with a single op with `unqualifiedOpName`, plus +// the inputs that you would otherwise have passed to the graph executor. 
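+//
+// Illustrative call (assumes `graph` was built elsewhere, e.g. by tracing or
+// the IR parser, and contains exactly one aten::add node):
+//
+//   std::vector<IValue> inputs = {self_tensor, other_tensor, /*alpha=*/1};
+//   checkAliasAnnotation(graph, std::move(inputs), "add");
+//
+// Note the unqualified op name ("add" rather than "aten::add"), matching the
+// `unqualifiedOpName` parameter below.
+//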
+TORCH_API void checkAliasAnnotation( + const std::shared_ptr& graph, + std::vector pythonInputs, + const std::string& unqualifiedOpName); +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/utils/memory_dag.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/utils/memory_dag.h new file mode 100644 index 0000000000000000000000000000000000000000..f3068588dae8531d1b79162b215ada28a942c3d5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/utils/memory_dag.h @@ -0,0 +1,176 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +// Uses a compressed index representation for faster comparisons +typedef c10::SparseBitVector<256> MemoryLocations; +namespace torch { +namespace jit { + +struct Value; + +using AliasTypeSet = std::vector; + +// `Element` represents a vertex in the points-to graph. It represents +// anything that could have an aliasing relationship--mostly IR +// `Value`s, but also wildcards or the type inside a container (e.g. `T` +// in `List[T]`) +struct Element { + Element(const Value* value_, unsigned index_); + // wildcard constructor + explicit Element(unsigned index_); + + // Index into the owning DAG's bit vector that represents this element. + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + unsigned index; + + // All elements that this element *may* point to. It's possible to have + // multiple elements that you might point to due to control flow/complex ops + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + MemoryLocations pointsTo; + // Backreference for points-to. + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + MemoryLocations pointedFrom; + + // Elements can contain other elements (e.g. List[Tensor]) + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + MemoryLocations containedElements; + + // The values that this element corresponds to. May be empty if this element + // doesn't represent a first-class value. + // This is for debug information only. + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + std::unordered_set values; + + private: + // Make `from` point at `to`. + void makePointerTo(Element* from, Element* to); + + friend class MemoryDAG; + // We memoize the results of `getMemoryLocations` to speed up queries. + // A nullopt means that this cache is not yet populated. Since `MemoryDAG` is + // immutable, this cache should never need to be invalidated. + mutable c10::optional cachedMemoryLocations_; + + mutable c10::optional cachedAllContainedMemoryLocations_; +}; + +// class MemoryDAG +// +// This class tracks the "A points to B" graph for all values. It is used by +// AliasDb to provide a higher-level API. +// +// We maintain a DAG where: +// - Vertices (called "Elements") represent Values and +// other aliasing entities (e.g. the stuff inside a list) +// - Edges represent a "points-to" relationship. +// +// Leaves in this DAG are entities that don't point to anything, and thus +// correspond to unique "memory locations". +// +// So, by traversing the "points-to" graph to the leaves, you can determine +// which memory locations an element may point to. 
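+//
+// Concrete illustration (conceptual, not an API walkthrough): given the IR
+//
+//   %b = aten::view(%a, %shape)
+//   %c = aten::clone(%a, %memory_format)
+//
+// the Element for %b points to the Element for %a, so both resolve to the
+// same leaf memory location and mayAlias(a, b) holds, whereas %c is a fresh
+// allocation with its own leaf and does not alias %a.
+//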
+class TORCH_API MemoryDAG { + public: + explicit MemoryDAG(std::vector> indexToElementMap) + : indexToElementMap_(std::move(indexToElementMap)) {} + // explicitly delete copy constructor because otherwise windows build is + // confused for an exported class see + // https://stackoverflow.com/a/51033485/105137 + MemoryDAG(const MemoryDAG&) = delete; + MemoryDAG& operator=(const MemoryDAG&) = delete; + + // Return the unique memory locations that `Element` might represent. + const MemoryLocations& getMemoryLocations(const Element* e) const; + + // Do `a` and `b` potentially share a memory location? + bool mayAlias(const Element* a, const Element* b) const; + + // Does `a` hold reference to any memory that is stored in `b`, or vice versa? + bool mayContainAlias(const Element* a, const Element* b) const; + + bool mayContainAlias(const Element* a, const at::ArrayRef b) const; + + bool mayContainAlias( + const at::ArrayRef a, + const at::ArrayRef b) const; + + // Converts from the compressed index representation + const Element* fromIndex(unsigned x) const; + Element* fromIndex(unsigned x); + void collectAllContainedMemoryLocations( + const Element* elem, + MemoryLocations& cont) const; + + /** + * The following methods are special cases where we need to mutate the + * internals of MemoryDAG for efficiency reasons. Don't call them unless you + * know what you're doing! In particular, don't add new mutating methods + * without ensuring that you are maintaining cache consistency for memory + * locations. + */ + + // Adding wildcards can trigger extremely expensive cache invalidations. This + // method adds them in a more efficient cache-aware way. + void setWildcards( + const std::unordered_set& wildcards, + const ska::flat_hash_map& elementMap, + const std::function& getWildcardElement); + Element* unsafeMakeFreshValue(const Value* v); + + private: + const MemoryLocations& getAllContainedMemoryLocations( + const Element* elem) const; + void collectAllContainedMemoryLocationsImpl( + const Element* elem, + MemoryLocations& cont) const; + std::vector> indexToElementMap_; +}; + +/** + * Helper to build up the points-to graph. + * + * We separate the "building" into a different class because it allows us to + * cache internally to MemoryDAG without worrying about how the DAG structure + * is mutated. + */ +class TORCH_API MemoryDAGBuilder { + public: + MemoryDAGBuilder() = default; + MemoryDAGBuilder(const MemoryDAGBuilder&) = delete; + MemoryDAGBuilder& operator=(const MemoryDAGBuilder&) = delete; + + // Make `from` point at `to`. + void makePointerTo(Element* from, Element* to); + + void addToContainedElements(Element* contained, Element* container); + + std::unique_ptr createMemoryDAG() && { + return std::make_unique(std::move(indexToElementMap_)); + } + + // Make a fresh Element (i.e. an Element that doesn't point to anything) and + // return it. 
+ Element* makeFreshValue(const Value* v); + + friend MemoryDAG; + + private: + // `MemoryDAGBuilder` builds up `indexToElementMap_`, then uses + // the map to construct the `MemoryDAG` + std::vector> indexToElementMap_; +}; +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/utils/op_registry.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/utils/op_registry.h new file mode 100644 index 0000000000000000000000000000000000000000..d68d1d6192d6c3b039181a2ebce3026cf9cee8d4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/utils/op_registry.h @@ -0,0 +1,31 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace jit { +// Moved from shape_analysis.cpp + +// Requirements: +// dims : preserved from the first argument +// scalar type : preserved from the first argument (doesn't have to +// match other arguments) +// device : always matching and preserved +// tensor inputs : * +// tensor outputs : 1 +// NB: those ops (with slight adjustments) are good candidates for restarts. +// Knowing the type and device of weights or biases is usually enough to +// infer the output type. +std::shared_ptr nn_ops_first_input_preserving(); + +// Requirements: +// dims : Changed from first argument +// scalar type : preserved from the first argument +// device : always matching and preserved +// tensor inputs : 1 +// tensor outputs : 1 +std::shared_ptr ops_one_tensor_in_shape_transform(); +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/utils/optimization_utils.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/utils/optimization_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..6018fbea6daa9e3b28c9aa45fdeeb25bc162cf37 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/utils/optimization_utils.h @@ -0,0 +1,14 @@ + +#pragma once + +#include + +namespace torch { +namespace jit { + +// Checks if the parameters, not including the +// first param are all constants. +bool nonConstantParameters(Node* n); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/utils/subgraph_utils.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/utils/subgraph_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..dd761409ca2d008260119e01ed731862d5b26658 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/utils/subgraph_utils.h @@ -0,0 +1,75 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace jit { + +// Utilities for dealing with nodes that contain subgraphs. +// +// They handle the complexity of editing inputs/outputs as you merge nodes in +// and out of subgraphs. +namespace SubgraphUtils { + +// Create a new subgraph node that contains only `n`. The new subgraph will have +// `subgraphKind` as its type. +// +// `n` is destroyed. +// +// Returns the new subgraph node. 
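+//
+// Hedged sketch of the usual fuser workflow built on these helpers (the node
+// variables and the choice of prim::FusionGroup are illustrative):
+//
+//   Node* group =
+//       SubgraphUtils::createSingletonSubgraph(seed_node, prim::FusionGroup);
+//   SubgraphUtils::mergeNodeIntoSubgraph(producer_node, group);
+//   // ...and if fusion later proves unprofitable:
+//   SubgraphUtils::unmergeSubgraph(group);
+//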
+TORCH_API Node* createSingletonSubgraph(Node* n, Symbol subgraphKind); + +// Creates a new subgraph that only contains `n`, amd updates the new outputs +// of the subgraph to have the aliasing properties of the original `n` outputs +TORCH_API Node* createSingletonSubgraphAndUpdateAliasing( + Node* to_merge, + Symbol subgraphKind, + AliasDb& db); + +// Merge a node into a subgraph node. If `toMerge` is also a subgraph, the +// subgraphs are merged. +// If `destroyNode` is true `toMerge` is destroyed. +// An optional argument 'vmap' could be used to retrieve value mappings. +// Values will be mapped to their new subgraph values +TORCH_API void mergeNodeIntoSubgraph( + Node* toMerge, + Node* subgraphNode, + bool destroyNode = true); + +// Merges a node into a subgraph node, and updates the new outputs of the +// subgraph to have the aliasing properties of the corresponding `to_merge` +// outputs +TORCH_API void mergeNodeIntoSubgraphAndUpdateAliasing( + Node* to_merge, + Node* subgraphNode, + AliasDb& db); + +TORCH_API std::vector unmergeAliasedOutputs( + Node* subgraphNode, + AliasDb& db); + +// Move nodes from a subgraph node to the outer graph. +// `subgraphNode` is destroyed. +TORCH_API void unmergeSubgraph(Node* subgraphNode); + +// Move `node_to_unmerge` and its descendants after `subgraphNode` +// promotes any dependencies of `node_to_unmerge` to subgraphNode outputs +TORCH_API void unmergeNode(Node* node_to_unmerge, Node* subgraphNode); + +TORCH_API bool unmergeOutputsAlisingInputs(Node* subgraphNode); + +TORCH_API bool unmergeAliasedOutputs(Node* subgraphNode); + +// Convenience function +std::shared_ptr getSubgraph(Node* n); + +TORCH_API std::string generateNameForGraph( + const std::shared_ptr& graph, + size_t maxlen = 40, + const std::string& prefix = "fused"); + +} // namespace SubgraphUtils +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/value_refinement_utils.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/value_refinement_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..aa2ab4ea421f5cbba34d9cb973cb8ebe7bf5800d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/value_refinement_utils.h @@ -0,0 +1,81 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { + +// Refine from Value of type List -> len of list +// If a refinement mapping of List Value * -> len is present in a block +// the list is guaranteed to be that length +// TODO: vector may be faster +using ListRefinement = std::unordered_map; + +TORCH_API ListRefinement +intersectRefinements(const ListRefinement& ref1, const ListRefinement& ref2); + +TORCH_API ListRefinement +unionRefinements(const ListRefinement& ref1, const ListRefinement& ref2); + +// Represents the refinement information that can be carried on a boolean +struct BooleanRefinementMapping { + BooleanRefinementMapping( + ListRefinement true_refine, + ListRefinement false_refine) + : true_refine_(std::move(true_refine)), + false_refine_(std::move(false_refine)){}; + BooleanRefinementMapping() = default; // empty + + static BooleanRefinementMapping FalseRefinements( + ListRefinement false_refine) { + return BooleanRefinementMapping({}, std::move(false_refine)); + } + + static BooleanRefinementMapping TrueRefinements(ListRefinement true_refine) { + return BooleanRefinementMapping(std::move(true_refine), {}); + } + + 
BooleanRefinementMapping intersectBooleanRefinementMapping( + BooleanRefinementMapping& other) { + return BooleanRefinementMapping( + intersectRefinements(true_refine_, other.true_refine()), + intersectRefinements(false_refine_, other.false_refine())); + } + + ListRefinement& true_refine() { + return true_refine_; + } + + ListRefinement& false_refine() { + return false_refine_; + } + + private: + ListRefinement true_refine_; + ListRefinement false_refine_; +}; + +TORCH_API void joinIfRefinements( + Node* if_node, + std::unordered_set& throwing_blocks, + ListRefinement& curr_block_refinements, + ListRefinement& true_block_refinements, + ListRefinement& false_block_refinements, + std::unordered_map& info); + +// handles adding blocks to throwing blocks and propagating refinements via +// boolean comparisons +TORCH_API bool handleCommonRefinentOperators( + Node* n, + std::unordered_set& throwing_blocks, + std::unordered_map& info); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/variadic_ops.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/variadic_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..cdd89bbcc22fffffef677a570923d469a6f129bd --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/variadic_ops.h @@ -0,0 +1,31 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Try to replace an op that takes a list input with another op that takes a +// variadic number of arguments. +TORCH_API bool UseVariadicOp( + const std::shared_ptr& graph, + NodeKind op, + NodeKind variadic_op); + +TORCH_API bool RemoveListMutationAndUseVariadicOp( + const std::shared_ptr& graph, + NodeKind op, + NodeKind variadic_op); + +// Convenient functions for replacing aten::stack/aten::cat with their +// variadic versions. 
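+//
+// Conceptually, UseVariadicCat rewrites
+//
+//   %xs : Tensor[] = prim::ListConstruct(%a, %b, %c)
+//   %out = aten::cat(%xs, %dim)
+//
+// into a single variadic node taking the tensors (and trailing dim) directly,
+// eliminating the intermediate list. A hedged usage sketch:
+//
+//   bool changed = UseVariadicCat(graph);
+//   changed |= RemoveListMutationAndUseVariadicStack(graph);
+//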
+TORCH_API bool UseVariadicCat(const std::shared_ptr& graph); +TORCH_API bool RemoveListMutationAndUseVariadicCat( + const std::shared_ptr& graph); + +TORCH_API bool UseVariadicStack(const std::shared_ptr& graph); +TORCH_API bool RemoveListMutationAndUseVariadicStack( + const std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/vulkan_rewrite.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/vulkan_rewrite.h new file mode 100644 index 0000000000000000000000000000000000000000..395d885e8e2c3c99f5e1a6d4279c9e0e26894d07 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/vulkan_rewrite.h @@ -0,0 +1,18 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace jit { +TORCH_API void vulkanInsertPrePackedOps(std::shared_ptr& graph); +TORCH_API void vulkanInsertPrePackedOps(script::Module& module); +TORCH_API void vulkanFusePrePackedConvWithClamp(script::Module& module); +TORCH_API void vulkanFoldPrePackingOps(script::Module& module); +TORCH_API script::Module vulkanOptimizeForMobile( + const script::Module& module, + const std::set& optimization_blocklist, + const std::vector& preserved_methods); +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/xnnpack_rewrite.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/xnnpack_rewrite.h new file mode 100644 index 0000000000000000000000000000000000000000..d1a64c52c9230ad85a3c3540e120b48532abd707 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/xnnpack_rewrite.h @@ -0,0 +1,21 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace jit { + +TORCH_API void transformConv1dToConv2d(std::shared_ptr& graph); +TORCH_API void transformConv1dToConv2d(script::Module& module); +TORCH_API void insertPrePackedOps(std::shared_ptr& graph); +TORCH_API void insertPrePackedOps(script::Module& module); +TORCH_API void fusePrePackedLinearConvWithClamp(script::Module& module); +TORCH_API void FoldPrePackingOps(script::Module& module); +TORCH_API script::Module optimizeForMobile( + const script::Module& module, + const std::set& optimization_blocklist = {}, + const std::vector& preserved_methods = {}); +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_sugared_value.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_sugared_value.h new file mode 100644 index 0000000000000000000000000000000000000000..35298e30b08a6dfdeda1417a4ce0080649e87c84 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_sugared_value.h @@ -0,0 +1,376 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace torch::jit { + +std::string typeString(py::handle h); + +inline std::shared_ptr toSimple(Value* v) { + return std::make_shared(v); +} + +// NB: This should be the single entry-point for instantiating a SugaredValue +// from a Python object. If you are adding support for converting a new Python +// type, *add it in this function's implementation*. 
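+//
+// Hedged example of the intended call pattern (importing math.pi here is an
+// arbitrary illustration; in practice the compiler calls this while resolving
+// names inside a scripted function):
+//
+//   py::object obj = py::module::import("math").attr("pi");
+//   auto sv = toSugaredValue(std::move(obj), graph_function, source_range,
+//                            /*is_constant=*/true);
+//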
+std::shared_ptr toSugaredValue( + py::object obj, + GraphFunction& m, + const SourceRange& loc, + bool is_constant = false); + +c10::optional as_function(const py::object& obj); + +struct VISIBILITY_HIDDEN PythonValue : public SugaredValue { + PythonValue( + py::object the_self, + c10::optional rcb = c10::nullopt, + Value* module_self = nullptr) + : self(std::move(the_self)), + rcb(std::move(rcb)), + moduleSelf_(module_self) {} + + FunctionSchema getSchema( + const size_t n_args, + const size_t n_binders, + const SourceRange& loc); + + // call it like a function, e.g. `outputs = this(inputs)` + std::shared_ptr call( + const SourceRange& loc, + GraphFunction& m, + at::ArrayRef args, + at::ArrayRef kwargs, + size_t n_binders) override; + + std::string kind() const override; + + std::vector> asTuple( + const SourceRange& loc, + GraphFunction& m, + const c10::optional& size_hint = {}) override; + + std::shared_ptr attr( + const SourceRange& loc, + GraphFunction& m, + const std::string& field) override; + + Value* asValue(const SourceRange& loc, GraphFunction& m) override { + throw ErrorReport(loc) + << kind() << " cannot be used as a value. " + << "Perhaps it is a closed over global variable? If so, please " + << "consider passing it in as an argument or use a local varible " + << "instead."; + } + + protected: + py::object getattr(const SourceRange& loc, const std::string& name); + + void checkForAddToConstantsError(std::stringstream& ss); + + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + py::object self; + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + c10::optional rcb; + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + Value* moduleSelf_ = nullptr; +}; + +struct VISIBILITY_HIDDEN PythonModuleValue : public PythonValue { + explicit PythonModuleValue(py::object mod) : PythonValue(std::move(mod)) {} + + std::shared_ptr attr( + const SourceRange& loc, + GraphFunction& m, + const std::string& field) override; +}; + +// Used for desugaring uses of the torch.cuda module. All the CUDA APIs with +// torch.cuda.* are resolved using CUDAPythonModuleValue. 
+struct VISIBILITY_HIDDEN CUDAPythonModuleValue : public PythonValue { + explicit CUDAPythonModuleValue(py::object mod) + : PythonValue(std::move(mod)) {} + + std::shared_ptr attr( + const SourceRange& loc, + GraphFunction& m, + const std::string& field) override; +}; + +// Represents all the parameters of a module as a List[Tensor] +struct VISIBILITY_HIDDEN ConstantParameterList : public SugaredValue { + ConstantParameterList(Value* the_list) : the_list_(the_list) {} + std::string kind() const override { + return "constant parameter list"; + } + std::shared_ptr call( + const SourceRange& loc, + GraphFunction& caller, + at::ArrayRef args, + at::ArrayRef kwargs, + size_t n_binders) override { + return toSimple(the_list_); + } + + private: + Value* the_list_; +}; + +struct VISIBILITY_HIDDEN ModuleDictMethod : public SugaredValue { + explicit ModuleDictMethod(SugaredValuePtr iterable, std::string name) + : iterable_(std::move(iterable)), name_(std::move(name)){}; + + std::string kind() const override { + return name_; + } + + std::shared_ptr call( + const SourceRange& loc, + GraphFunction& f, + at::ArrayRef args, + at::ArrayRef kwargs, + size_t n_binders) override { + if (!args.empty() || !kwargs.empty()) { + throw ErrorReport(loc) + << name_ << " method does not accept any arguments"; + } + return iterable_; + } + + SugaredValuePtr iterable_; + const std::string name_; +}; + +struct SugaredDict; + +// defines how modules/methods behave inside the script subset. +// for now this does not have any interaction with python. +// in the future, we will add the ability to resolve `self.foo` to python +// {functions, modules, constants} so this SugaredValue is defined here +// anticipating we will eventually need to replace Module with a py::object +// holding the actual nn.Module class. + +struct VISIBILITY_HIDDEN ModuleValue : public SugaredValue { + ModuleValue(Value* self, std::shared_ptr concreteType) + : self_(self), concreteType_(std::move(concreteType)) {} + + std::string kind() const override { + return "module"; + } + + Value* asValue(const SourceRange& loc, GraphFunction& m) override; + + SugaredValuePtr asTupleValue(const SourceRange& loc, GraphFunction& m) + override; + + // select an attribute on it, e.g. `this.field` + std::shared_ptr tryGetAttr( + const SourceRange& loc, + GraphFunction& m, + const std::string& field); + + // select an attribute on it, e.g. `this.field` + std::shared_ptr attr( + const SourceRange& loc, + GraphFunction& m, + const std::string& field) override; + + // select an attribute on it, e.g. 
`this.field` + bool hasAttr( + const SourceRange& loc, + GraphFunction& m, + const std::string& field) override; + + // call module.forward with pre_hooks and hooks + std::shared_ptr call( + const SourceRange& loc, + GraphFunction& caller, + at::ArrayRef args, + at::ArrayRef kwargs, + size_t n_binders) override; + + std::shared_ptr getSugaredDict( + const SourceRange& loc, + GraphFunction& m); + + std::shared_ptr getSugaredNamedBufferDict( + const SourceRange& loc, + GraphFunction& m); + + std::shared_ptr getSugaredNamedParameterList( + const SourceRange& loc, + GraphFunction& m); + + std::shared_ptr getSugaredNamedParameterDict( + const SourceRange& loc, + GraphFunction& m); + + void setAttr( + const SourceRange& loc, + GraphFunction& m, + const std::string& field, + Value* newValue) override; + + SugaredValuePtr iter(const SourceRange& loc, GraphFunction& m) override; + + std::shared_ptr getitem( + const SourceRange& loc, + GraphFunction& m, + Value* idx, + TypePtr type_hint) override; + + private: + // Check that the type of all submodules is a subtype of ty. If the function + // returns false, more information about why it returns false (e.g. which + // submodule's type is not a subtype of ty) is printed it why_not if it is not + // null. + bool areAllSubmodulesSubtypeOf( + const TypePtr& ty, + std::ostream* why_not = nullptr) const; + + Value* self_; + std::shared_ptr concreteType_; +}; + +bool isNamedTupleClass(const py::object& obj); +TypePtr registerNamedTuple( + const py::object& obj, + const SourceRange& loc, + const ResolutionCallback& rcb); + +void recurseThroughNestedModules( + const SourceRange& loc, + GraphFunction& m, + std::vector& keys, + std::vector& values, + std::shared_ptr& self, + const std::string& prefix, + const std::string& field); + +// Used to support named_modules() +struct VISIBILITY_HIDDEN SugaredDict : public SugaredValue { + explicit SugaredDict( + std::shared_ptr self, + std::shared_ptr keys, + std::shared_ptr modules) + : self_(std::move(self)), + keys_(std::move(keys)), + modules_(std::move(modules)) {} + + std::string kind() const override { + return "ModuleDict"; + } + + std::shared_ptr getKeys() { + return keys_; + } + + std::shared_ptr getModules() { + return modules_; + } + + std::shared_ptr attr( + const SourceRange& loc, + GraphFunction& m, + const std::string& field) override; + + SugaredValuePtr iter(const SourceRange& loc, GraphFunction& m) override { + return keys_; + }; + + std::shared_ptr self_; + std::shared_ptr keys_; + std::shared_ptr modules_; +}; + +struct VISIBILITY_HIDDEN BooleanDispatchValue : public SugaredValue { + BooleanDispatchValue(py::dict dispatched_fn) + : dispatched_fn_(std::move(dispatched_fn)) {} + + std::string kind() const override { + return "boolean dispatch"; + } + + std::shared_ptr call( + const SourceRange& loc, + GraphFunction& caller, + at::ArrayRef args, + at::ArrayRef kwargs, + size_t n_binders) override; + + private: + py::dict dispatched_fn_; +}; + +struct VISIBILITY_HIDDEN PythonClassValue : public ClassValue { + PythonClassValue(ClassTypePtr type, py::object py_type) + : ClassValue(std::move(type)), py_type_(std::move(py_type)) {} + + std::string kind() const override { + return "Python type"; + } + + std::shared_ptr attr( + const SourceRange& loc, + GraphFunction& m, + const std::string& field) override; + + bool hasAttr( + const SourceRange& loc, + GraphFunction& m, + const std::string& field) override; + + private: + py::object py_type_; +}; + +struct VISIBILITY_HIDDEN PythonExceptionValue : public 
ExceptionValue { + explicit PythonExceptionValue(const py::object& exception_class) + : ExceptionValue( + py::str(py::getattr(exception_class, "__name__", py::str("")))), + exception_class_qualified_name_( + py::str(py::module::import("torch._jit_internal") + .attr("_qualified_name")( + exception_class, + /*mangle_name=*/false))) {} + + std::string kind() const override { + return "Python exception"; + } + + std::shared_ptr call( + const SourceRange& loc, + GraphFunction& caller, + at::ArrayRef args, + at::ArrayRef kwargs, + size_t n_binders) override; + + private: + std::string exception_class_qualified_name_; +}; + +// Python Slice class. +struct VISIBILITY_HIDDEN PythonSliceClass : public SugaredValue { + explicit PythonSliceClass() = default; + + std::string kind() const override { + return "Python slice class"; + } + + std::shared_ptr call( + const SourceRange& loc, + GraphFunction& caller, + at::ArrayRef args, + at::ArrayRef kwargs, + size_t n_binders) override; +}; + +} // namespace torch::jit