applied-ai-018 committed
Commit b3504fe · verified · 1 Parent(s): b3fa61c

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. ckpts/universal/global_step20/zero/14.mlp.dense_4h_to_h.weight/exp_avg_sq.pt +3 -0
  2. ckpts/universal/global_step20/zero/14.mlp.dense_4h_to_h.weight/fp32.pt +3 -0
  3. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/annotate_warns.h +11 -0
  4. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/autocast.h +15 -0
  5. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/bailout_graph.h +34 -0
  6. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/batch_mm.h +11 -0
  7. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/canonicalize_graph_fuser_ops.h +11 -0
  8. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/check_strict_fusion.h +12 -0
  9. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/clear_profiling.h +19 -0
  10. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/clear_undefinedness.h +24 -0
  11. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/common_subexpression_elimination.h +11 -0
  12. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/constant_pooling.h +11 -0
  13. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/create_autodiff_subgraphs.h +19 -0
  14. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/device_type_analysis.h +13 -0
  15. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/dtype_analysis.h +17 -0
  16. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/eliminate_no_ops.h +17 -0
  17. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/erase_number_types.h +23 -0
  18. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fixup_trace_scope_blocks.h +47 -0
  19. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fold_conv_bn.h +37 -0
  20. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/freeze_module.h +36 -0
  21. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_concat_linear.h +13 -0
  22. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_graph_optimizations.h +22 -0
  23. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_linear_folding.h +14 -0
  24. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_ops_to_mkldnn.h +15 -0
  25. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fuse_relu.h +11 -0
  26. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/graph_fuser.h +37 -0
  27. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/guard_elimination.h +19 -0
  28. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/hoist_conv_packed_params.h +12 -0
  29. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inline_autodiff_subgraphs.h +15 -0
  30. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inline_fork_wait.h +16 -0
  31. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inline_forked_closures.h +12 -0
  32. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inliner.h +14 -0
  33. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inplace_check.h +11 -0
  34. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/insert_guards.h +21 -0
  35. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/integer_value_refinement.h +12 -0
  36. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/liveness.h +23 -0
  37. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/loop_unrolling.h +36 -0
  38. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_graph.h +22 -0
  39. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_tuples.h +20 -0
  40. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/metal_rewrite.h +17 -0
  41. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/mkldnn_rewrite.h +34 -0
  42. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/mobile_optimizer_type.h +13 -0
  43. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/onednn_graph_fuser.h +64 -0
  44. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/pass_manager.h +136 -0
  45. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole.h +20 -0
  46. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_alias_sensitive.h +17 -0
  47. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_list_idioms.h +72 -0
  48. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/prepack_folding.h +17 -0
  49. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/dedup_module_uses.h +28 -0
  50. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/finalize.h +63 -0
ckpts/universal/global_step20/zero/14.mlp.dense_4h_to_h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1a8575ec2d83c8ea458a1f0ae4819061f99fb199d013258aab3988a68cdd7783
+ size 33555627
ckpts/universal/global_step20/zero/14.mlp.dense_4h_to_h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b713ee72f75b4debab054e71a88b7a269c0a95a44ba53ec3022093be4a45b478
+ size 33555533
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/annotate_warns.h ADDED
@@ -0,0 +1,11 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API void AnnotateWarns(const std::shared_ptr<Graph>& graph);
+
+ } // namespace jit
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/autocast.h ADDED
@@ -0,0 +1,15 @@
+
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API void Autocast(const std::shared_ptr<Graph>& graph);
+
+ TORCH_API bool setAutocastMode(bool value);
+ TORCH_API bool autocastEnabled();
+
+ } // namespace jit
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/bailout_graph.h ADDED
@@ -0,0 +1,34 @@
+ #pragma once
+
+ #include <ATen/ATen.h>
+ #include <ATen/core/ivalue.h>
+ #include <ATen/core/jit_type.h>
+ #include <ATen/core/stack.h>
+ #include <torch/csrc/Export.h>
+ #include <torch/csrc/jit/ir/ir.h>
+
+ #include <list>
+ #include <vector>
+
+ namespace torch {
+ namespace jit {
+
+ // Replaces prim::Guard nodes with prim::BailOut nodes and
+ // computes sets of inputs needed to resume execution at
+ // bailout points
+ TORCH_API void InsertBailOuts(std::shared_ptr<Graph> graph);
+
+ // Builds a bailout graph into `target` (which is an empty graph)
+ // for a given bailout point `bailout_index`
+ // from the original graph `orig` (the original unoptimized graph)
+ // BailOut graphs allow Interpreter to resume
+ // execution of the (un/de)optimized graph (i.e.
+ // a graph that doesn't rely on any assumptions derived from
+ // on profiling information) from a given BailOut point
+ // should any of the assumptions fail for an actual input.
+ TORCH_API std::shared_ptr<Graph> BuildBailOutGraphFrom(
+     int64_t bailout_index,
+     const std::shared_ptr<Graph>& orig,
+     const std::shared_ptr<Graph>& target);
+ } // namespace jit
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/batch_mm.h ADDED
@@ -0,0 +1,11 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API void BatchMM(std::shared_ptr<Graph>& graph);
+
+ }
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/canonicalize_graph_fuser_ops.h ADDED
@@ -0,0 +1,11 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API void CanonicalizeOps(const std::shared_ptr<Graph>& graph);
+
+ }
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/check_strict_fusion.h ADDED
@@ -0,0 +1,12 @@
+
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API void CheckStrictFusion(std::shared_ptr<Graph>& graph);
+
+ } // namespace jit
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/clear_profiling.h ADDED
@@ -0,0 +1,19 @@
+ #pragma once
+
+ #include <ATen/ATen.h>
+ #include <ATen/core/ivalue.h>
+ #include <ATen/core/jit_type.h>
+ #include <torch/csrc/Export.h>
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API void unprofileGraphInputs(const std::shared_ptr<Graph>& graph);
+ TORCH_API void unprofileBlock(Block* start_block);
+ // Unprofiles all the node outputs in a block.
+
+ TORCH_API void ClearProfilingInformation(const std::shared_ptr<Graph>& graph);
+
+ } // namespace jit
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/clear_undefinedness.h ADDED
@@ -0,0 +1,24 @@
+ #pragma once
+
+ #include <ATen/ATen.h>
+ #include <ATen/core/ivalue.h>
+ #include <ATen/core/jit_type.h>
+ #include <torch/csrc/Export.h>
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ // Undefinedness makes argument matching fail for regular tensor operations
+ // if 1+ arguments are undefined or possibly undefined tensors.
+ // Technically, undefined tensors are **not** tensors as the regular tensor
+ // operations do not know how to handle them.
+ // However, in practice, there are guards and conversion operators that
+ // **always** gate regular operations if undefined tensors may be present
+ // Eventually, we would love to move to the world where we use optionals
+ // in lieu of undefined tensors.
+ // When this happens, this pass will be removed
+ TORCH_API void ClearUndefinedness(const std::shared_ptr<Graph>& graph);
+
+ } // namespace jit
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/common_subexpression_elimination.h ADDED
@@ -0,0 +1,11 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API bool EliminateCommonSubexpression(
+     const std::shared_ptr<Graph>& graph);
+ }
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/constant_pooling.h ADDED
@@ -0,0 +1,11 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API void ConstantPooling(const std::shared_ptr<Graph>& graph);
+
+ }
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/create_autodiff_subgraphs.h ADDED
@@ -0,0 +1,19 @@
+ #pragma once
+
+ #include <torch/csrc/Export.h>
+ #include <torch/csrc/jit/ir/ir.h>
+
+ #include <cstddef>
+
+ namespace torch {
+ namespace jit {
+
+ // insert GraphExecutor nodes that group together
+ // subgraphs that are differentiable by the jit's autodiff passes
+ // threshold - minimum number of nodes that will appear in a block
+ // returns all differentiable blocks that have been found
+ TORCH_API std::vector<Node*> CreateAutodiffSubgraphs(
+     const std::shared_ptr<Graph>& graph,
+     size_t threshold = 2);
+ } // namespace jit
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/device_type_analysis.h ADDED
@@ -0,0 +1,13 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+ struct Graph;
+
+ // Propagates Device type info throughout the given graph.
+ TORCH_API bool DeviceTypePropagation(std::shared_ptr<Graph>& graph);
+
+ } // namespace jit
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/dtype_analysis.h ADDED
@@ -0,0 +1,17 @@
+ #pragma once
+
+ #include <torch/csrc/Export.h>
+ #include <torch/csrc/jit/ir/ir.h>
+ #include <memory>
+
+ namespace torch {
+ namespace jit {
+ struct Graph;
+
+ // Propagate tensor properties (e.g., dtype, device, is_contiguous, layout)
+ // propagation on all tensor objects. Currently, we only support dtype
+ // propagation
+ TORCH_API bool DtypePropagation(std::shared_ptr<Graph>& graph);
+
+ } // namespace jit
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/eliminate_no_ops.h ADDED
@@ -0,0 +1,17 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ // Remove ops that do nothing on the forward pass (like aten::detach).
+ // This pass is invoked as a part of freeze_module.
+ // This function also takes a set of custom ops to eliminate. All ops in this
+ // set must take their output as their first input, i.e. x = f(x, ...)
+ TORCH_API bool EliminateNoOps(
+     std::shared_ptr<Graph>& graph,
+     std::unordered_set<c10::Symbol> custom_ops = {});
+
+ } // namespace jit
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/erase_number_types.h ADDED
@@ -0,0 +1,23 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ // Erase NumberType information. This is necessary for and only used in
+ // exporting to ONNX. This pass ensures that no remaining Values have
+ // NumberType types, replacing them with tensors.
+ // The following things are done to erase NumberType info:
+ // - NumberType outputs are changed to DynamicType.
+ // - prim::Constant nodes which are numbers get changed into 0-dim tensors of
+ //   the corresponding type
+ // - prim::TensorToNum, aten::Float, aten::Int and prim::NumToTensor nodes
+ //   are erased.
+ //
+ // The pass assumes that DCE will be called sometime after.
+ TORCH_API void EraseNumberTypes(const std::shared_ptr<Graph>& graph);
+ TORCH_API void EraseNumberTypesOnBlock(Block* block);
+
+ } // namespace jit
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fixup_trace_scope_blocks.h ADDED
@@ -0,0 +1,47 @@
+ #pragma once
+
+ #include <torch/csrc/jit/api/module.h>
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ // Directly after tracing, we have an ill-formed graph with blocks inserted.
+ // Example:
+ //
+ // graph(%self : ClassType<Module>,
+ //       %input.1 : Float(3, 4)):
+ //   %1 : ClassType<Module> = prim::GetAttr[name="relu1"](%self)
+ //   %2 : ClassType<Module> = prim::GetAttr[name="relu2"](%self)
+ //   %3 : ClassType<Module> = prim::GetAttr[name="rrr"](%2)
+ //   = prim::TracedModuleForward[scope="__module.relu1"]()
+ //     block0():
+ //       %input : Float(3, 4) = aten::relu(%input.1),
+ //       -> ()
+ //   = prim::TracedModuleForward[scope="__module.relu2"](),
+ //     block0():
+ //       = prim::TracedModuleForward[scope="__module.relu2.rrr"](),
+ //         block0():
+ //           %6 : Float(3, 4) = aten::relu(%input),
+ //           -> ()
+ //       -> ()
+ //   return (%6)
+ //
+ // In this pass, we:
+ //   1) Lift Value defs to as high of a scope as needed to ensure that
+ //      they dominate all their uses. For example, `input` in the above
+ //      graph needs to be lifted to the top-level block so that its use
+ //      in the second `relu` operator is dominated.
+ //   2) Lambda lift the blocks. This ensures that all values used within
+ //      each scope have their defs captured.
+ //   3) Convert the scope blocks into methods on their respective Modules,
+ //      and convert TracedModuleForward nodes to CallMethod nodes into those
+ //      methods.
+ //
+ // Then, we'll have a well-formed graph with proper method calls.
+ TORCH_API void FixupTraceScopeBlocks(
+     std::shared_ptr<Graph>& graph,
+     Module* self);
+
+ } // namespace jit
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fold_conv_bn.h ADDED
@@ -0,0 +1,37 @@
+ #pragma once
+
+ #include <torch/csrc/jit/api/module.h>
+
+ namespace torch {
+ namespace jit {
+
+ /** \brief Fold Conv2d-BatchNorm2d into Conv2d in all methods of this
+ * module and all its submodules, forward is included by default.
+ *
+ * The weight and bias of the Conv2d are correspondingly updated. Should only be
+ * used on modules in eval mode.
+ */
+ TORCH_API Module FoldConvBatchNorm(const Module& module);
+
+ struct TORCH_API ConvBNParameters {
+   at::Tensor conv_w;
+   at::Tensor conv_b;
+   at::Tensor bn_rm;
+   at::Tensor bn_rv;
+   double bn_eps = 0.0;
+   at::Tensor bn_w;
+   at::Tensor bn_b;
+ };
+
+ /**
+ * Given the current weight and bias tensors of a Conv module and parameters
+ * of the BatchNorm module we're folding with, compute the updated values
+ * for the weight and bias.
+ *
+ * The function is basically copied from torch/nn/utils/fusion.py
+ */
+ TORCH_API std::tuple<at::Tensor, at::Tensor> computeUpdatedConvWeightAndBias(
+     const ConvBNParameters& p);
+
+ } // namespace jit
+ } // namespace torch
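A minimal usage sketch for the API declared above (not part of this commit; the model path is illustrative): load a scripted module, put it in eval mode, and fold Conv2d+BatchNorm2d pairs.

#include <torch/script.h>
#include <torch/csrc/jit/passes/fold_conv_bn.h>

int main() {
  // Load a TorchScript module saved from Python ("model.pt" is a placeholder).
  torch::jit::Module m = torch::jit::load("model.pt");
  m.eval();  // the fold is only valid for modules in eval mode

  // Returns a new module whose Conv2d weights/biases absorb the BatchNorm2d stats.
  torch::jit::Module folded = torch::jit::FoldConvBatchNorm(m);
  folded.save("model_folded.pt");
  return 0;
}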
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/freeze_module.h ADDED
@@ -0,0 +1,36 @@
+ /** \brief This file defines freezing Torchscript module API.
+ *
+ * This API has python-binding and can be invoked directly or as a part of
+ * general optimization pipeline.
+ */
+ #pragma once
+
+ #include <torch/csrc/jit/api/module.h>
+ #include <torch/csrc/jit/ir/ir.h>
+
+ /** \brief Freeze Module, i.e., Assume all attributes are constants.
+ *
+ * Freezing module is a functionality that allows the JIT to internalize
+ * immutable attributes. Combined with inlining, the module is aggressively
+ * optimized and significant overhead is optimized away. The freezeModule API
+ * produces a cloned frozen module.
+ */
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API Module freeze_module(
+     const Module& module,
+     std::vector<std::string> preservedAttrs = std::vector<std::string>(),
+     bool freezeInterfaces = true,
+     bool preserveParameters = false);
+
+ // Clone-free version of freeze_module. This modifies the module inplace.
+ // Use this version to avoid extra memory usage incurred by cloning the module.
+ TORCH_API void freeze_module_inplace(
+     Module* module,
+     std::vector<std::string> preservedAttrs = std::vector<std::string>(),
+     bool freezeInterfaces = true,
+     bool preserveParameters = false);
+ } // namespace jit
+ } // namespace torch
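A hedged usage sketch of the freezing API declared above (not part of this commit; the model path and the preserved attribute name "version" are illustrative):

#include <torch/script.h>
#include <torch/csrc/jit/passes/freeze_module.h>

int main() {
  torch::jit::Module m = torch::jit::load("model.pt");  // placeholder path
  m.eval();  // freezing assumes inference mode

  // Clone-and-freeze, keeping one attribute mutable ("version" is hypothetical).
  torch::jit::Module frozen =
      torch::jit::freeze_module(m, /*preservedAttrs=*/{"version"});

  // In-place variant that avoids cloning the module.
  torch::jit::freeze_module_inplace(&m);
  return 0;
}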
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_concat_linear.h ADDED
@@ -0,0 +1,13 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ // Concats multiple linear ops with the same Tensor input
+ // into a single linear op.
+ TORCH_API bool FrozenConcatLinear(std::shared_ptr<Graph>& graph);
+
+ } // namespace jit
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_graph_optimizations.h ADDED
@@ -0,0 +1,22 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ /** \brief Runs a set of Optimizations that Optimize Frozen Graphs
+ *
+ * Currently this set of optimizations is:
+ * - FoldFrozenConvBatchnorm
+ * - FoldFrozenConvAddOrSub
+ * - FoldFrozenConvMulOrDiv
+ * - FoldFrozenLinearBatchnorm
+ */
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API void OptimizeFrozenGraph(
+     std::shared_ptr<Graph>& graph,
+     bool optimize_numerics = true);
+
+ } // namespace jit
+ } // namespace torch
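A sketch of running the frozen-graph optimization bundle on a module's forward graph (assumes a scripted module at a placeholder path; not part of this commit):

#include <torch/script.h>
#include <torch/csrc/jit/passes/freeze_module.h>
#include <torch/csrc/jit/passes/frozen_graph_optimizations.h>

int main() {
  torch::jit::Module m = torch::jit::load("model.pt");
  m.eval();
  torch::jit::Module frozen = torch::jit::freeze_module(m);

  // The pass operates on a graph, so grab the frozen forward graph first.
  std::shared_ptr<torch::jit::Graph> graph = frozen.get_method("forward").graph();
  torch::jit::OptimizeFrozenGraph(graph, /*optimize_numerics=*/true);
  return 0;
}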
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_linear_folding.h ADDED
@@ -0,0 +1,14 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ // Fuses Linear -> BatchNormNd into a single Linear by
+ // folding batchnorm weights into linear weights.
+ // This pass only works on Frozen Graphs; otherwise it is a No-Op.
+ TORCH_API bool FoldFrozenLinearBatchnorm(std::shared_ptr<Graph>& graph);
+
+ } // namespace jit
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_ops_to_mkldnn.h ADDED
@@ -0,0 +1,15 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ // Converts operators & their parameters to mkldnn if it is profitable
+ // Currently encompassing Conv2d and Conv3d, and Linear
+ // Op must be in float32 and mkldnn must be built
+ // This pass only works on frozen graph
+ TORCH_API void ConvertFrozenOpsToMKLDNN(std::shared_ptr<Graph>& graph);
+
+ } // namespace jit
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fuse_relu.h ADDED
@@ -0,0 +1,11 @@
+ #pragma once
+
+ #include <torch/csrc/jit/api/module.h>
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+ TORCH_API void FuseAddRelu(script::Module& module);
+ TORCH_API void FuseAddRelu(std::shared_ptr<Graph>& graph);
+ } // namespace jit
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/graph_fuser.h ADDED
@@ -0,0 +1,37 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API bool canFuseOnCPULegacy();
+ TORCH_API void overrideCanFuseOnCPULegacy(bool value);
+
+ // NB: Be sure to run DCE before fusion, because dead instructions
+ // can prevent fusion opportunities from being exploited.
+ // On Windows will noop, NYI
+ TORCH_API void FuseGraph(
+     std::shared_ptr<Graph>& graph,
+     bool strict_fuser_check = false);
+
+ // \brief Custom fusion pass using a node-level callback to
+ // determine the inclusion of nodes in a subgraph.
+ //
+ // This helper omits aliased inputs and fusion across control flow
+ // boundaries.
+ //
+ // \arg graph The graph to be modified in-place
+ // \arg is_fusable A callback run on each fusable node in the graph.
+ // \arg kind The label given to the resultant fused subgraph
+ // \arg arg_limit The maximum number of args the resultant fused subgraph
+ // should have. Note: This will likely develop into a general
+ // post condition on the fused subgraph.
+ TORCH_API void CustomFuseGraph(
+     std::shared_ptr<Graph>& graph,
+     const std::function<bool(Node*)>& is_fusable,
+     Symbol kind,
+     size_t arg_limit = std::numeric_limits<size_t>::max());
+
+ } // namespace jit
+ } // namespace torch
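A hypothetical CustomFuseGraph sketch: the callback, the subgraph symbol "my_backend::FusionGroup", and the model path are all illustrative, not part of the commit.

#include <torch/script.h>
#include <torch/csrc/jit/passes/graph_fuser.h>

int main() {
  torch::jit::Module m = torch::jit::load("model.pt");
  std::shared_ptr<torch::jit::Graph> graph = m.get_method("forward").graph();

  // Treat only aten::relu nodes as fusable; everything else is left alone.
  auto is_fusable = [](torch::jit::Node* n) {
    return n->kind() == c10::Symbol::fromQualString("aten::relu");
  };

  torch::jit::CustomFuseGraph(
      graph,
      is_fusable,
      c10::Symbol::fromQualString("my_backend::FusionGroup"));
  return 0;
}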
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/guard_elimination.h ADDED
@@ -0,0 +1,19 @@
+ #pragma once
+
+ #include <ATen/ATen.h>
+ #include <ATen/core/ivalue.h>
+ #include <ATen/core/jit_type.h>
+ #include <ATen/core/stack.h>
+ #include <torch/csrc/Export.h>
+ #include <torch/csrc/jit/ir/ir.h>
+
+ #include <list>
+ #include <vector>
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API void EliminateRedundantGuards(std::shared_ptr<Graph> graph);
+
+ } // namespace jit
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/hoist_conv_packed_params.h ADDED
@@ -0,0 +1,12 @@
+ #pragma once
+
+ #include <torch/csrc/jit/api/module.h>
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ void HoistConvPackedParams(script::Module& m);
+
+ } // namespace jit
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inline_autodiff_subgraphs.h ADDED
@@ -0,0 +1,15 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API bool canRunWithAutograd(Node* node);
+
+ TORCH_API void InlineAutodiffSubgraphs(
+     std::shared_ptr<Graph>& graph,
+     size_t threshold = 5);
+
+ } // namespace jit
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inline_fork_wait.h ADDED
@@ -0,0 +1,16 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ // Inline Fork and Wait calls. This is used, for example, in ONNX export, where
+ // we do not support the explicit parallelism structures and would rather
+ // just have a flat graph. This inlines the forked section in the fork()
+ // callsite and replaces uses of the result of wait() calls with the values
+ // produced from the (now-inlined) forked section.
+ TORCH_API void InlineForkWait(const std::shared_ptr<Graph>& graph);
+
+ } // namespace jit
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inline_forked_closures.h ADDED
@@ -0,0 +1,12 @@
+ #pragma once
+
+ #include <torch/csrc/Export.h>
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API void inlineForkedClosures(std::shared_ptr<Graph>& to_clean);
+
+ } // namespace jit
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inliner.h ADDED
@@ -0,0 +1,14 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ // Inline function and method calls.
+ TORCH_API void Inline(Graph& graph);
+
+ TORCH_API GraphFunction* tryToGraphFunction(Node* n);
+
+ } // namespace jit
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inplace_check.h ADDED
@@ -0,0 +1,11 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API void CheckInplace(std::shared_ptr<Graph>& graph);
+
+ }
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/insert_guards.h ADDED
@@ -0,0 +1,21 @@
+ #pragma once
+
+ #include <ATen/ATen.h>
+ #include <ATen/core/ivalue.h>
+ #include <ATen/core/jit_type.h>
+ #include <ATen/core/stack.h>
+ #include <torch/csrc/Export.h>
+ #include <torch/csrc/jit/ir/ir.h>
+
+ #include <list>
+ #include <vector>
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API void InsertGuards(std::shared_ptr<Graph> graph);
+
+ TORCH_API void RemoveProfilingNodes(const std::shared_ptr<Graph>& graph);
+
+ } // namespace jit
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/integer_value_refinement.h ADDED
@@ -0,0 +1,12 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ // return true if graph is modified
+ TORCH_API bool RefineIntegerValues(const std::shared_ptr<Graph>& graph);
+
+ } // namespace jit
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/liveness.h ADDED
@@ -0,0 +1,23 @@
+ #pragma once
+
+ #include <ATen/ATen.h>
+ #include <ATen/core/ivalue.h>
+ #include <ATen/core/jit_type.h>
+ #include <ATen/core/stack.h>
+ #include <c10/util/sparse_bitset.h>
+ #include <torch/csrc/Export.h>
+ #include <torch/csrc/jit/ir/ir.h>
+ #include <list>
+ #include <unordered_map>
+ #include <vector>
+ namespace torch {
+ namespace jit {
+
+ using SparseBitVector = ::c10::SparseBitVector<256>;
+
+ // BuildLivenessSets computes "bailout" liveness which is equivalent to
+ // "{LIVE_IN} or {GEN}" or "{LIVE_OUT} - {KILL}"
+ TORCH_API std::unordered_map<Node*, std::vector<Value*>> BuildLivenessSets(
+     std::shared_ptr<Graph> graph);
+ } // namespace jit
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/loop_unrolling.h ADDED
@@ -0,0 +1,36 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ // return true if graph is modified
+ TORCH_API bool UnrollLoops(std::shared_ptr<Graph>& graph);
+
+ // Only unrolls constant loops. Will unroll them regardless of loop block size
+ TORCH_API bool UnrollConstantLoops(std::shared_ptr<Graph>& graph);
+
+ TORCH_API Node* PeelLoop(Node* n, size_t times);
+
+ // return true if graph is modified
+ TORCH_API bool PeelProfilingLoops(const std::shared_ptr<Graph>& graph);
+
+ struct TORCH_API LoopsPeeler {
+   LoopsPeeler(std::function<bool(Node* n)> callback, size_t num_iterations = 1)
+       : callback_(std::move(callback)), num_iterations_(num_iterations) {}
+
+   bool run(const std::shared_ptr<Graph>& graph);
+
+  private:
+   void collectLoop(Node* n);
+   void collectLoops(Block* block);
+   void peelLoops();
+
+   std::function<bool(Node* n)> callback_ = nullptr;
+   Node* in_loop_ = nullptr;
+   std::list<Node*> loops_to_peel_;
+   size_t num_iterations_ = 1;
+ };
+ } // namespace jit
+ } // namespace torch
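A sketch of the helpers declared above (the model path and the peeling callback are hypothetical; the callback's meaning here is my reading of the header, not documented behavior):

#include <torch/script.h>
#include <torch/csrc/jit/passes/loop_unrolling.h>

int main() {
  torch::jit::Module m = torch::jit::load("model.pt");
  std::shared_ptr<torch::jit::Graph> graph = m.get_method("forward").graph();

  // Unroll loops whose trip count is a compile-time constant.
  torch::jit::UnrollConstantLoops(graph);

  // Peel one iteration off loops selected by the callback.
  torch::jit::LoopsPeeler peeler(
      [](torch::jit::Node* n) {
        return n->kind() == c10::Symbol::fromQualString("aten::conv2d");
      },
      /*num_iterations=*/1);
  peeler.run(graph);
  return 0;
}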
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_graph.h ADDED
@@ -0,0 +1,22 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ using ModulePtr = c10::intrusive_ptr<c10::ivalue::Object>;
+
+ // Given a graph with of a method which first argument is %self, lower it to a
+ // graph where all attributes accesses are replaced with explicit inputs of the
+ // graph (rather than results of prim::GetAttr executed on %self).
+ //
+ // Returns a tuple (graph, parameters) where the last module.parameters.size()
+ // inputs to the graph are the trainable parameters used in this method. The
+ // remaining inputs are the true inputs to the function.
+ TORCH_API std::pair<std::shared_ptr<Graph>, std::vector<IValue>> LowerGraph(
+     Graph& graph,
+     const ModulePtr& self);
+
+ } // namespace jit
+ } // namespace torch
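A sketch of LowerGraph on a module's forward method (paths illustrative, not part of the commit); the returned pair is the lowered graph plus the parameter values that now feed its trailing inputs:

#include <torch/script.h>
#include <torch/csrc/jit/passes/lower_graph.h>
#include <iostream>

int main() {
  torch::jit::Module m = torch::jit::load("model.pt");
  torch::jit::Method forward = m.get_method("forward");

  // Attribute reads on %self become explicit graph inputs.
  auto lowered = torch::jit::LowerGraph(*forward.graph(), m._ivalue());
  std::cout << "params lowered to inputs: " << lowered.second.size() << "\n";
  std::cout << *lowered.first;  // print the lowered graph
  return 0;
}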
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_tuples.h ADDED
@@ -0,0 +1,20 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ // removes tuples where TupleConstruct and TupleUnpack are matched
+ // but leaves tuples in place across if statements, loops, and as inputs/outputs
+ TORCH_API void LowerSimpleTuples(const std::shared_ptr<Graph>& graph);
+
+ // removes _all_ tuples and raises an error if some cannot be removed
+ // this is used by ONNX to ensure there are not tuples before conversion,
+ // but will not work on graphs whose inputs contain tuples.
+ TORCH_API void LowerAllTuples(const std::shared_ptr<Graph>& graph);
+
+ TORCH_API void LowerSimpleTuples(Block* block);
+
+ } // namespace jit
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/metal_rewrite.h ADDED
@@ -0,0 +1,17 @@
+ #pragma once
+ #include <torch/csrc/jit/api/module.h>
+ #include <torch/csrc/jit/ir/ir.h>
+ #include <string>
+ #include <vector>
+
+ namespace torch {
+ namespace jit {
+ TORCH_API void metalInsertPrePackedOps(std::shared_ptr<Graph>& graph);
+ TORCH_API void metalInsertPrePackedOps(script::Module& module);
+ TORCH_API void metalFusePrePackedConvWithClamp(script::Module& module);
+ TORCH_API void metalFoldPrePackingOps(script::Module& module);
+ TORCH_API script::Module metalOptimizeForMobile(
+     const script::Module& module,
+     const std::vector<std::string>& preserved_methods);
+ } // namespace jit
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/mkldnn_rewrite.h ADDED
@@ -0,0 +1,34 @@
+ #pragma once
+
+ #include <ATen/Config.h>
+ #include <torch/csrc/jit/api/module.h>
+ #include <torch/csrc/jit/ir/ir.h>
+ #include <torch/csrc/jit/passes/subgraph_rewrite.h>
+
+ #if AT_MKLDNN_ENABLED()
+
+ #include <ideep/tensor.hpp>
+
+ #endif // AT_MKLDNN_ENABLED()
+
+ namespace torch {
+ namespace jit {
+
+ #if AT_MKLDNN_ENABLED()
+
+ namespace mkldnn {
+
+ const static std::map<std::string, std::vector<torch::jit::MatchFilter>>
+     fusion_rewrite_map = {
+         {"none", {}},
+         {"relu", {}},
+ };
+
+ } // namespace mkldnn
+
+ #endif // AT_MKLDNN_ENABLED()
+
+ void FuseConvWithEltwise(std::shared_ptr<Graph>& graph);
+
+ } // namespace jit
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/mobile_optimizer_type.h ADDED
@@ -0,0 +1,13 @@
+ #pragma once
+
+ #include <cstdint>
+
+ enum class MobileOptimizerType : int8_t {
+   CONV_BN_FUSION,
+   INSERT_FOLD_PREPACK_OPS,
+   REMOVE_DROPOUT,
+   FUSE_ADD_RELU,
+   HOIST_CONV_PACKED_PARAMS,
+   CONV_1D_TO_2D,
+   VULKAN_AUTOMATIC_GPU_TRANSFER,
+ };
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/onednn_graph_fuser.h ADDED
@@ -0,0 +1,64 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+ #include <torch/csrc/jit/passes/pass_manager.h>
+
+ #include <ATen/Config.h>
+
+ namespace torch {
+ namespace jit {
+ namespace fuser {
+ namespace onednn {
+
+ static std::atomic<bool> onednn_enabled{true};
+
+ static std::atomic<bool>& getLlgaEnabled() {
+   return onednn_enabled;
+ }
+
+ TORCH_API void fuseGraph(std::shared_ptr<Graph>& g);
+
+ } // namespace onednn
+ } // namespace fuser
+
+ struct C10_EXPORT RegisterLlgaFuseGraph
+     : public PassManager<RegisterLlgaFuseGraph> {
+   static bool setEnabled(bool enabled) {
+     TORCH_CHECK(
+         AT_MKLDNN_ENABLED(),
+         "Running oneDNN Graph fuser is only supported with MKLDNN builds.");
+     bool oldState = fuser::onednn::getLlgaEnabled();
+     fuser::onednn::getLlgaEnabled() = enabled;
+     if (enabled) {
+       registerPass(fuser::onednn::fuseGraph);
+     } else {
+       clearPass();
+     }
+     return oldState;
+   }
+
+   static bool isEnabled() {
+     return fuser::onednn::getLlgaEnabled();
+   }
+
+   // override PassManager::registerPass to register pre-pass
+   static bool registerPass(GraphPass p) {
+     if (!isRegistered()) {
+       passID(registerPrePass(std::move(p)), true);
+       isRegistered(true);
+       return false;
+     }
+     return true;
+   }
+
+   // override PassManager::clearPass to clear pre-pass
+   static void clearPass() {
+     if (isRegistered()) {
+       clearPrePass(passID());
+       isRegistered(true);
+     }
+   }
+ };
+
+ } // namespace jit
+ } // namespace torch
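A short sketch of toggling the oneDNN Graph (LLGA) fuser through the registration hook above; it assumes an MKLDNN-enabled build, otherwise setEnabled throws:

#include <torch/csrc/jit/passes/onednn_graph_fuser.h>

int main() {
  // Registers fuser::onednn::fuseGraph as a custom pre-pass.
  bool was_enabled = torch::jit::RegisterLlgaFuseGraph::setEnabled(true);

  // ... build and run TorchScript models here; the registered pre-pass runs
  // during graph executor optimization ...

  // Restore the previous state (clears the pre-pass if it was off before).
  torch::jit::RegisterLlgaFuseGraph::setEnabled(was_enabled);
  return 0;
}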
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/pass_manager.h ADDED
@@ -0,0 +1,136 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ /* `getCustomPrePasses()` returns a vector of passes that will be executed
+ * after differentiation but before any fusion. This is the de-facto location
+ * for compiler backends to insert passes.
+ *
+ * `getCustomPostPasses()` returns a vector of passes that will be
+ * executed after differentiation and after fusion (if any). This is the
+ * location for fusion cleanup passes if they are needed.
+ *
+ * Static registration of a pass can be done by creating a global
+ * `Register{Pre,Post}Pass r(Pass)` variable in a compilation unit.
+ *
+ * pass_manager.h uses a Meyer's singleton to store a vector of `Pass`es, which
+ * modify the IR graph in place.
+ */
+
+ namespace torch {
+ namespace jit {
+
+ // A pass modifies a Graph in place.
+ using GraphPass = std::function<void(std::shared_ptr<Graph>&)>;
+
+ // Since Passes are std::functions, we associate a UUID to each pass, this way
+ // if we want to deregister a pass, we have something to reference it by.
+ using GraphPassNameType = unsigned int;
+
+ // Graph pass entries have a name associated with them
+ using GraphPassEntry = std::pair<GraphPass, GraphPassNameType>;
+
+ // Return currently registered passes. Passes are stored in a static vector
+ TORCH_API std::vector<std::pair<GraphPass, GraphPassNameType>>&
+ getCustomPostPasses();
+ TORCH_API std::vector<std::pair<GraphPass, GraphPassNameType>>&
+ getCustomPrePasses();
+
+ TORCH_API GraphPassNameType registerPostPass(GraphPass p);
+ TORCH_API GraphPassNameType registerPrePass(GraphPass p);
+
+ // Look up pass by name passed in, remove it from registered passes
+ TORCH_API void clearPostPass(GraphPassNameType p);
+ TORCH_API void clearPrePass(GraphPassNameType p);
+
+ // Remove all passes
+ TORCH_API void clearAllPostPasses();
+ TORCH_API void clearAllPrePasses();
+
+ // LEGACY CALL
+ struct TORCH_API RegisterPostPass {
+   RegisterPostPass(GraphPass p);
+ };
+
+ using RegisterPass = RegisterPostPass;
+
+ /*
+ * PassManager is a wrapper on the register/clear PostPass functions above. It
+ * will register the pass provided in "registerPass" and will hold on to its
+ * associated name that way clearPass can be later called and will delete the
+ * pass used to register when called.
+ *
+ * PassManager is templated because we want static variables based on a
+ * particular GraphPass. When deriving from PassManager, you should send as the
+ * template parameter your derived class as you would for the curiously
+ * recurring template pattern. This template parameter isn't actually used and
+ * is simply done to prevent static members from being shared across derived
+ * types.
+ */
+ template <typename DerivedType>
+ struct C10_EXPORT PassManager {
+  private:
+   // We want this class to be abstract because it's
+   virtual void abstract() = 0;
+
+  protected:
+   /*
+    * isRegistered() will return if a pass has been registered
+    * isRegistered(true) will change the value of the internal static bool
+    *
+    * There's an internal static bool to this function to keep track of the
+    * state, this is so when functions are derived from this class, they don't
+    * have to worry about initializing the static members.
+    */
+   static bool isRegistered(bool flip_bit = false) {
+     static bool val = false;
+     if (flip_bit)
+       val = !val;
+     return val;
+   }
+
+   /*
+    * name() will return the name of the registered pass
+    * name(pass_name, true) will set the name of the pass
+    * Similarly to isRegistered we use an internal static variable to hold the
+    * name.
+    */
+   static GraphPassNameType passID(
+       GraphPassNameType PassID = 0,
+       bool set = false) {
+     static GraphPassNameType pass_id = 0;
+     if (set)
+       pass_id = PassID;
+     return pass_id;
+   }
+
+  public:
+   // registerPass(pass) will register the pass provided and set the
+   // name/isRegistered functions appropriately, it returns a bool value
+   // indicating whether the given pass is already registered previously.
+   static bool registerPass(GraphPass p) {
+     if (!isRegistered()) {
+       // If we don't already have a registered pass, register pass
+       // hold on to its name, change isRegistered to true
+       passID(registerPostPass(std::move(p)), true);
+       isRegistered(true);
+       return false;
+     }
+     return true;
+   }
+
+   // Calls ClearPostPass(passID())
+   static void clearPass() {
+     // If the pass is registered, clear it and change isRegistered to false.
+     if (isRegistered()) {
+       clearPostPass(passID());
+       isRegistered(true);
+     }
+   }
+
+   // clang-tidy requires virtual destructor;
+   virtual ~PassManager() = default;
+ };
+
+ } // namespace jit
+ } // namespace torch
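A sketch of static pass registration against the hooks declared above (the pass body is a hypothetical no-op counter, not an existing PyTorch pass):

#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/passes/pass_manager.h>

namespace {

// A trivial custom post-pass: walks the graph without rewriting anything.
void countNodesPass(std::shared_ptr<torch::jit::Graph>& graph) {
  size_t count = 0;
  for (torch::jit::Node* node : graph->nodes()) {
    (void)node;
    ++count;
  }
  // A real backend pass would mutate `graph` in place here.
}

// Static registration: the global constructor appends the pass to
// getCustomPostPasses(), so it runs after differentiation and fusion.
torch::jit::RegisterPostPass register_count_nodes(countNodesPass);

} // namespace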
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole.h ADDED
@@ -0,0 +1,20 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ // return true if graph is modified
+ TORCH_API bool PeepholeOptimize(
+     const std::shared_ptr<Graph>& graph,
+     bool disable_shape_peepholes = false);
+ // return true if graph is modified
+ TORCH_API bool PeepholeOptimize(
+     Block* block,
+     bool disable_shape_peepholes = false);
+ // return true if graph is modified
+ TORCH_API bool FuseAddMM(const std::shared_ptr<Graph>& graph);
+
+ } // namespace jit
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_alias_sensitive.h ADDED
@@ -0,0 +1,17 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ // Peephole Optimizes alias sensitive peepholes
+ // Currently this is invoked as part of PeepholeOptimize
+ // return true if graph is modified
+ // Optimizes on TensorType if shape_peepholes is true
+ TORCH_API bool PeepholeOptimizeAliasSensitive(
+     const std::shared_ptr<Graph>& graph,
+     bool shape_peepholes);
+
+ } // namespace jit
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_list_idioms.h ADDED
@@ -0,0 +1,72 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ // Peephole Optimizes List ops such as len(li) and li[1].
+ // 1. Construct/Unpack optimizations
+ // Given a function like this:
+ //     def foo(a, b):
+ //         li = [a, b]
+ //         x, y = li
+ //         return x, y
+ // This pass produces (after dead code elimination):
+ //     def foo(a, b):
+ //         return a, b
+ //
+ // This is only applied to lists that are not modified.
+ //
+ // 2. getitem optimizations
+ // Given a function like this:
+ //     def foo(a, b):
+ //         li = [a, b]
+ //         x = li[0]
+ //         return x
+ // This pass produces (after dead code elimination):
+ //     def foo(a, b):
+ //         return a
+ //
+ // This optimization can only happen if the list is not modified.
+ //
+ // 3. len optimizations
+ // Given a function like this:
+ //     def foo():
+ //         li = [1, 2]
+ //         return len(li)
+ // This pass produces (after dead code elimination):
+ //     def foo():
+ //         return 2
+ //
+ // This has the same requirements as the getitem optimizations.
+ //
+ // 4. ListConstruct + ListConstruct
+ // Given a function like this:
+ //     def foo():
+ //         return [1, 2] + [3, 4]
+ // This pass produces (after dead code elimination):
+ //     def foo():
+ //         return [1, 2, 3, 4]
+ //
+ // This is only applied to lists that are not modified.
+ //
+ // 5. Slice
+ // Given a function like this:
+ //     def foo():
+ //         return [1, 2, 3, 4, 5][0:2]
+ // This pass produces (after deadcode elimination):
+ //     def foo():
+ //         return [1, 2]
+ //
+ // Currently this is invoked as part of PeepholeOptimize
+ // return true if graph is modified.
+ // If `refine_list_len` is true will attempt to refine the len of lists through
+ // len comparisons and assertions. This does not generally optimize pytorch
+ // programs so it is not called by default in PeepholeOptimize.
+ TORCH_API bool PeepholeOptimizeListIdioms(
+     const std::shared_ptr<Graph>& graph,
+     bool refine_list_len = false);
+
+ } // namespace jit
+ } // namespace torch
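A sketch of invoking the list-idiom peepholes directly (normally they run inside PeepholeOptimize); the DCE call reflects the header's "after dead code elimination" framing, and the model path is a placeholder:

#include <torch/script.h>
#include <torch/csrc/jit/passes/dead_code_elimination.h>
#include <torch/csrc/jit/passes/peephole_list_idioms.h>
#include <iostream>

int main() {
  torch::jit::Module m = torch::jit::load("model.pt");  // placeholder path
  std::shared_ptr<torch::jit::Graph> graph = m.get_method("forward").graph();

  bool changed =
      torch::jit::PeepholeOptimizeListIdioms(graph, /*refine_list_len=*/false);
  torch::jit::EliminateDeadCode(graph);
  std::cout << (changed ? "graph modified\n" : "no list idioms found\n");
  return 0;
}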
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/prepack_folding.h ADDED
@@ -0,0 +1,17 @@
+ #pragma once
+
+ #include <torch/csrc/jit/api/module.h>
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ using PrePackingOpsFilterFn = std::function<bool(Node*)>;
+
+ void PrePackingOpsFolder(
+     script::Module& m,
+     const PrePackingOpsFilterFn& is_foldable_op,
+     const std::string& attr_prefix);
+
+ } // namespace jit
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/dedup_module_uses.h ADDED
@@ -0,0 +1,28 @@
+ #pragma once
+
+ #include <torch/csrc/jit/api/module.h>
+
+ namespace torch {
+ namespace jit {
+
+ /** Recursively deduplicate multiple uses of the same module by
+ * creating an instance clone for each use of the module, which means
+ * the type will be the same as before and all the attributes will be
+ * copied, then we'll change the use of the original module to the use
+ * of cloned module in the Graph.
+ *
+ * This is done to ensure that modules can survive destructive passes
+ * without changing model behavior. For example, here:
+ *
+ * x = self.conv1(x)
+ * x = self.relu(x)
+ * x = self.conv2(x)
+ * x = self.relu(x)
+ *
+ * self.relu needs to be deduplicated for potential future destructive passes
+ * to work properly.
+ */
+ TORCH_API void DedupModuleUses(Module& module);
+
+ } // namespace jit
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/finalize.h ADDED
@@ -0,0 +1,63 @@
+ #pragma once
+
+ #include <torch/csrc/jit/api/module.h>
+ #include <torch/csrc/jit/ir/ir.h>
+ #include <torch/csrc/jit/passes/quantization/quantization_type.h>
+
+ namespace torch {
+ namespace jit {
+
+ /** \brief Backend specific pass to fuse dequantize - op - quantize calls
+ * as quantized_op calls.
+ *
+ * Right now this is a fusion for fbgemm backend and only works for quantized
+ * conv op, we'll extend to more ops and more backends in the future.
+ *
+ * Currently supported fusion:
+ * q(conv2d(dq(a), dq(w), dq(b))) --> to_nchw(fbgemm_conv2d(prepack(to_nhwc(a)),
+ *                                                          prepack(to_nhwc(w)),
+ *                                                          prepack(to_nhwc(b))))
+ *
+ * q(linear(dq(a), dq(w), dq(b))) --> to_nchw(fbgemm_linear(prepack(to_nhwc(a)),
+ *                                                          prepack(to_nhwc(w)),
+ *                                                          prepack(to_nhwc(b))))
+ *
+ * \param graph the graph we want to apply fusion
+ */
+ TORCH_API void QuantFusion(
+     std::shared_ptr<Graph>& graph,
+     QuantType quant_type = QuantType::STATIC);
+
+ /** \brief Insert prepack and unpack function in graph
+ * We want add pack/unpack functions for quantized weight because later we want
+ * to fold the packed weight as an attribute of the module, in order to reduce
+ * the cost of packing the weight on the fly in quantized models.
+ *
+ * Each quantized op has it's corresponding prepack/unpack function,
+ * right now, we only need to do prepack/unpack for quantized::linear
+ * and quantized::conv2d.
+ */
+ TORCH_API void InsertPrepackUnpack(std::shared_ptr<Graph>& graph);
+
+ /** \brief Insert pack and unpack function in all graphs
+ * of module
+ *
+ * Go through graphs of all the methods of all child modules
+ * and call InsertPrepackUnpack on the graph.
+ */
+ TORCH_API void InsertPrepackUnpack(Module& module);
+
+ TORCH_API script::Module Finalize(
+     script::Module& module,
+     QuantType quant_type = QuantType::STATIC,
+     const std::vector<std::string>& preserved_attrs =
+         std::vector<std::string>());
+
+ TORCH_API void FoldQuantizedPrepackingOps(Module& module);
+
+ TORCH_API Module FinalizeOnDevicePTQ(
+     Module& module,
+     QuantType quant_type,
+     const std::string& method_name);
+ } // namespace jit
+ } // namespace torch
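A sketch of the finalize stage of JIT graph-mode quantization using the declarations above; it assumes the module already contains quantize/dequantize ops produced by the earlier observer and insert-quant-dequant passes, and the file paths are placeholders:

#include <torch/script.h>
#include <torch/csrc/jit/passes/quantization/finalize.h>

int main() {
  torch::jit::Module m = torch::jit::load("quant_ready.pt");

  // Add prepack/unpack calls for quantized::linear / quantized::conv2d weights.
  torch::jit::InsertPrepackUnpack(m);

  // Fuse dequantize-op-quantize patterns and fold prepacked weights into
  // module attributes; returns the finalized module.
  torch::jit::Module finalized =
      torch::jit::Finalize(m, torch::jit::QuantType::STATIC);
  finalized.save("quantized.pt");
  return 0;
}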