applied-ai-018 committed on
Commit d515c7b · verified · 1 Parent(s): 510ff90

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/add_if_then_else.h +11 -0
  2. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/annotate_warns.h +11 -0
  3. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/autocast.h +15 -0
  4. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/bailout_graph.h +34 -0
  5. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/batch_mm.h +11 -0
  6. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/canonicalize.h +22 -0
  7. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/check_strict_fusion.h +12 -0
  8. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/clear_undefinedness.h +24 -0
  9. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/common_subexpression_elimination.h +11 -0
  10. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/concat_opt.h +19 -0
  11. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/constant_pooling.h +11 -0
  12. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/constant_propagation.h +32 -0
  13. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/create_functional_graphs.h +14 -0
  14. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/decompose_ops.h +11 -0
  15. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/dtype_analysis.h +17 -0
  16. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/eliminate_no_ops.h +17 -0
  17. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/erase_number_types.h +23 -0
  18. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fixup_trace_scope_blocks.h +47 -0
  19. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fold_conv_bn.h +37 -0
  20. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/freeze_module.h +36 -0
  21. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_concat_linear.h +13 -0
  22. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_conv_add_relu_fusion.h +15 -0
  23. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_conv_folding.h +24 -0
  24. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_graph_optimizations.h +22 -0
  25. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_linear_folding.h +14 -0
  26. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_linear_transpose.h +13 -0
  27. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_ops_to_mkldnn.h +15 -0
  28. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fuse_linear.h +24 -0
  29. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fuse_relu.h +11 -0
  30. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/graph_fuser.h +37 -0
  31. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/graph_rewrite_helper.h +54 -0
  32. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/guard_elimination.h +19 -0
  33. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/hoist_conv_packed_params.h +12 -0
  34. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inline_autodiff_subgraphs.h +15 -0
  35. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inline_fork_wait.h +16 -0
  36. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inline_forked_closures.h +12 -0
  37. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inliner.h +14 -0
  38. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/insert_guards.h +21 -0
  39. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/integer_value_refinement.h +12 -0
  40. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lift_closures.h +12 -0
  41. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/liveness.h +23 -0
  42. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/loop_unrolling.h +36 -0
  43. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_grad_of.h +17 -0
  44. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_graph.h +22 -0
  45. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/metal_rewrite.h +17 -0
  46. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/mkldnn_rewrite.h +34 -0
  47. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/normalize_ops.h +18 -0
  48. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/onednn_graph_fuser.h +64 -0
  49. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/onnx.h +28 -0
  50. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/pass_manager.h +136 -0
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/add_if_then_else.h ADDED
@@ -0,0 +1,11 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API bool AddIfThenElseOp(std::shared_ptr<Graph>& graph);
+
+ } // namespace jit
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/annotate_warns.h ADDED
@@ -0,0 +1,11 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API void AnnotateWarns(const std::shared_ptr<Graph>& graph);
+
+ } // namespace jit
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/autocast.h ADDED
@@ -0,0 +1,15 @@
+
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API void Autocast(const std::shared_ptr<Graph>& graph);
+
+ TORCH_API bool setAutocastMode(bool value);
+ TORCH_API bool autocastEnabled();
+
+ } // namespace jit
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/bailout_graph.h ADDED
@@ -0,0 +1,34 @@
+ #pragma once
+
+ #include <ATen/ATen.h>
+ #include <ATen/core/ivalue.h>
+ #include <ATen/core/jit_type.h>
+ #include <ATen/core/stack.h>
+ #include <torch/csrc/Export.h>
+ #include <torch/csrc/jit/ir/ir.h>
+
+ #include <list>
+ #include <vector>
+
+ namespace torch {
+ namespace jit {
+
+ // Replaces prim::Guard nodes with prim::BailOut nodes and
+ // computes sets of inputs needed to resume execution at
+ // bailout points
+ TORCH_API void InsertBailOuts(std::shared_ptr<Graph> graph);
+
+ // Builds a bailout graph into `target` (which is an empty graph)
+ // for a given bailout point `bailout_index`
+ // from the original graph `orig` (the original unoptimized graph)
+ // BailOut graphs allow Interpreter to resume
+ // execution of the (un/de)optimized graph (i.e.
+ // a graph that doesn't rely on any assumptions derived from
+ // on profiling information) from a given BailOut point
+ // should any of the assumptions fail for an actual input.
+ TORCH_API std::shared_ptr<Graph> BuildBailOutGraphFrom(
+ int64_t bailout_index,
+ const std::shared_ptr<Graph>& orig,
+ const std::shared_ptr<Graph>& target);
+ } // namespace jit
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/batch_mm.h ADDED
@@ -0,0 +1,11 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API void BatchMM(std::shared_ptr<Graph>& graph);
+
+ }
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/canonicalize.h ADDED
@@ -0,0 +1,22 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API std::shared_ptr<Graph> Canonicalize(
+ const std::shared_ptr<Graph>& graph,
+ bool keep_unique_names = true);
+
+ TORCH_API void CanonicalizeOutputs(std::shared_ptr<Graph>& graph);
+
+ TORCH_API c10::optional<const Use> firstOrLastUse(Value* v, bool find_first);
+
+ TORCH_API bool isBeforeOrAfter(
+ const Use& a,
+ const Use& b,
+ bool checking_before);
+
+ } // namespace jit
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/check_strict_fusion.h ADDED
@@ -0,0 +1,12 @@
+
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API void CheckStrictFusion(std::shared_ptr<Graph>& graph);
+
+ } // namespace jit
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/clear_undefinedness.h ADDED
@@ -0,0 +1,24 @@
+ #pragma once
+
+ #include <ATen/ATen.h>
+ #include <ATen/core/ivalue.h>
+ #include <ATen/core/jit_type.h>
+ #include <torch/csrc/Export.h>
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ // Undefinedness makes argument matching fail for regular tensor operations
+ // if 1+ arguments are undefined or possibly undefined tensors.
+ // Technically, undefined tensors are **not** tensors as the regular tensor
+ // operations do not know how to handle them.
+ // However, in practice, there are guards and conversion operators that
+ // **always** gate regular operations if undefined tensors may be present
+ // Eventually, we would love to move to the world where we use optionals
+ // in lieu of undefined tensors.
+ // When this happens, this pass will be removed
+ TORCH_API void ClearUndefinedness(const std::shared_ptr<Graph>& graph);
+
+ } // namespace jit
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/common_subexpression_elimination.h ADDED
@@ -0,0 +1,11 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API bool EliminateCommonSubexpression(
+ const std::shared_ptr<Graph>& graph);
+ }
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/concat_opt.h ADDED
@@ -0,0 +1,19 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ // Eliminates common inputs among `aten::cat` ops.
+ TORCH_API bool EliminateConcatCommonInputs(const std::shared_ptr<Graph>& graph);
+
+ // Expands `aten::cat` ops into `aten::copy` ops and eliminates redudancies
+ // in the buffers used for concatenation if possible.
+ TORCH_API void ExpandConcatAndEliminateRedundancy(
+ const std::shared_ptr<Graph>& graph);
+
+ TORCH_API bool CombineConcats(const std::shared_ptr<Graph>& graph);
+
+ } // namespace jit
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/constant_pooling.h ADDED
@@ -0,0 +1,11 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API void ConstantPooling(const std::shared_ptr<Graph>& graph);
+
+ }
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/constant_propagation.h ADDED
@@ -0,0 +1,32 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ // Runs constant propagation on all objects unless ignore_custom_classes is
+ // specified as true, in which case user defined classes are skipped. This is
+ // useful to prevent early fusion of packing operations, which end up lowering
+ // away information about their constructors (e.g. packed::linear_clamp_prepack
+ // and prepacked::conv2d_clamp_prepack)
+ // Returns True if the pass made a change to the graph
+ TORCH_API bool ConstantPropagation(
+ std::shared_ptr<Graph>& graph,
+ bool ignore_custom_classes = false);
+
+ // runs constant propagation only on ops that have non-aliasing inputs & outputs
+ // Returns True if the pass made a change to the graph
+ TORCH_API bool ConstantPropagationImmutableTypes(std::shared_ptr<Graph>& graph);
+
+ // Runs the node if its inputs are constants. Callers of this function must
+ // make their own determination if constant prop is appropriate - for example
+ // non-deterministic ops or ops with side effects. If ignore_custom_classes is
+ // specified, nodes that output user defined classes are not run.
+ TORCH_API c10::optional<Stack> runNodeIfInputsAreConstant(
+ const Node* node,
+ bool ignore_custom_classes = false,
+ AliasDb* db = nullptr);
+
+ } // namespace jit
+ } // namespace torch
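As a point of reference for the API declared above (not part of the committed file), a minimal C++ sketch of running constant propagation on a loaded TorchScript module could look like the following; the model path and the surrounding program are placeholders:

#include <torch/script.h>
#include <torch/csrc/jit/passes/constant_propagation.h>

int main() {
  // Load any TorchScript module; "model.pt" is a placeholder path.
  torch::jit::Module m = torch::jit::load("model.pt");
  std::shared_ptr<torch::jit::Graph> graph = m.get_method("forward").graph();

  // Fold constant subexpressions in place; returns true if the graph changed.
  bool changed = torch::jit::ConstantPropagation(graph);

  graph->dump();  // print the (possibly) simplified IR to stdout
  return changed ? 0 : 1;
}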
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/create_functional_graphs.h ADDED
@@ -0,0 +1,14 @@
+ #pragma once
+
+ #include <torch/csrc/Export.h>
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API void CreateFunctionalGraphs(const std::shared_ptr<Graph>& graph);
+
+ TORCH_API void InlineFunctionalGraphs(const std::shared_ptr<Graph>& graph);
+
+ } // namespace jit
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/decompose_ops.h ADDED
@@ -0,0 +1,11 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API void DecomposeOps(std::shared_ptr<Graph>& graph);
+
+ }
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/dtype_analysis.h ADDED
@@ -0,0 +1,17 @@
+ #pragma once
+
+ #include <torch/csrc/Export.h>
+ #include <torch/csrc/jit/ir/ir.h>
+ #include <memory>
+
+ namespace torch {
+ namespace jit {
+ struct Graph;
+
+ // Propagate tensor properties (e.g., dtype, device, is_contiguous, layout)
+ // propagation on all tensor objects. Currently, we only support dtype
+ // propagation
+ TORCH_API bool DtypePropagation(std::shared_ptr<Graph>& graph);
+
+ } // namespace jit
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/eliminate_no_ops.h ADDED
@@ -0,0 +1,17 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ // Remove ops that do nothing on the forward pass (like aten::detach).
+ // This pass is invoked as a part of freeze_module.
+ // This function also takes a set of custom ops to eliminate. All ops in this
+ // set must take their output as their first input, i.e. x = f(x, ...)
+ TORCH_API bool EliminateNoOps(
+ std::shared_ptr<Graph>& graph,
+ std::unordered_set<c10::Symbol> custom_ops = {});
+
+ } // namespace jit
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/erase_number_types.h ADDED
@@ -0,0 +1,23 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ // Erase NumberType information. This is necessary for and only used in
+ // exporting to ONNX. This pass ensures that no remaining Values have
+ // NumberType types, replacing them with tensors.
+ // The following things are done to erase NumberType info:
+ // - NumberType outputs are changed to DynamicType.
+ // - prim::Constant nodes which are numbers get changed into 0-dim tensors of
+ // the corresponding type
+ // - prim::TensorToNum, aten::Float, aten::Int and prim::NumToTensor nodes
+ // are erased.
+ //
+ // The pass assumes that DCE will be called sometime after.
+ TORCH_API void EraseNumberTypes(const std::shared_ptr<Graph>& graph);
+ TORCH_API void EraseNumberTypesOnBlock(Block* block);
+
+ } // namespace jit
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fixup_trace_scope_blocks.h ADDED
@@ -0,0 +1,47 @@
+ #pragma once
+
+ #include <torch/csrc/jit/api/module.h>
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ // Directly after tracing, we have an ill-formed graph with blocks inserted.
+ // Example:
+ //
+ // graph(%self : ClassType<Module>,
+ // %input.1 : Float(3, 4)):
+ // %1 : ClassType<Module> = prim::GetAttr[name="relu1"](%self)
+ // %2 : ClassType<Module> = prim::GetAttr[name="relu2"](%self)
+ // %3 : ClassType<Module> = prim::GetAttr[name="rrr"](%2)
+ // = prim::TracedModuleForward[scope="__module.relu1"]()
+ // block0():
+ // %input : Float(3, 4) = aten::relu(%input.1),
+ // -> ()
+ // = prim::TracedModuleForward[scope="__module.relu2"](),
+ // block0():
+ // = prim::TracedModuleForward[scope="__module.relu2.rrr"](),
+ // block0():
+ // %6 : Float(3, 4) = aten::relu(%input),
+ // -> ()
+ // -> ()
+ // return (%6)
+ //
+ // In this pass, we:
+ // 1) Lift Value defs to as high of a scope as needed to ensure that
+ // they dominate all their uses. For example, `input` in the above
+ // graph needs to be lifted to the top-level block so that its use
+ // in the second `relu` operator is dominated.
+ // 2) Lambda lift the blocks. This ensures that all values used within
+ // each scope have their defs captured.
+ // 3) Convert the scope blocks into methods on their respective Modules,
+ // and convert TracedModuleForward nodes to CallMethod nodes into those
+ // methods.
+ //
+ // Then, we'll have a well-formed graph with proper method calls.
+ TORCH_API void FixupTraceScopeBlocks(
+ std::shared_ptr<Graph>& graph,
+ Module* self);
+
+ } // namespace jit
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fold_conv_bn.h ADDED
@@ -0,0 +1,37 @@
+ #pragma once
+
+ #include <torch/csrc/jit/api/module.h>
+
+ namespace torch {
+ namespace jit {
+
+ /** \brief Fold Conv2d-BatchNorm2d into Conv2d in all methods of this
+ * module and all its submodules, forward is included by default.
+ *
+ * The weight and bias of the Conv2d are correspondingly updated. Should only be
+ * used on modules in eval mode.
+ */
+ TORCH_API Module FoldConvBatchNorm(const Module& module);
+
+ struct TORCH_API ConvBNParameters {
+ at::Tensor conv_w;
+ at::Tensor conv_b;
+ at::Tensor bn_rm;
+ at::Tensor bn_rv;
+ double bn_eps = 0.0;
+ at::Tensor bn_w;
+ at::Tensor bn_b;
+ };
+
+ /**
+ * Given the current weight and bias tensors of a Conv module and parameters
+ * of the BatchNorm module we're folding with, compute the updated values
+ * for the weight and bias.
+ *
+ * The function is basically copied from torch/nn/utils/fusion.py
+ */
+ TORCH_API std::tuple<at::Tensor, at::Tensor> computeUpdatedConvWeightAndBias(
+ const ConvBNParameters& p);
+
+ } // namespace jit
+ } // namespace torch
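For reference (not part of the committed header), the folding arithmetic that computeUpdatedConvWeightAndBias refers to, as implemented in torch/nn/utils/fusion.py, can be sketched with ATen ops as follows; the function name is illustrative and the field names follow ConvBNParameters:

#include <ATen/ATen.h>
#include <tuple>

// Fold BatchNorm statistics into convolution weights:
//   scale = bn_w / sqrt(bn_rv + bn_eps)
//   new_w = conv_w * scale   (broadcast over output channels)
//   new_b = (conv_b - bn_rm) * scale + bn_b
std::tuple<at::Tensor, at::Tensor> foldConvBatchNormExample(
    const at::Tensor& conv_w, const at::Tensor& conv_b,
    const at::Tensor& bn_rm, const at::Tensor& bn_rv, double bn_eps,
    const at::Tensor& bn_w, const at::Tensor& bn_b) {
  at::Tensor scale = bn_w / at::sqrt(bn_rv + bn_eps);
  at::Tensor new_w = conv_w * scale.reshape({-1, 1, 1, 1});  // Conv2d weight layout [out, in, kh, kw]
  at::Tensor new_b = (conv_b - bn_rm) * scale + bn_b;
  return std::make_tuple(new_w, new_b);
}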
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/freeze_module.h ADDED
@@ -0,0 +1,36 @@
+ /** \brief This file defines freezing Torchscript module API.
+ *
+ * This API has python-binding and can be invoked directly or as a part of
+ * general optimization pipeline.
+ */
+ #pragma once
+
+ #include <torch/csrc/jit/api/module.h>
+ #include <torch/csrc/jit/ir/ir.h>
+
+ /** \brief Freeze Module, i.e., Assume all attributes are constants.
+ *
+ * Freezing module is a functionality that allows the JIT to internalize
+ * immutable attributes. Combined with inlining, the module is aggressively
+ * optimized and significant overhead is optimized away. The freezeModule API
+ * produces a cloned frozen module.
+ */
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API Module freeze_module(
+ const Module& module,
+ std::vector<std::string> preservedAttrs = std::vector<std::string>(),
+ bool freezeInterfaces = true,
+ bool preserveParameters = false);
+
+ // Clone-free version of freeze_module. This modifies the module inplace.
+ // Use this version to avoid extra memory usage incurred by cloning the module.
+ TORCH_API void freeze_module_inplace(
+ Module* module,
+ std::vector<std::string> preservedAttrs = std::vector<std::string>(),
+ bool freezeInterfaces = true,
+ bool preserveParameters = false);
+ } // namespace jit
+ } // namespace torch
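A minimal sketch of the freezing API declared above, called from the C++ side (not part of the committed file; the model path is a placeholder):

#include <torch/script.h>
#include <torch/csrc/jit/passes/freeze_module.h>

int main() {
  torch::jit::Module m = torch::jit::load("model.pt");  // placeholder path
  m.eval();  // freezing assumes the module is in eval mode

  // Returns a cloned module whose attributes have been internalized as constants.
  torch::jit::Module frozen = torch::jit::freeze_module(m);
  frozen.get_method("forward").graph()->dump();
  return 0;
}

When the extra copy made by the clone is a concern, freeze_module_inplace offers the same behaviour on the original module.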
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_concat_linear.h ADDED
@@ -0,0 +1,13 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ // Concats multiple linear ops with the same Tensor input
+ // into a single linear op.
+ TORCH_API bool FrozenConcatLinear(std::shared_ptr<Graph>& graph);
+
+ } // namespace jit
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_conv_add_relu_fusion.h ADDED
@@ -0,0 +1,15 @@
+ #pragma once
+
+ #include <torch/csrc/jit/api/module.h>
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API extern std::function<void(std::shared_ptr<Graph>&)>&
+ getFuseFrozenConvAddReluImpl();
+
+ TORCH_API void FuseFrozenConvAddRelu(std::shared_ptr<Graph>& graph);
+
+ } // namespace jit
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_conv_folding.h ADDED
@@ -0,0 +1,24 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ // Fuses Convolution -> Batchnorm into a single Convolution by
+ // folding batchnorm weights into conv weights.
+ // This pass only works on Frozen Graphs; otherwise it is a No-Op.
+ TORCH_API bool FoldFrozenConvBatchnorm(std::shared_ptr<Graph>& graph);
+
+ // Fuses Convolution -> Add/Sub into a single Convolution by
+ // folding add constant tensor into conv weights.
+ // This pass only works on Frozen Graphs; otherwise it is a No-Op.
+ TORCH_API bool FoldFrozenConvAddOrSub(std::shared_ptr<Graph>& graph);
+
+ // Fuses Convolution -> Mul/Div into a single Convolution by
+ // folding add constant tensor into conv weights.
+ // This pass only works on Frozen Graphs; otherwise it is a No-Op.
+ TORCH_API bool FoldFrozenConvMulOrDiv(std::shared_ptr<Graph>& graph);
+
+ } // namespace jit
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_graph_optimizations.h ADDED
@@ -0,0 +1,22 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ /** \brief Runs a set of Optimizations that Optimize Frozen Graphs
+ *
+ * Currently this set of optimizations is:
+ * - FoldFrozenConvBatchnorm
+ * - FoldFrozenConvAddOrSub
+ * - FoldFrozenConvMulOrDiv
+ * - FoldFrozenLinearBatchnorm
+ */
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API void OptimizeFrozenGraph(
+ std::shared_ptr<Graph>& graph,
+ bool optimize_numerics = true);
+
+ } // namespace jit
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_linear_folding.h ADDED
@@ -0,0 +1,14 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ // Fuses Linear -> BatchNormNd into a single Linear by
+ // folding batchnorm weights into linear weights.
+ // This pass only works on Frozen Graphs; otherwise it is a No-Op.
+ TORCH_API bool FoldFrozenLinearBatchnorm(std::shared_ptr<Graph>& graph);
+
+ } // namespace jit
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_linear_transpose.h ADDED
@@ -0,0 +1,13 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ // Transposes the weight matrix for frozen linear modules.
+ // and converts it into a matmul
+ TORCH_API bool FrozenLinearTranspose(std::shared_ptr<Graph>& graph);
+
+ } // namespace jit
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_ops_to_mkldnn.h ADDED
@@ -0,0 +1,15 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ // Converts operators & their parameters to mkldnn if it is profitable
+ // Currently encompassing Conv2d and Conv3d, and Linear
+ // Op must be in float32 and mkldnn must be built
+ // This pass only works on frozen graph
+ TORCH_API void ConvertFrozenOpsToMKLDNN(std::shared_ptr<Graph>& graph);
+
+ } // namespace jit
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fuse_linear.h ADDED
@@ -0,0 +1,24 @@
+ /** \brief Fusing linear patterns as single at::linear for easier pattern
+ * matching in later passes
+ */
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ /** \brief Match the at::linear pattern and fuse it into a single at::linear
+ * This pass fuse the addmm or matmul + add generated by JIT back to linear
+ * This pass can be deleted once the JIT can emit the aten::linear in the future
+ */
+ TORCH_API void FuseLinear(std::shared_ptr<Graph>& graph);
+
+ /** Swap functional linear CallFunctions to aten::linear
+ */
+ TORCH_API void SwapFunctionalLinear(std::shared_ptr<Graph>& graph);
+ /** Swap all functional linear CallFunctions in module
+ */
+ TORCH_API void SwapFunctionalLinear(Module& module);
+ } // namespace jit
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fuse_relu.h ADDED
@@ -0,0 +1,11 @@
+ #pragma once
+
+ #include <torch/csrc/jit/api/module.h>
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+ TORCH_API void FuseAddRelu(script::Module& module);
+ TORCH_API void FuseAddRelu(std::shared_ptr<Graph>& graph);
+ } // namespace jit
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/graph_fuser.h ADDED
@@ -0,0 +1,37 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API bool canFuseOnCPULegacy();
+ TORCH_API void overrideCanFuseOnCPULegacy(bool value);
+
+ // NB: Be sure to run DCE before fusion, because dead instructions
+ // can prevent fusion opportunities from being exploited.
+ // On Windows will noop, NYI
+ TORCH_API void FuseGraph(
+ std::shared_ptr<Graph>& graph,
+ bool strict_fuser_check = false);
+
+ // \brief Custom fusion pass using a node-level callback to
+ // determine the inclusion of nodes in a subgraph.
+ //
+ // This helper omits aliased inputs and fusion across control flow
+ // boundaries.
+ //
+ // \arg graph The graph to be modified in-place
+ // \arg is_fusable A callback run on each fusable node in the graph.
+ // \arg kind The label given to the resultant fused subgraph
+ // \arg arg_limit The maximum number of args the resultant fused subgraph
+ // should have. Note: This will likely develop into a general
+ // post condition on the fused subgraph.
+ TORCH_API void CustomFuseGraph(
+ std::shared_ptr<Graph>& graph,
+ const std::function<bool(Node*)>& is_fusable,
+ Symbol kind,
+ size_t arg_limit = std::numeric_limits<size_t>::max());
+
+ } // namespace jit
+ } // namespace torch
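As an illustration of the CustomFuseGraph callback interface above (not part of the committed file), the sketch below groups every aten::relu node into subgraphs labelled with an arbitrary symbol; the function name and the my_ns::FusedGroup label are made up for the example:

#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/passes/graph_fuser.h>

void fuseRelusExample(std::shared_ptr<torch::jit::Graph>& graph) {
  // Callback deciding, per node, whether it may join a fused subgraph.
  auto is_fusable = [](torch::jit::Node* n) {
    return n->kind() == c10::Symbol::fromQualString("aten::relu");
  };
  torch::jit::CustomFuseGraph(
      graph,
      is_fusable,
      c10::Symbol::fromQualString("my_ns::FusedGroup"));  // label given to the fused subgraph
}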
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/graph_rewrite_helper.h ADDED
@@ -0,0 +1,54 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+ #include <torch/csrc/jit/ir/irparser.h>
+ #include <torch/csrc/jit/ir/subgraph_matcher.h>
+ #include <torch/csrc/jit/passes/subgraph_rewrite.h>
+
+ namespace torch {
+ namespace jit {
+ namespace graph_rewrite_helper {
+
+ std::string getFuncName(Value* func_value);
+ Value* getValue(
+ const std::string& name,
+ const std::unordered_map<const Value*, Value*>& match_vmap,
+ const std::unordered_map<std::string, Value*>& vmap);
+ c10::optional<IValue> getIValue(
+ const std::string& name,
+ const std::unordered_map<const Value*, Value*>& match_vmap,
+ const std::unordered_map<std::string, Value*>& vmap);
+ TORCH_API void replaceConvolutionWithAtenConv(std::shared_ptr<Graph>& graph);
+
+ bool isClampFusable(
+ const Match& match,
+ const std::unordered_map<std::string, Value*>& vmap);
+
+ // This struct contains a compiled IR patterns slated for use in the
+ // findPatternMatches function. The struct encapsulates the common
+ // information from parseIR that is used in conjunction with the
+ // pattern matching facility. A const instance of this struct can
+ // also be stored away to cache the compiled IR pattern and reduce
+ // runtime cost
+ struct PatternInfo {
+ std::string pattern_string;
+ std::unique_ptr<Graph> pattern_graph;
+ std::unordered_map<std::string, Value*> vmap;
+ std::vector<MatchFilter> filters;
+
+ static PatternInfo parse_from_str(
+ std::string pattern_string,
+ const std::vector<MatchFilter>& filters = {}) {
+ PatternInfo rv{
+ std::move(pattern_string),
+ std::make_unique<Graph>(),
+ decltype(vmap){},
+ filters};
+ parseIR(rv.pattern_string, rv.pattern_graph.get(), rv.vmap);
+ return rv;
+ }
+ };
+
+ } // namespace graph_rewrite_helper
+ } // namespace jit
+ } // namespace torch
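The PatternInfo helper above is meant to feed the subgraph rewriting machinery pulled in via subgraph_rewrite.h. A hedged sketch of that machinery (not part of the committed file): the pattern and replacement IR strings and the my_ns::mul_relu op name below are purely illustrative.

#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/passes/subgraph_rewrite.h>

void rewriteMulReluExample(std::shared_ptr<torch::jit::Graph>& graph) {
  // Pattern to match: elementwise mul followed by relu.
  const std::string pattern = R"IR(
    graph(%a, %b):
      %c = aten::mul(%a, %b)
      %r = aten::relu(%c)
      return (%r))IR";
  // Replacement: a single (hypothetical) fused op.
  const std::string replacement = R"IR(
    graph(%a, %b):
      %r = my_ns::mul_relu(%a, %b)
      return (%r))IR";

  torch::jit::SubgraphRewriter rewriter;
  rewriter.RegisterRewritePattern(pattern, replacement);
  rewriter.runOnGraph(graph);  // rewrite every matched mul+relu pair in place
}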
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/guard_elimination.h ADDED
@@ -0,0 +1,19 @@
+ #pragma once
+
+ #include <ATen/ATen.h>
+ #include <ATen/core/ivalue.h>
+ #include <ATen/core/jit_type.h>
+ #include <ATen/core/stack.h>
+ #include <torch/csrc/Export.h>
+ #include <torch/csrc/jit/ir/ir.h>
+
+ #include <list>
+ #include <vector>
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API void EliminateRedundantGuards(std::shared_ptr<Graph> graph);
+
+ } // namespace jit
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/hoist_conv_packed_params.h ADDED
@@ -0,0 +1,12 @@
+ #pragma once
+
+ #include <torch/csrc/jit/api/module.h>
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ void HoistConvPackedParams(script::Module& m);
+
+ } // namespace jit
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inline_autodiff_subgraphs.h ADDED
@@ -0,0 +1,15 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API bool canRunWithAutograd(Node* node);
+
+ TORCH_API void InlineAutodiffSubgraphs(
+ std::shared_ptr<Graph>& graph,
+ size_t threshold = 5);
+
+ } // namespace jit
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inline_fork_wait.h ADDED
@@ -0,0 +1,16 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ // Inline Fork and Wait calls. This is used, for example, in ONNX export, where
+ // we do not support the explicit parallelism structures and would rather
+ // just have a flat graph. This inlines the forked section in the fork()
+ // callsite and replaces uses of the result of wait() calls with the values
+ // produced from the (now-inlined) forked section.
+ TORCH_API void InlineForkWait(const std::shared_ptr<Graph>& graph);
+
+ } // namespace jit
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inline_forked_closures.h ADDED
@@ -0,0 +1,12 @@
+ #pragma once
+
+ #include <torch/csrc/Export.h>
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API void inlineForkedClosures(std::shared_ptr<Graph>& to_clean);
+
+ } // namespace jit
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inliner.h ADDED
@@ -0,0 +1,14 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ // Inline function and method calls.
+ TORCH_API void Inline(Graph& graph);
+
+ TORCH_API GraphFunction* tryToGraphFunction(Node* n);
+
+ } // namespace jit
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/insert_guards.h ADDED
@@ -0,0 +1,21 @@
+ #pragma once
+
+ #include <ATen/ATen.h>
+ #include <ATen/core/ivalue.h>
+ #include <ATen/core/jit_type.h>
+ #include <ATen/core/stack.h>
+ #include <torch/csrc/Export.h>
+ #include <torch/csrc/jit/ir/ir.h>
+
+ #include <list>
+ #include <vector>
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API void InsertGuards(std::shared_ptr<Graph> graph);
+
+ TORCH_API void RemoveProfilingNodes(const std::shared_ptr<Graph>& graph);
+
+ } // namespace jit
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/integer_value_refinement.h ADDED
@@ -0,0 +1,12 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ // return true if graph is modified
+ TORCH_API bool RefineIntegerValues(const std::shared_ptr<Graph>& graph);
+
+ } // namespace jit
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lift_closures.h ADDED
@@ -0,0 +1,12 @@
+ #pragma once
+
+ #include <torch/csrc/Export.h>
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API void liftClosures(const std::shared_ptr<Graph>& graph);
+
+ } // namespace jit
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/liveness.h ADDED
@@ -0,0 +1,23 @@
+ #pragma once
+
+ #include <ATen/ATen.h>
+ #include <ATen/core/ivalue.h>
+ #include <ATen/core/jit_type.h>
+ #include <ATen/core/stack.h>
+ #include <c10/util/sparse_bitset.h>
+ #include <torch/csrc/Export.h>
+ #include <torch/csrc/jit/ir/ir.h>
+ #include <list>
+ #include <unordered_map>
+ #include <vector>
+ namespace torch {
+ namespace jit {
+
+ using SparseBitVector = ::c10::SparseBitVector<256>;
+
+ // BuildLivenessSets computes "bailout" liveness which is equivalent to
+ // "{LIVE_IN} or {GEN}" or "{LIVE_OUT} - {KILL}"
+ TORCH_API std::unordered_map<Node*, std::vector<Value*>> BuildLivenessSets(
+ std::shared_ptr<Graph> graph);
+ } // namespace jit
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/loop_unrolling.h ADDED
@@ -0,0 +1,36 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ // return true if graph is modified
+ TORCH_API bool UnrollLoops(std::shared_ptr<Graph>& graph);
+
+ // Only unrolls constant loops. Will unroll them regardless of loop block size
+ TORCH_API bool UnrollConstantLoops(std::shared_ptr<Graph>& graph);
+
+ TORCH_API Node* PeelLoop(Node* n, size_t times);
+
+ // return true if graph is modified
+ TORCH_API bool PeelProfilingLoops(const std::shared_ptr<Graph>& graph);
+
+ struct TORCH_API LoopsPeeler {
+ LoopsPeeler(std::function<bool(Node* n)> callback, size_t num_iterations = 1)
+ : callback_(std::move(callback)), num_iterations_(num_iterations) {}
+
+ bool run(const std::shared_ptr<Graph>& graph);
+
+ private:
+ void collectLoop(Node* n);
+ void collectLoops(Block* block);
+ void peelLoops();
+
+ std::function<bool(Node* n)> callback_ = nullptr;
+ Node* in_loop_ = nullptr;
+ std::list<Node*> loops_to_peel_;
+ size_t num_iterations_ = 1;
+ };
+ } // namespace jit
+ } // namespace torch
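A sketch of the LoopsPeeler helper declared above (not part of the committed file). The callback selects nodes of interest; targeting loops whose bodies contain prim::profile nodes is a guess at a typical use rather than documented behaviour, and the function name is illustrative:

#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/passes/loop_unrolling.h>

bool peelProfiledLoopsExample(const std::shared_ptr<torch::jit::Graph>& graph) {
  torch::jit::LoopsPeeler peeler(
      [](torch::jit::Node* n) {
        // Nodes accepted here mark the loops that become peeling candidates.
        return n->kind() == c10::Symbol::fromQualString("prim::profile");
      },
      /*num_iterations=*/1);
  return peeler.run(graph);  // walk the graph and peel the collected loops
}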
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_grad_of.h ADDED
@@ -0,0 +1,17 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ // This pass removes 'grad_of' nodes, replacing them with conditionals of
+ // the form:
+ // if any_defined(inputs):
+ // outputs = <original_computation>
+ // else:
+ // outputs = undefineds
+ TORCH_API void LowerGradOf(Graph& g);
+
+ } // namespace jit
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_graph.h ADDED
@@ -0,0 +1,22 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ using ModulePtr = c10::intrusive_ptr<c10::ivalue::Object>;
+
+ // Given a graph with of a method which first argument is %self, lower it to a
+ // graph where all attributes accesses are replaced with explicit inputs of the
+ // graph (rather than results of prim::GetAttr executed on %self).
+ //
+ // Returns a tuple (graph, parameters) where the last module.parameters.size()
+ // inputs to the graph are the trainable parameters used in this method. The
+ // remaining inputs are the true inputs to the function.
+ TORCH_API std::pair<std::shared_ptr<Graph>, std::vector<IValue>> LowerGraph(
+ Graph& graph,
+ const ModulePtr& self);
+
+ } // namespace jit
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/metal_rewrite.h ADDED
@@ -0,0 +1,17 @@
+ #pragma once
+ #include <torch/csrc/jit/api/module.h>
+ #include <torch/csrc/jit/ir/ir.h>
+ #include <string>
+ #include <vector>
+
+ namespace torch {
+ namespace jit {
+ TORCH_API void metalInsertPrePackedOps(std::shared_ptr<Graph>& graph);
+ TORCH_API void metalInsertPrePackedOps(script::Module& module);
+ TORCH_API void metalFusePrePackedConvWithClamp(script::Module& module);
+ TORCH_API void metalFoldPrePackingOps(script::Module& module);
+ TORCH_API script::Module metalOptimizeForMobile(
+ const script::Module& module,
+ const std::vector<std::string>& preserved_methods);
+ } // namespace jit
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/mkldnn_rewrite.h ADDED
@@ -0,0 +1,34 @@
+ #pragma once
+
+ #include <ATen/Config.h>
+ #include <torch/csrc/jit/api/module.h>
+ #include <torch/csrc/jit/ir/ir.h>
+ #include <torch/csrc/jit/passes/subgraph_rewrite.h>
+
+ #if AT_MKLDNN_ENABLED()
+
+ #include <ideep/tensor.hpp>
+
+ #endif // AT_MKLDNN_ENABLED()
+
+ namespace torch {
+ namespace jit {
+
+ #if AT_MKLDNN_ENABLED()
+
+ namespace mkldnn {
+
+ const static std::map<std::string, std::vector<torch::jit::MatchFilter>>
+ fusion_rewrite_map = {
+ {"none", {}},
+ {"relu", {}},
+ };
+
+ } // namespace mkldnn
+
+ #endif // AT_MKLDNN_ENABLED()
+
+ void FuseConvWithEltwise(std::shared_ptr<Graph>& graph);
+
+ } // namespace jit
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/normalize_ops.h ADDED
@@ -0,0 +1,18 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ // This pass converts aten ops to a normalized form. It is
+ // run immediately after IR generation in both the tracer and compiler,
+ // so downstream consumers of the IR do not need handle ops in their
+ // pre-normalized form.
+ // Currently only handles normalization of op aliases.
+ TORCH_API void NormalizeOps(const std::shared_ptr<Graph>& graph);
+
+ const std::unordered_map<Symbol, Symbol>& getOperatorAliasMap();
+
+ } // namespace jit
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/onednn_graph_fuser.h ADDED
@@ -0,0 +1,64 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+ #include <torch/csrc/jit/passes/pass_manager.h>
+
+ #include <ATen/Config.h>
+
+ namespace torch {
+ namespace jit {
+ namespace fuser {
+ namespace onednn {
+
+ static std::atomic<bool> onednn_enabled{true};
+
+ static std::atomic<bool>& getLlgaEnabled() {
+ return onednn_enabled;
+ }
+
+ TORCH_API void fuseGraph(std::shared_ptr<Graph>& g);
+
+ } // namespace onednn
+ } // namespace fuser
+
+ struct C10_EXPORT RegisterLlgaFuseGraph
+ : public PassManager<RegisterLlgaFuseGraph> {
+ static bool setEnabled(bool enabled) {
+ TORCH_CHECK(
+ AT_MKLDNN_ENABLED(),
+ "Running oneDNN Graph fuser is only supported with MKLDNN builds.");
+ bool oldState = fuser::onednn::getLlgaEnabled();
+ fuser::onednn::getLlgaEnabled() = enabled;
+ if (enabled) {
+ registerPass(fuser::onednn::fuseGraph);
+ } else {
+ clearPass();
+ }
+ return oldState;
+ }
+
+ static bool isEnabled() {
+ return fuser::onednn::getLlgaEnabled();
+ }
+
+ // override PassManager::registerPass to register pre-pass
+ static bool registerPass(GraphPass p) {
+ if (!isRegistered()) {
+ passID(registerPrePass(std::move(p)), true);
+ isRegistered(true);
+ return false;
+ }
+ return true;
+ }
+
+ // override PassManager::clearPass to clear pre-pass
+ static void clearPass() {
+ if (isRegistered()) {
+ clearPrePass(passID());
+ isRegistered(true);
+ }
+ }
+ };
+
+ } // namespace jit
+ } // namespace torch
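A small usage sketch for the registration helper above (not part of the committed file); it assumes an MKLDNN-enabled build, since setEnabled checks AT_MKLDNN_ENABLED(), and the function name is illustrative:

#include <torch/csrc/jit/passes/onednn_graph_fuser.h>

void toggleLlgaFuserExample() {
  // Enable the oneDNN Graph (LLGA) fusion pre-pass; returns the previous state.
  bool was_enabled = torch::jit::RegisterLlgaFuseGraph::setEnabled(true);

  // ... run or profile TorchScript models here ...

  torch::jit::RegisterLlgaFuseGraph::setEnabled(was_enabled);  // restore prior state
}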
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/onnx.h ADDED
@@ -0,0 +1,28 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+ #include <torch/csrc/onnx/onnx.h>
+ #include <unordered_map>
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API std::shared_ptr<Graph> ToONNX(
+ std::shared_ptr<Graph>& state,
+ ::torch::onnx::OperatorExportTypes operator_export_type);
+ TORCH_API std::unordered_map<Value*, Value*> BlockToONNX(
+ Block* old_block,
+ Block* new_block,
+ ::torch::onnx::OperatorExportTypes operator_export_type,
+ std::unordered_map<Value*, Value*>& env,
+ bool is_sub_block = false);
+ TORCH_API void NodeToONNX(
+ Node* old_node,
+ Block* new_block,
+ ::torch::onnx::OperatorExportTypes operator_export_type,
+ std::unordered_map<Value*, Value*>& env);
+ TORCH_API void RemovePrintOps(std::shared_ptr<Graph>& graph);
+ TORCH_API void PreprocessCaffe2Ops(std::shared_ptr<Graph>& graph);
+
+ } // namespace jit
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/pass_manager.h ADDED
@@ -0,0 +1,136 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ /* `getCustomPrePasses()` returns a vector of passes that will be executed
+ * after differentiation but before any fusion. This is the de-facto location
+ * for compiler backends to insert passes.
+ *
+ * `getCustomPostPasses()` returns a vector of passes that will be
+ * executed after differentiation and after fusion (if any). This is the
+ * location for fusion cleanup passes if they are needed.
+ *
+ * Static registration of a pass can be done by creating a global
+ * `Register{Pre,Post}Pass r(Pass)` variable in a compilation unit.
+ *
+ * pass_manager.h uses a Meyer's singleton to store a vector of `Pass`es, which
+ * modify the IR graph in place.
+ */
+
+ namespace torch {
+ namespace jit {
+
+ // A pass modifies a Graph in place.
+ using GraphPass = std::function<void(std::shared_ptr<Graph>&)>;
+
+ // Since Passes are std::functions, we associate a UUID to each pass, this way
+ // if we want to deregister a pass, we have something to reference it by.
+ using GraphPassNameType = unsigned int;
+
+ // Graph pass entries have a name associated with them
+ using GraphPassEntry = std::pair<GraphPass, GraphPassNameType>;
+
+ // Return currently registered passes. Passes are stored in a static vector
+ TORCH_API std::vector<std::pair<GraphPass, GraphPassNameType>>&
+ getCustomPostPasses();
+ TORCH_API std::vector<std::pair<GraphPass, GraphPassNameType>>&
+ getCustomPrePasses();
+
+ TORCH_API GraphPassNameType registerPostPass(GraphPass p);
+ TORCH_API GraphPassNameType registerPrePass(GraphPass p);
+
+ // Look up pass by name passed in, remove it from registered passes
+ TORCH_API void clearPostPass(GraphPassNameType p);
+ TORCH_API void clearPrePass(GraphPassNameType p);
+
+ // Remove all passes
+ TORCH_API void clearAllPostPasses();
+ TORCH_API void clearAllPrePasses();
+
+ // LEGACY CALL
+ struct TORCH_API RegisterPostPass {
+ RegisterPostPass(GraphPass p);
+ };
+
+ using RegisterPass = RegisterPostPass;
+
+ /*
+ * PassManager is a wrapper on the register/clear PostPass functions above. It
+ * will register the pass provided in "registerPass" and will hold on to its
+ * associated name that way clearPass can be later called and will delete the
+ * pass used to register when called.
+ *
+ * PassManager is templated because we want static variables based on a
+ * particular GraphPass. When deriving from PassManager, you should send as the
+ * template parameter your derived class as you would for the curiously
+ * recurring template pattern. This template parameter isn't actually used and
+ * is simply done to prevent static members from being shared across derived
+ * types.
+ */
+ template <typename DerivedType>
+ struct C10_EXPORT PassManager {
+ private:
+ // We want this class to be abstract because it's
+ virtual void abstract() = 0;
+
+ protected:
+ /*
+ * isRegistered() will return if a pass has been registered
+ * isRegistered(true) will change the value of the internal static bool
+ *
+ * There's an internal static bool to this function to keep track of the
+ * state, this is so when functions are derived from this class, they don't
+ * have to worry about initializing the static members.
+ */
+ static bool isRegistered(bool flip_bit = false) {
+ static bool val = false;
+ if (flip_bit)
+ val = !val;
+ return val;
+ }
+
+ /*
+ * name() will return the name of the registered pass
+ * name(pass_name, true) will set the name of the pass
+ * Similarly to isRegistered we use an internal static variable to hold the
+ * name.
+ */
+ static GraphPassNameType passID(
+ GraphPassNameType PassID = 0,
+ bool set = false) {
+ static GraphPassNameType pass_id = 0;
+ if (set)
+ pass_id = PassID;
+ return pass_id;
+ }
+
+ public:
+ // registerPass(pass) will register the pass provided and set the
+ // name/isRegistered functions appropriately, it returns a bool value
+ // indicating whether the given pass is already registered previously.
+ static bool registerPass(GraphPass p) {
+ if (!isRegistered()) {
+ // If we don't already have a registered pass, register pass
+ // hold on to its name, change isRegistered to true
+ passID(registerPostPass(std::move(p)), true);
+ isRegistered(true);
+ return false;
+ }
+ return true;
+ }
+
+ // Calls ClearPostPass(passID())
+ static void clearPass() {
+ // If the pass is registered, clear it and change isRegistered to false.
+ if (isRegistered()) {
+ clearPostPass(passID());
+ isRegistered(true);
+ }
+ }
+
+ // clang-tidy requires virtual destructor;
+ virtual ~PassManager() = default;
+ };
+
+ } // namespace jit
+ } // namespace torch
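The header's own comment describes static registration through a global Register{Pre,Post}Pass variable. A minimal sketch of that pattern (not part of the committed file; the pass itself is a trivial placeholder that only dumps the IR):

#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/passes/pass_manager.h>

namespace {

// A no-op "pass" that just prints the IR it is given.
void dumpGraphPass(std::shared_ptr<torch::jit::Graph>& graph) {
  graph->dump();
}

// Static registration: the constructor runs at program start-up and appends
// dumpGraphPass to the list returned by getCustomPostPasses().
torch::jit::RegisterPass reg(dumpGraphPass);

} // namespace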