applied-ai-018 committed on
Commit 19dba1c · verified · 1 Parent(s): d104494

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/codegen/cuda/interface.h +58 -0
  2. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/canonicalize.h +22 -0
  3. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/canonicalize_graph_fuser_ops.h +11 -0
  4. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/clear_profiling.h +19 -0
  5. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/clear_undefinedness.h +24 -0
  6. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/concat_opt.h +19 -0
  7. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/constant_propagation.h +32 -0
  8. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/create_autodiff_subgraphs.h +19 -0
  9. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/create_functional_graphs.h +14 -0
  10. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/device_type_analysis.h +13 -0
  11. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/eliminate_no_ops.h +17 -0
  12. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/erase_number_types.h +23 -0
  13. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fold_linear_bn.h +29 -0
  14. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/freeze_module.h +36 -0
  15. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_concat_linear.h +13 -0
  16. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_conv_add_relu_fusion.h +15 -0
  17. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_conv_folding.h +24 -0
  18. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_linear_folding.h +14 -0
  19. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_ops_to_mkldnn.h +15 -0
  20. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fuse_relu.h +11 -0
  21. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/graph_fuser.h +37 -0
  22. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inline_autodiff_subgraphs.h +15 -0
  23. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inline_forked_closures.h +12 -0
  24. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inliner.h +14 -0
  25. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inplace_check.h +11 -0
  26. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/integer_value_refinement.h +12 -0
  27. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/loop_unrolling.h +36 -0
  28. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_grad_of.h +17 -0
  29. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_tuples.h +20 -0
  30. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/mobile_optimizer_type.h +13 -0
  31. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/onnx.h +28 -0
  32. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/pass_manager.h +136 -0
  33. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole.h +20 -0
  34. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_list_idioms.h +72 -0
  35. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/prepack_folding.h +17 -0
  36. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_dropout.h +14 -0
  37. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_inplace_ops.h +14 -0
  38. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_mutation.h +81 -0
  39. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/requires_grad_analysis.h +16 -0
  40. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/shape_analysis.h +43 -0
  41. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/subgraph_rewrite.h +117 -0
  42. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/symbolic_shape_analysis.h +58 -0
  43. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/symbolic_shape_runtime_fusion.h +55 -0
  44. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/tensorexpr_fuser.h +75 -0
  45. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/update_differentiable_graph_requires_grad.h +20 -0
  46. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/value_refinement_utils.h +81 -0
  47. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/xnnpack_rewrite.h +21 -0
  48. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/argument_spec.h +511 -0
  49. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/autodiff.h +94 -0
  50. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/calculate_necessary_args.h +69 -0
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/codegen/cuda/interface.h ADDED
@@ -0,0 +1,58 @@
+ #pragma once
+
+ #include <c10/macros/Export.h>
+ #include <torch/csrc/jit/ir/ir.h>
+ #include <torch/csrc/jit/passes/pass_manager.h>
+ #include <torch/csrc/jit/runtime/profiling_record.h>
+
+ /*
+  * This file contains APIs for cuda fuser;
+  *
+  * We use an empty static struct to hold the function pointers, which are
+  * registered separately. This is to support cpu-only compilation.
+  * Registration is done in torch/csrc/jit/codegen/cuda/register_interface.cpp
+  */
+
+ namespace torch {
+ namespace jit {
+ namespace fuser {
+ namespace cuda {
+
+ TORCH_API std::atomic<bool>& getCudaFusionGuardMode();
+
+ TORCH_API bool getSingletonFusion();
+ TORCH_API bool setSingletonFusion(bool value);
+ TORCH_API bool getHorizontalFusion();
+ TORCH_API bool setHorizontalFusion(bool value);
+
+ // dummy struct to allow API registration
+ struct CudaFuserInterface {
+   void (*fn_compile_n)(Node*) = nullptr;
+   void (*fn_run_n_s)(const Node*, Stack&) = nullptr;
+   void (*fn_fuse_graph)(std::shared_ptr<Graph>&) = nullptr;
+   bool (*fn_can_fuse_n)(const Node*) = nullptr;
+   void (*fn_insert_profile_inodes)(ProfilingRecord* pr) = nullptr;
+   bool (*fn_profile_n)(const Node*) = nullptr;
+   bool (*fn_skip_n)(const std::string&, bool flip) = nullptr;
+ };
+
+ // Get interface, this is used by registration and user facing API internally
+ TORCH_API CudaFuserInterface* getFuserInterface();
+
+ TORCH_API void compileFusionGroup(Node* fusion_node);
+ TORCH_API void runFusionGroup(const Node* fusion_node, Stack& stack);
+ TORCH_API void fuseGraph(std::shared_ptr<Graph>&);
+ TORCH_API bool canFuseNode(const Node* node);
+ TORCH_API void InsertProfileNodesForCUDAFuser(ProfilingRecord* pr);
+ TORCH_API bool profileNode(const Node* node);
+
+ TORCH_API bool skipNode(const std::string& symbol_str, bool flip = true);
+
+ TORCH_API bool isEnabled();
+ TORCH_API bool setEnabled(bool is_enabled);
+ TORCH_API bool canBeEnabled();
+
+ } // namespace cuda
+ } // namespace fuser
+ } // namespace jit
+ } // namespace torch
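As a quick illustration of the registration scheme described in the header comment, a backend obtains the singleton table from getFuserInterface() and fills in its function pointers at static-initialization time. The sketch below is hypothetical (the callback body and translation-unit placement are assumptions); the real registration lives in register_interface.cpp as noted above.

#include <torch/csrc/jit/codegen/cuda/interface.h>

namespace {

// Hypothetical compile callback; a real backend would lower the fusion
// subgraph to a CUDA kernel here.
void compileFusionGroupImpl(torch::jit::Node* /*fusion_node*/) {}

struct RegisterInterface {
  RegisterInterface() {
    auto* fuser = torch::jit::fuser::cuda::getFuserInterface();
    fuser->fn_compile_n = &compileFusionGroupImpl;
    // fn_run_n_s, fn_fuse_graph, fn_can_fuse_n, ... are assigned the same way.
  }
};

// Runs once when the (CUDA-enabled) translation unit is loaded.
static RegisterInterface register_interface;

} // namespace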
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/canonicalize.h ADDED
@@ -0,0 +1,22 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API std::shared_ptr<Graph> Canonicalize(
+     const std::shared_ptr<Graph>& graph,
+     bool keep_unique_names = true);
+
+ TORCH_API void CanonicalizeOutputs(std::shared_ptr<Graph>& graph);
+
+ TORCH_API c10::optional<const Use> firstOrLastUse(Value* v, bool find_first);
+
+ TORCH_API bool isBeforeOrAfter(
+     const Use& a,
+     const Use& b,
+     bool checking_before);
+
+ } // namespace jit
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/canonicalize_graph_fuser_ops.h ADDED
@@ -0,0 +1,11 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API void CanonicalizeOps(const std::shared_ptr<Graph>& graph);
+
+ }
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/clear_profiling.h ADDED
@@ -0,0 +1,19 @@
+ #pragma once
+
+ #include <ATen/ATen.h>
+ #include <ATen/core/ivalue.h>
+ #include <ATen/core/jit_type.h>
+ #include <torch/csrc/Export.h>
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API void unprofileGraphInputs(const std::shared_ptr<Graph>& graph);
+ TORCH_API void unprofileBlock(Block* start_block);
+ // Unprofiles all the node outputs in a block.
+
+ TORCH_API void ClearProfilingInformation(const std::shared_ptr<Graph>& graph);
+
+ } // namespace jit
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/clear_undefinedness.h ADDED
@@ -0,0 +1,24 @@
+ #pragma once
+
+ #include <ATen/ATen.h>
+ #include <ATen/core/ivalue.h>
+ #include <ATen/core/jit_type.h>
+ #include <torch/csrc/Export.h>
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ // Undefinedness makes argument matching fail for regular tensor operations
+ // if 1+ arguments are undefined or possibly undefined tensors.
+ // Technically, undefined tensors are **not** tensors as the regular tensor
+ // operations do not know how to handle them.
+ // However, in practice, there are guards and conversion operators that
+ // **always** gate regular operations if undefined tensors may be present
+ // Eventually, we would love to move to the world where we use optionals
+ // in lieu of undefined tensors.
+ // When this happens, this pass will be removed
+ TORCH_API void ClearUndefinedness(const std::shared_ptr<Graph>& graph);
+
+ } // namespace jit
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/concat_opt.h ADDED
@@ -0,0 +1,19 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ // Eliminates common inputs among `aten::cat` ops.
+ TORCH_API bool EliminateConcatCommonInputs(const std::shared_ptr<Graph>& graph);
+
+ // Expands `aten::cat` ops into `aten::copy` ops and eliminates redudancies
+ // in the buffers used for concatenation if possible.
+ TORCH_API void ExpandConcatAndEliminateRedundancy(
+     const std::shared_ptr<Graph>& graph);
+
+ TORCH_API bool CombineConcats(const std::shared_ptr<Graph>& graph);
+
+ } // namespace jit
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/constant_propagation.h ADDED
@@ -0,0 +1,32 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ // Runs constant propagation on all objects unless ignore_custom_classes is
+ // specified as true, in which case user defined classes are skipped. This is
+ // useful to prevent early fusion of packing operations, which end up lowering
+ // away information about their constructors (e.g. packed::linear_clamp_prepack
+ // and prepacked::conv2d_clamp_prepack)
+ // Returns True if the pass made a change to the graph
+ TORCH_API bool ConstantPropagation(
+     std::shared_ptr<Graph>& graph,
+     bool ignore_custom_classes = false);
+
+ // runs constant propagation only on ops that have non-aliasing inputs & outputs
+ // Returns True if the pass made a change to the graph
+ TORCH_API bool ConstantPropagationImmutableTypes(std::shared_ptr<Graph>& graph);
+
+ // Runs the node if its inputs are constants. Callers of this function must
+ // make their own determination if constant prop is appropriate - for example
+ // non-deterministic ops or ops with side effects. If ignore_custom_classes is
+ // specified, nodes that output user defined classes are not run.
+ TORCH_API c10::optional<Stack> runNodeIfInputsAreConstant(
+     const Node* node,
+     bool ignore_custom_classes = false,
+     AliasDb* db = nullptr);
+
+ } // namespace jit
+ } // namespace torch
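A minimal usage sketch of the two entry points above (assuming `graph` is an already-built graph; the per-node loop is illustrative only):

#include <torch/csrc/jit/passes/constant_propagation.h>

void foldConstants(std::shared_ptr<torch::jit::Graph>& graph) {
  // Whole-graph pass: returns true if anything was folded. Skipping custom
  // classes avoids folding prepacking ops as described in the header comment.
  bool changed =
      torch::jit::ConstantPropagation(graph, /*ignore_custom_classes=*/true);
  (void)changed;

  // Per-node variant: only runs a node whose inputs are all constants; the
  // caller is responsible for deciding whether folding this node is safe.
  for (torch::jit::Node* n : graph->nodes()) {
    if (auto outputs = torch::jit::runNodeIfInputsAreConstant(n)) {
      // `outputs` now holds the node's results as IValues.
    }
  }
}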
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/create_autodiff_subgraphs.h ADDED
@@ -0,0 +1,19 @@
+ #pragma once
+
+ #include <torch/csrc/Export.h>
+ #include <torch/csrc/jit/ir/ir.h>
+
+ #include <cstddef>
+
+ namespace torch {
+ namespace jit {
+
+ // insert GraphExecutor nodes that group together
+ // subgraphs that are differentiable by the jit's autodiff passes
+ // threshold - minimum number of nodes that will appear in a block
+ // returns all differentiable blocks that have been found
+ TORCH_API std::vector<Node*> CreateAutodiffSubgraphs(
+     const std::shared_ptr<Graph>& graph,
+     size_t threshold = 2);
+ } // namespace jit
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/create_functional_graphs.h ADDED
@@ -0,0 +1,14 @@
+ #pragma once
+
+ #include <torch/csrc/Export.h>
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API void CreateFunctionalGraphs(const std::shared_ptr<Graph>& graph);
+
+ TORCH_API void InlineFunctionalGraphs(const std::shared_ptr<Graph>& graph);
+
+ } // namespace jit
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/device_type_analysis.h ADDED
@@ -0,0 +1,13 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+ struct Graph;
+
+ // Propagates Device type info throughout the given graph.
+ TORCH_API bool DeviceTypePropagation(std::shared_ptr<Graph>& graph);
+
+ } // namespace jit
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/eliminate_no_ops.h ADDED
@@ -0,0 +1,17 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ // Remove ops that do nothing on the forward pass (like aten::detach).
+ // This pass is invoked as a part of freeze_module.
+ // This function also takes a set of custom ops to eliminate. All ops in this
+ // set must take their output as their first input, i.e. x = f(x, ...)
+ TORCH_API bool EliminateNoOps(
+     std::shared_ptr<Graph>& graph,
+     std::unordered_set<c10::Symbol> custom_ops = {});
+
+ } // namespace jit
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/erase_number_types.h ADDED
@@ -0,0 +1,23 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ // Erase NumberType information. This is necessary for and only used in
+ // exporting to ONNX. This pass ensures that no remaining Values have
+ // NumberType types, replacing them with tensors.
+ // The following things are done to erase NumberType info:
+ // - NumberType outputs are changed to DynamicType.
+ // - prim::Constant nodes which are numbers get changed into 0-dim tensors of
+ //   the corresponding type
+ // - prim::TensorToNum, aten::Float, aten::Int and prim::NumToTensor nodes
+ //   are erased.
+ //
+ // The pass assumes that DCE will be called sometime after.
+ TORCH_API void EraseNumberTypes(const std::shared_ptr<Graph>& graph);
+ TORCH_API void EraseNumberTypesOnBlock(Block* block);
+
+ } // namespace jit
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fold_linear_bn.h ADDED
@@ -0,0 +1,29 @@
+ #pragma once
+
+ #include <torch/csrc/jit/api/module.h>
+
+ namespace torch {
+ namespace jit {
+
+ struct TORCH_API LinearBNParameters {
+   at::Tensor linear_w;
+   at::Tensor linear_b;
+   at::Tensor bn_rm;
+   at::Tensor bn_rv;
+   double bn_eps = 0.0;
+   at::Tensor bn_w;
+   at::Tensor bn_b;
+ };
+
+ /**
+  * Given the current weight and bias tensors of a Linear module and parameters
+  * of the BatchNorm module we're folding with, compute the updated values
+  * for the weight and bias.
+  *
+  * The function is basically copied from torch/nn/utils/fusion.py
+  */
+ TORCH_API std::tuple<at::Tensor, at::Tensor> computeUpdatedLinearWeightAndBias(
+     const LinearBNParameters& p);
+
+ } // namespace jit
+ } // namespace torch
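A sketch of driving the helper directly with synthetic tensors (the shapes and values below are made up for illustration; real callers pull them out of the Linear and BatchNorm modules being folded):

#include <ATen/ATen.h>
#include <torch/csrc/jit/passes/fold_linear_bn.h>

std::pair<at::Tensor, at::Tensor> foldExample() {
  torch::jit::LinearBNParameters p;
  p.linear_w = at::randn({4, 8});  // [out_features, in_features]
  p.linear_b = at::zeros({4});
  p.bn_rm = at::zeros({4});        // running mean
  p.bn_rv = at::ones({4});         // running variance
  p.bn_eps = 1e-5;
  p.bn_w = at::ones({4});          // batchnorm affine weight
  p.bn_b = at::zeros({4});         // batchnorm affine bias

  at::Tensor new_w, new_b;
  std::tie(new_w, new_b) = torch::jit::computeUpdatedLinearWeightAndBias(p);
  return {new_w, new_b};  // folded weight/bias for the replacement Linear
}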
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/freeze_module.h ADDED
@@ -0,0 +1,36 @@
+ /** \brief This file defines freezing Torchscript module API.
+  *
+  * This API has python-binding and can be invoked directly or as a part of
+  * general optimization pipeline.
+  */
+ #pragma once
+
+ #include <torch/csrc/jit/api/module.h>
+ #include <torch/csrc/jit/ir/ir.h>
+
+ /** \brief Freeze Module, i.e., Assume all attributes are constants.
+  *
+  * Freezing module is a functionality that allows the JIT to internalize
+  * immutable attributes. Combined with inlining, the module is aggressively
+  * optimized and significant overhead is optimized away. The freezeModule API
+  * produces a cloned frozen module.
+  */
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API Module freeze_module(
+     const Module& module,
+     std::vector<std::string> preservedAttrs = std::vector<std::string>(),
+     bool freezeInterfaces = true,
+     bool preserveParameters = false);
+
+ // Clone-free version of freeze_module. This modifies the module inplace.
+ // Use this version to avoid extra memory usage incurred by cloning the module.
+ TORCH_API void freeze_module_inplace(
+     Module* module,
+     std::vector<std::string> preservedAttrs = std::vector<std::string>(),
+     bool freezeInterfaces = true,
+     bool preserveParameters = false);
+ } // namespace jit
+ } // namespace torch
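A minimal sketch of the cloning entry point, assuming the module has already been put in eval mode and that "version" is a hypothetical attribute we want to keep mutable after freezing:

#include <torch/csrc/jit/api/module.h>
#include <torch/csrc/jit/passes/freeze_module.h>

torch::jit::Module freezeForInference(const torch::jit::Module& m) {
  // Attributes listed here survive freezing as mutable attributes.
  std::vector<std::string> preserved = {"version"};  // hypothetical name
  return torch::jit::freeze_module(m, preserved,
                                   /*freezeInterfaces=*/true,
                                   /*preserveParameters=*/false);
}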
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_concat_linear.h ADDED
@@ -0,0 +1,13 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ // Concats multiple linear ops with the same Tensor input
+ // into a single linear op.
+ TORCH_API bool FrozenConcatLinear(std::shared_ptr<Graph>& graph);
+
+ } // namespace jit
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_conv_add_relu_fusion.h ADDED
@@ -0,0 +1,15 @@
+ #pragma once
+
+ #include <torch/csrc/jit/api/module.h>
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API extern std::function<void(std::shared_ptr<Graph>&)>&
+ getFuseFrozenConvAddReluImpl();
+
+ TORCH_API void FuseFrozenConvAddRelu(std::shared_ptr<Graph>& graph);
+
+ } // namespace jit
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_conv_folding.h ADDED
@@ -0,0 +1,24 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ // Fuses Convolution -> Batchnorm into a single Convolution by
+ // folding batchnorm weights into conv weights.
+ // This pass only works on Frozen Graphs; otherwise it is a No-Op.
+ TORCH_API bool FoldFrozenConvBatchnorm(std::shared_ptr<Graph>& graph);
+
+ // Fuses Convolution -> Add/Sub into a single Convolution by
+ // folding add constant tensor into conv weights.
+ // This pass only works on Frozen Graphs; otherwise it is a No-Op.
+ TORCH_API bool FoldFrozenConvAddOrSub(std::shared_ptr<Graph>& graph);
+
+ // Fuses Convolution -> Mul/Div into a single Convolution by
+ // folding add constant tensor into conv weights.
+ // This pass only works on Frozen Graphs; otherwise it is a No-Op.
+ TORCH_API bool FoldFrozenConvMulOrDiv(std::shared_ptr<Graph>& graph);
+
+ } // namespace jit
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_linear_folding.h ADDED
@@ -0,0 +1,14 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ // Fuses Linear -> BatchNormNd into a single Linear by
+ // folding batchnorm weights into linear weights.
+ // This pass only works on Frozen Graphs; otherwise it is a No-Op.
+ TORCH_API bool FoldFrozenLinearBatchnorm(std::shared_ptr<Graph>& graph);
+
+ } // namespace jit
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_ops_to_mkldnn.h ADDED
@@ -0,0 +1,15 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ // Converts operators & their parameters to mkldnn if it is profitable
+ // Currently encompassing Conv2d and Conv3d, and Linear
+ // Op must be in float32 and mkldnn must be built
+ // This pass only works on frozen graph
+ TORCH_API void ConvertFrozenOpsToMKLDNN(std::shared_ptr<Graph>& graph);
+
+ } // namespace jit
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fuse_relu.h ADDED
@@ -0,0 +1,11 @@
+ #pragma once
+
+ #include <torch/csrc/jit/api/module.h>
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+ TORCH_API void FuseAddRelu(script::Module& module);
+ TORCH_API void FuseAddRelu(std::shared_ptr<Graph>& graph);
+ } // namespace jit
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/graph_fuser.h ADDED
@@ -0,0 +1,37 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API bool canFuseOnCPULegacy();
+ TORCH_API void overrideCanFuseOnCPULegacy(bool value);
+
+ // NB: Be sure to run DCE before fusion, because dead instructions
+ // can prevent fusion opportunities from being exploited.
+ // On Windows will noop, NYI
+ TORCH_API void FuseGraph(
+     std::shared_ptr<Graph>& graph,
+     bool strict_fuser_check = false);
+
+ // \brief Custom fusion pass using a node-level callback to
+ // determine the inclusion of nodes in a subgraph.
+ //
+ // This helper omits aliased inputs and fusion across control flow
+ // boundaries.
+ //
+ // \arg graph The graph to be modified in-place
+ // \arg is_fusable A callback run on each fusable node in the graph.
+ // \arg kind The label given to the resultant fused subgraph
+ // \arg arg_limit The maximum number of args the resultant fused subgraph
+ //      should have. Note: This will likely develop into a general
+ //      post condition on the fused subgraph.
+ TORCH_API void CustomFuseGraph(
+     std::shared_ptr<Graph>& graph,
+     const std::function<bool(Node*)>& is_fusable,
+     Symbol kind,
+     size_t arg_limit = std::numeric_limits<size_t>::max());
+
+ } // namespace jit
+ } // namespace torch
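A sketch of how CustomFuseGraph might be driven by a backend-specific callback (the two-op whitelist and the choice of prim::FusionGroup as the subgraph label are assumptions made purely for illustration):

#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/passes/graph_fuser.h>

void fuseEltwiseOnly(std::shared_ptr<torch::jit::Graph>& graph) {
  // Only admit nodes this hypothetical backend can handle.
  auto is_fusable = [](torch::jit::Node* n) {
    return n->kind() == c10::Symbol::fromQualString("aten::relu") ||
           n->kind() == c10::Symbol::fromQualString("aten::add");
  };
  // Group accepted nodes into subgraphs labeled prim::FusionGroup.
  torch::jit::CustomFuseGraph(
      graph, is_fusable, c10::Symbol::fromQualString("prim::FusionGroup"));
}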
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inline_autodiff_subgraphs.h ADDED
@@ -0,0 +1,15 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API bool canRunWithAutograd(Node* node);
+
+ TORCH_API void InlineAutodiffSubgraphs(
+     std::shared_ptr<Graph>& graph,
+     size_t threshold = 5);
+
+ } // namespace jit
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inline_forked_closures.h ADDED
@@ -0,0 +1,12 @@
+ #pragma once
+
+ #include <torch/csrc/Export.h>
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API void inlineForkedClosures(std::shared_ptr<Graph>& to_clean);
+
+ } // namespace jit
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inliner.h ADDED
@@ -0,0 +1,14 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ // Inline function and method calls.
+ TORCH_API void Inline(Graph& graph);
+
+ TORCH_API GraphFunction* tryToGraphFunction(Node* n);
+
+ } // namespace jit
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inplace_check.h ADDED
@@ -0,0 +1,11 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API void CheckInplace(std::shared_ptr<Graph>& graph);
+
+ }
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/integer_value_refinement.h ADDED
@@ -0,0 +1,12 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ // return true if graph is modified
+ TORCH_API bool RefineIntegerValues(const std::shared_ptr<Graph>& graph);
+
+ } // namespace jit
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/loop_unrolling.h ADDED
@@ -0,0 +1,36 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ // return true if graph is modified
+ TORCH_API bool UnrollLoops(std::shared_ptr<Graph>& graph);
+
+ // Only unrolls constant loops. Will unroll them regardless of loop block size
+ TORCH_API bool UnrollConstantLoops(std::shared_ptr<Graph>& graph);
+
+ TORCH_API Node* PeelLoop(Node* n, size_t times);
+
+ // return true if graph is modified
+ TORCH_API bool PeelProfilingLoops(const std::shared_ptr<Graph>& graph);
+
+ struct TORCH_API LoopsPeeler {
+   LoopsPeeler(std::function<bool(Node* n)> callback, size_t num_iterations = 1)
+       : callback_(std::move(callback)), num_iterations_(num_iterations) {}
+
+   bool run(const std::shared_ptr<Graph>& graph);
+
+  private:
+   void collectLoop(Node* n);
+   void collectLoops(Block* block);
+   void peelLoops();
+
+   std::function<bool(Node* n)> callback_ = nullptr;
+   Node* in_loop_ = nullptr;
+   std::list<Node*> loops_to_peel_;
+   size_t num_iterations_ = 1;
+ };
+ } // namespace jit
+ } // namespace torch
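A sketch of using LoopsPeeler directly (the always-true callback is deliberately permissive and only meant to show the shape of the API):

#include <torch/csrc/jit/passes/loop_unrolling.h>

void peelAllLoopsOnce(const std::shared_ptr<torch::jit::Graph>& graph) {
  // The callback selects which loop nodes get peeled; here every loop the
  // peeler visits is accepted, and one iteration is peeled off each.
  torch::jit::LoopsPeeler peeler(
      [](torch::jit::Node* /*n*/) { return true; },
      /*num_iterations=*/1);
  peeler.run(graph);
}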
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_grad_of.h ADDED
@@ -0,0 +1,17 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ // This pass removes 'grad_of' nodes, replacing them with conditionals of
+ // the form:
+ // if any_defined(inputs):
+ //   outputs = <original_computation>
+ // else:
+ //   outputs = undefineds
+ TORCH_API void LowerGradOf(Graph& g);
+
+ } // namespace jit
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_tuples.h ADDED
@@ -0,0 +1,20 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ // removes tuples where TupleConstruct and TupleUnpack are matched
+ // but leaves tuples in place across if statements, loops, and as inputs/outputs
+ TORCH_API void LowerSimpleTuples(const std::shared_ptr<Graph>& graph);
+
+ // removes _all_ tuples and raises an error if some cannot be removed
+ // this is used by ONNX to ensure there are not tuples before conversion,
+ // but will not work on graphs whose inputs contain tuples.
+ TORCH_API void LowerAllTuples(const std::shared_ptr<Graph>& graph);
+
+ TORCH_API void LowerSimpleTuples(Block* block);
+
+ } // namespace jit
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/mobile_optimizer_type.h ADDED
@@ -0,0 +1,13 @@
+ #pragma once
+
+ #include <cstdint>
+
+ enum class MobileOptimizerType : int8_t {
+   CONV_BN_FUSION,
+   INSERT_FOLD_PREPACK_OPS,
+   REMOVE_DROPOUT,
+   FUSE_ADD_RELU,
+   HOIST_CONV_PACKED_PARAMS,
+   CONV_1D_TO_2D,
+   VULKAN_AUTOMATIC_GPU_TRANSFER,
+ };
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/onnx.h ADDED
@@ -0,0 +1,28 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+ #include <torch/csrc/onnx/onnx.h>
+ #include <unordered_map>
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API std::shared_ptr<Graph> ToONNX(
+     std::shared_ptr<Graph>& state,
+     ::torch::onnx::OperatorExportTypes operator_export_type);
+ TORCH_API std::unordered_map<Value*, Value*> BlockToONNX(
+     Block* old_block,
+     Block* new_block,
+     ::torch::onnx::OperatorExportTypes operator_export_type,
+     std::unordered_map<Value*, Value*>& env,
+     bool is_sub_block = false);
+ TORCH_API void NodeToONNX(
+     Node* old_node,
+     Block* new_block,
+     ::torch::onnx::OperatorExportTypes operator_export_type,
+     std::unordered_map<Value*, Value*>& env);
+ TORCH_API void RemovePrintOps(std::shared_ptr<Graph>& graph);
+ TORCH_API void PreprocessCaffe2Ops(std::shared_ptr<Graph>& graph);
+
+ } // namespace jit
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/pass_manager.h ADDED
@@ -0,0 +1,136 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ /* `getCustomPrePasses()` returns a vector of passes that will be executed
+  * after differentiation but before any fusion. This is the de-facto location
+  * for compiler backends to insert passes.
+  *
+  * `getCustomPostPasses()` returns a vector of passes that will be
+  * executed after differentiation and after fusion (if any). This is the
+  * location for fusion cleanup passes if they are needed.
+  *
+  * Static registration of a pass can be done by creating a global
+  * `Register{Pre,Post}Pass r(Pass)` variable in a compilation unit.
+  *
+  * pass_manager.h uses a Meyer's singleton to store a vector of `Pass`es, which
+  * modify the IR graph in place.
+  */
+
+ namespace torch {
+ namespace jit {
+
+ // A pass modifies a Graph in place.
+ using GraphPass = std::function<void(std::shared_ptr<Graph>&)>;
+
+ // Since Passes are std::functions, we associate a UUID to each pass, this way
+ // if we want to deregister a pass, we have something to reference it by.
+ using GraphPassNameType = unsigned int;
+
+ // Graph pass entries have a name associated with them
+ using GraphPassEntry = std::pair<GraphPass, GraphPassNameType>;
+
+ // Return currently registered passes. Passes are stored in a static vector
+ TORCH_API std::vector<std::pair<GraphPass, GraphPassNameType>>&
+ getCustomPostPasses();
+ TORCH_API std::vector<std::pair<GraphPass, GraphPassNameType>>&
+ getCustomPrePasses();
+
+ TORCH_API GraphPassNameType registerPostPass(GraphPass p);
+ TORCH_API GraphPassNameType registerPrePass(GraphPass p);
+
+ // Look up pass by name passed in, remove it from registered passes
+ TORCH_API void clearPostPass(GraphPassNameType p);
+ TORCH_API void clearPrePass(GraphPassNameType p);
+
+ // Remove all passes
+ TORCH_API void clearAllPostPasses();
+ TORCH_API void clearAllPrePasses();
+
+ // LEGACY CALL
+ struct TORCH_API RegisterPostPass {
+   RegisterPostPass(GraphPass p);
+ };
+
+ using RegisterPass = RegisterPostPass;
+
+ /*
+  * PassManager is a wrapper on the register/clear PostPass functions above. It
+  * will register the pass provided in "registerPass" and will hold on to its
+  * associated name that way clearPass can be later called and will delete the
+  * pass used to register when called.
+  *
+  * PassManager is templated because we want static variables based on a
+  * particular GraphPass. When deriving from PassManager, you should send as the
+  * template parameter your derived class as you would for the curiously
+  * recurring template pattern. This template parameter isn't actually used and
+  * is simply done to prevent static members from being shared across derived
+  * types.
+  */
+ template <typename DerivedType>
+ struct C10_EXPORT PassManager {
+  private:
+   // We want this class to be abstract because it's
+   virtual void abstract() = 0;
+
+  protected:
+   /*
+    * isRegistered() will return if a pass has been registered
+    * isRegistered(true) will change the value of the internal static bool
+    *
+    * There's an internal static bool to this function to keep track of the
+    * state, this is so when functions are derived from this class, they don't
+    * have to worry about initializing the static members.
+    */
+   static bool isRegistered(bool flip_bit = false) {
+     static bool val = false;
+     if (flip_bit)
+       val = !val;
+     return val;
+   }
+
+   /*
+    * name() will return the name of the registered pass
+    * name(pass_name, true) will set the name of the pass
+    * Similarly to isRegistered we use an internal static variable to hold the
+    * name.
+    */
+   static GraphPassNameType passID(
+       GraphPassNameType PassID = 0,
+       bool set = false) {
+     static GraphPassNameType pass_id = 0;
+     if (set)
+       pass_id = PassID;
+     return pass_id;
+   }
+
+  public:
+   // registerPass(pass) will register the pass provided and set the
+   // name/isRegistered functions appropriately, it returns a bool value
+   // indicating whether the given pass is already registered previously.
+   static bool registerPass(GraphPass p) {
+     if (!isRegistered()) {
+       // If we don't already have a registered pass, register pass
+       // hold on to its name, change isRegistered to true
+       passID(registerPostPass(std::move(p)), true);
+       isRegistered(true);
+       return false;
+     }
+     return true;
+   }
+
+   // Calls ClearPostPass(passID())
+   static void clearPass() {
+     // If the pass is registered, clear it and change isRegistered to false.
+     if (isRegistered()) {
+       clearPostPass(passID());
+       isRegistered(true);
+     }
+   }
+
+   // clang-tidy requires virtual destructor;
+   virtual ~PassManager() = default;
+ };
+
+ } // namespace jit
+ } // namespace torch
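A minimal registration sketch using the static-registration route described in the header comment (the pass body is an empty placeholder; the dynamic register/clear pair is shown only in comments):

#include <torch/csrc/jit/passes/pass_manager.h>

// Static registration: this post-pass is run on every graph after
// differentiation and fusion. Constructing the global registers it.
static torch::jit::RegisterPass register_my_pass(
    [](std::shared_ptr<torch::jit::Graph>& graph) {
      // inspect or rewrite `graph` in place
      (void)graph;
    });

// Dynamic alternative, keeping the id so the pass can be deregistered later:
//   auto id = torch::jit::registerPostPass(myPass);
//   ...
//   torch::jit::clearPostPass(id);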
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole.h ADDED
@@ -0,0 +1,20 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ // return true if graph is modified
+ TORCH_API bool PeepholeOptimize(
+     const std::shared_ptr<Graph>& graph,
+     bool disable_shape_peepholes = false);
+ // return true if graph is modified
+ TORCH_API bool PeepholeOptimize(
+     Block* block,
+     bool disable_shape_peepholes = false);
+ // return true if graph is modified
+ TORCH_API bool FuseAddMM(const std::shared_ptr<Graph>& graph);
+
+ } // namespace jit
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_list_idioms.h ADDED
@@ -0,0 +1,72 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ // Peephole Optimizes List ops such as len(li) and li[1].
+ // 1. Construct/Unpack optimizations
+ // Given a function like this:
+ //     def foo(a, b):
+ //         li = [a, b]
+ //         x, y = li
+ //         return x, y
+ // This pass produces (after dead code elimination):
+ //     def foo(a, b):
+ //         return a, b
+ //
+ // This is only applied to lists that are not modified.
+ //
+ // 2. getitem optimizations
+ // Given a function like this:
+ //     def foo(a, b):
+ //         li = [a, b]
+ //         x = li[0]
+ //         return x
+ // This pass produces (after dead code elimination):
+ //     def foo(a, b):
+ //         return a
+ //
+ // This optimization can only happen if the list is not modified.
+ //
+ // 3. len optimizations
+ // Given a function like this:
+ //     def foo():
+ //         li = [1, 2]
+ //         return len(li)
+ // This pass produces (after dead code elimination):
+ //     def foo():
+ //         return 2
+ //
+ // This has the same requirements as the getitem optimizations.
+ //
+ // 4. ListConstruct + ListConstruct
+ // Given a function like this:
+ //     def foo():
+ //         return [1, 2] + [3, 4]
+ // This pass produces (after dead code elimination):
+ //     def foo():
+ //         return [1, 2, 3, 4]
+ //
+ // This is only applied to lists that are not modified.
+ //
+ // 5. Slice
+ // Given a function like this:
+ //     def foo():
+ //         return [1, 2, 3, 4, 5][0:2]
+ // This pass produces (after deadcode elimination):
+ //     def foo():
+ //         return [1, 2]
+ //
+ // Currently this is invoked as part of PeepholeOptimize
+ // return true if graph is modified.
+ // If `refine_list_len` is true will attempt to refine the len of lists through
+ // len comparisons and assertions. This does not generally optimize pytorch
+ // programs so it is not called by default in PeepholeOptimize.
+ TORCH_API bool PeepholeOptimizeListIdioms(
+     const std::shared_ptr<Graph>& graph,
+     bool refine_list_len = false);
+
+ } // namespace jit
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/prepack_folding.h ADDED
@@ -0,0 +1,17 @@
+ #pragma once
+
+ #include <torch/csrc/jit/api/module.h>
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ using PrePackingOpsFilterFn = std::function<bool(Node*)>;
+
+ void PrePackingOpsFolder(
+     script::Module& m,
+     const PrePackingOpsFilterFn& is_foldable_op,
+     const std::string& attr_prefix);
+
+ } // namespace jit
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_dropout.h ADDED
@@ -0,0 +1,14 @@
+ #pragma once
+
+ #include <torch/csrc/jit/api/module.h>
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API void removeDropout(std::shared_ptr<Graph>& graph);
+
+ TORCH_API void removeDropout(script::Module& module);
+
+ } // namespace jit
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_inplace_ops.h ADDED
@@ -0,0 +1,14 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ #include <memory>
+
+ namespace torch {
+ namespace jit {
+ // see .cpp for docs
+ TORCH_API void RemoveInplaceOps(const std::shared_ptr<Graph>& graph);
+
+ TORCH_API void ImplicitCastForBinaryInplaceOps(Block* block);
+ } // namespace jit
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_mutation.h ADDED
@@ -0,0 +1,81 @@
+ #pragma once
+
+ #include <c10/util/Exception.h>
+ #include <torch/csrc/Export.h>
+ #include <torch/csrc/jit/ir/alias_analysis.h>
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ struct TORCH_API MutationRemover {
+   MutationRemover(
+       std::shared_ptr<Graph> graph,
+       c10::optional<std::function<bool(Node*)>> mutation_filter = c10::nullopt)
+       : mutation_filter_(mutation_filter),
+         aliasDb_(nullptr),
+         graph_(std::move(graph)) {}
+
+   // return true if graph is modified
+   bool removeListMutation();
+
+   // return true if graph is modified
+   bool removeTensorMutation();
+
+   bool isSpecialMappedOp(Node* n) {
+     return n->matches("aten::zero_(Tensor(a!) self) -> Tensor(a!)") ||
+         n->matches(
+             "aten::fill_.Scalar(Tensor(a!) self, Scalar value) -> Tensor(a!)") ||
+         n->matches(
+             "aten::normal_(Tensor(a!) self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor(a!)");
+   }
+
+   bool inplaceOpVariant(Node* n);
+
+   static bool hasSideEffectOrAlias(Value* v, AliasDb* aliasDb);
+
+  private:
+   Node* createSpecialMappedOp(Node* n);
+   bool listMutationFollowingListConstruct(Node* n);
+   bool tryMakeCreationAndMutationAtomic(
+       Value* mutated_value,
+       Node* mutating_op);
+   bool tryMakeUnaliasedIfOutputAndMutationAtomic(
+       Value* mutated_value,
+       Node* mutating_op);
+   // return true if graph is modified
+   bool RemoveListMutation(Block* block);
+   // return true if graph is modified
+   bool RemoveTensorMutation(Block* block);
+
+   AliasDb* getOrCreateAliasDb() {
+     if (!aliasDb_) {
+       aliasDb_ = std::make_unique<AliasDb>(graph_);
+     }
+     return aliasDb_.get();
+   }
+
+   c10::optional<std::function<bool(Node*)>> mutation_filter_;
+   std::unique_ptr<AliasDb> aliasDb_ = nullptr;
+   std::shared_ptr<Graph> graph_;
+ };
+
+ // Removes list mutation with functional equivalents
+ // return true if graph is modified
+ TORCH_API bool RemoveListMutation(const std::shared_ptr<Graph>& graph);
+
+ // Replaces in-place aten ops with their functional equivalents
+ // when it can be proven that this does not change graph semantics
+ // if `mutation_filter` is present, the pass will only attempt to
+ // remove mutation on nodes which return true for the filter
+ // return true if graph is modified
+ TORCH_API bool RemoveTensorMutation(
+     const std::shared_ptr<Graph>& graph,
+     c10::optional<std::function<bool(Node*)>> mutation_filter = c10::nullopt);
+
+ // Replaces in-place aten activation ops with their functional equivalence
+ TORCH_API bool InplaceToFunctionalActivation(
+     const std::shared_ptr<Graph>& graph);
+
+ } // namespace jit
+ } // namespace torch
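A sketch of the filtered entry point, restricting the rewrite to in-place relu nodes (the choice of aten::relu_ is just an example; omitting the filter targets all removable tensor mutation):

#include <torch/csrc/jit/passes/remove_mutation.h>

void functionalizeRelu(const std::shared_ptr<torch::jit::Graph>& graph) {
  // Only nodes for which the filter returns true are considered for rewrite.
  auto only_relu = [](torch::jit::Node* n) {
    return n->kind() == c10::Symbol::fromQualString("aten::relu_");
  };
  bool changed = torch::jit::RemoveTensorMutation(graph, only_relu);
  (void)changed;  // true if any in-place op was replaced
}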
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/requires_grad_analysis.h ADDED
@@ -0,0 +1,16 @@
+ #pragma once
+
+ #include <torch/csrc/Export.h>
+
+ #include <memory>
+
+ namespace torch {
+ namespace jit {
+
+ struct Graph;
+ struct ArgumentSpec;
+
+ TORCH_API void PropagateRequiresGrad(std::shared_ptr<Graph>& graph);
+
+ } // namespace jit
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/shape_analysis.h ADDED
@@ -0,0 +1,43 @@
+ #pragma once
+
+ #include <torch/csrc/Export.h>
+ #include <torch/csrc/jit/ir/ir.h>
+ #include <memory>
+
+ namespace torch {
+ namespace jit {
+
+ struct Graph;
+
+ struct propagation_error : std::exception {};
+
+ class PropertyPropBase {
+   // Used for both Shape Propagation and Dtype/Device Propagation
+  public:
+   explicit PropertyPropBase(std::shared_ptr<Graph> graph)
+       : graph_(std::move(graph)) {}
+   virtual ~PropertyPropBase() = default;
+
+   void propagateBlock(Block* block, bool insert_expands = true);
+   // insert_expands is used for shape inference
+
+   void processIf(Node* node);
+   void processLoop(Node* node);
+
+  protected:
+   virtual void propagateNode(Node* node, bool insert_expands = true) = 0;
+   void setUnshapedType(Value* o);
+   void setUnshapedType(Node* node);
+   std::shared_ptr<Graph> graph_;
+ };
+
+ TORCH_API void EraseShapeInformation(const std::shared_ptr<Graph>& graph);
+ TORCH_API void PropagateInputShapes(const std::shared_ptr<Graph>& graph);
+
+ TORCH_API bool mergeTypes(
+     ArrayRef<Value*> lhs,
+     ArrayRef<Value*> rhs,
+     ArrayRef<Value*> outputs);
+
+ } // namespace jit
+ } // namespace torch
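A sketch of how a derived propagator plugs into PropertyPropBase: the base class walks blocks, ifs, and loops, and the subclass only decides what a single node does to its outputs. The shape-clearing body below is illustrative, not one of the propagators shipped with this pass.

#include <torch/csrc/jit/passes/shape_analysis.h>

// Illustrative subclass: strips shape information from every node it visits.
class ClearShapesPass : public torch::jit::PropertyPropBase {
 public:
  using PropertyPropBase::PropertyPropBase;

 protected:
  void propagateNode(torch::jit::Node* node, bool /*insert_expands*/) override {
    setUnshapedType(node);  // replace output types with unshaped variants
  }
};

// Usage: ClearShapesPass(graph).propagateBlock(graph->block());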
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/subgraph_rewrite.h ADDED
@@ -0,0 +1,117 @@
+ /** This file defines API for pattern-based subgraph rewrites.
+  *
+  * The API can be used for finding concrete patterns in the model and replacing
+  * the corresponding subgraphs with another subgraph. A special case of such
+  * rewrites is fusion, where the new subgraph consists of just a single node.
+  *
+  * There is a default set of the most common patterns that everyone could use.
+  * Alternatively, an arbitrary pattern can be registered.
+  */
+ #pragma once
+
+ #include <torch/csrc/jit/api/module.h>
+ #include <torch/csrc/jit/ir/ir.h>
+
+ #include <functional>
+ #include <unordered_set>
+ #include <vector>
+
+ namespace torch {
+ namespace jit {
+
+ // Forward declarations.
+ struct RewritePatternDescr;
+ struct Match;
+
+ using MatchFilter = std::function<
+     bool(const Match&, const std::unordered_map<std::string, Value*>&)>;
+
+ /** Run pattern-based subgraph rewrites on all methods in the module.
+  *
+  * This pass will go through all methods in the module and try to replace all
+  * recognized patterns (see SubgraphRewriter::RegisterDefaultPatterns for the
+  * list of these patterns).
+  */
+ TORCH_API Module PatternBasedRewrite(const Module& module);
+
+ /** A class implementing API for pattern-based subgraph rewrites.
+  *
+  * To perform pattern-based subgraph rewrites on a module using this API, one
+  * needs to create an object of such class, register rewrite patterns and run
+  * the transformation pass (`runOnModule`).
+  *
+  * To use standard patterns, one could use `RegisterDefaultPatterns`.
+  *
+  * To enable rewrites of custom patterns, the custom patterns must be registered
+  * with `RegisterRewritePattern`.
+  */
+ class TORCH_API SubgraphRewriter {
+  public:
+   // Run pattern-based subgraph rewrite pass on the module.
+   Module runOnModule(const Module& module);
+
+   // Run pattern-based subgraph rewrite pass on the graph (used in testing).
+   // `filter` is a function that does extra filtering on the match. If it
+   // returns false for a given Match, we'll skip the Match. The filter
+   // function's arguments consist of a Match and a value map from parsing the
+   // pattern graph. Both the Match and the value map are necessary because we
+   // need to 1) do extra filtering on the matched result as well as 2) refer to
+   // the values in the matched result through the values in the pattern graph.
+   void runOnGraph(
+       std::shared_ptr<Graph>& graph,
+       const std::vector<MatchFilter>& filters);
+
+   void runOnGraph(
+       std::shared_ptr<Graph>& graph,
+       const MatchFilter& filter =
+           [](const Match&, const std::unordered_map<std::string, Value*>&) {
+             return true;
+           }) {
+     runOnGraph(graph, std::vector<MatchFilter>({filter}));
+   }
+
+   // Register standard rewrite patterns.
+   void RegisterDefaultPatterns();
+
+   /** Register a custom rewrite pattern.
+    *
+    * The method takes two parameters specifying the pattern:
+    * \p PATTERN - IR string representing the pattern subgraph.
+    * \p REPLACEMENT - IR string representing the replacement subgraph.
+    * \p value name map - vector of pairs mapping values in the replacement graph
+    * to the values in the pattern graph. Used for preserving source range info
+    * across graph rewrite.
+    *
+    * See examples of pattern registering in `RegisterDefaultPatterns`.
+    */
+   void RegisterRewritePattern(
+       const std::string& pattern,
+       const std::string& replacement,
+       const std::vector<std::pair<std::string, std::string>>& value_name_pair =
+           {});
+
+  private:
+   std::vector<RewritePatternDescr> patterns_;
+   std::unordered_set<Node*> nodes_to_delete_;
+
+   void rewriteSinglePatternOnGraph(
+       std::shared_ptr<Graph>& graph,
+       const RewritePatternDescr& pattern,
+       const std::vector<MatchFilter>& filters);
+
+   bool overlapsWithPreviousMatches(const Match* match);
+ };
+
+ /** Rewrite pattern descriptor.
+  *
+  * This structure is used in the implementation of `SubgraphRewriter` and
+  * is not supposed to be used externally.
+  */
+ struct RewritePatternDescr {
+   std::string pattern;
+   std::string replacement;
+   std::unordered_map<std::string, std::string> value_name_map;
+ };
+
+ } // namespace jit
+ } // namespace torch
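A sketch of registering a custom pattern. The relu(relu(x)) -> relu(x) rewrite is chosen because it is schema-simple and semantically safe (relu is idempotent); real patterns must parse against the exact operator schemas in use.

#include <torch/csrc/jit/passes/subgraph_rewrite.h>

void collapseDoubleRelu(std::shared_ptr<torch::jit::Graph>& graph) {
  const std::string pattern = R"IR(
    graph(%x):
      %y = aten::relu(%x)
      %z = aten::relu(%y)
      return (%z))IR";
  const std::string replacement = R"IR(
    graph(%x):
      %z = aten::relu(%x)
      return (%z))IR";

  torch::jit::SubgraphRewriter rewriter;
  rewriter.RegisterRewritePattern(pattern, replacement);
  rewriter.runOnGraph(graph);  // the default MatchFilter accepts every match
}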
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/symbolic_shape_analysis.h ADDED
@@ -0,0 +1,58 @@
+ #pragma once
+
+ #include <torch/csrc/Export.h>
+ #include <torch/csrc/jit/ir/ir.h>
+ #include <unordered_map>
+ #include <utility>
+ #include <variant>
+
+ namespace torch {
+ namespace jit {
+
+ // CAUTION NOT TO BE USED, STILL A WIP, NOT STABLE
+
+ TORCH_API void PropagateShapesOnGraph(std::shared_ptr<Graph>& graph);
+
+ // CAUTION NOT TO BE USED, STILL A WIP, NOT STABLE
+ // From [beg, end) attempt to propagate shapes and
+ // build up a graph that will compute all remaining symbolic
+ // shapes in [beg, end) that can be executed before beg
+
+ struct ShapeComputeGraphMapping {
+   ShapeComputeGraphMapping(
+       std::shared_ptr<Graph> partial_eval_shape_graph,
+       std::unordered_map<Value*, Value*>
+           enclosing_graph_value_to_shape_graph_input,
+       std::unordered_map<Value*, int64_t> graph_output_to_symbolic_shape_dim)
+       : partial_eval_shape_graph(std::move(partial_eval_shape_graph)),
+         enclosing_graph_value_to_shape_graph_input_(
+             std::move(enclosing_graph_value_to_shape_graph_input)),
+         graph_output_to_symbolic_shape_dim_(
+             std::move(graph_output_to_symbolic_shape_dim)){};
+
+   std::shared_ptr<Graph> partial_eval_shape_graph;
+   std::unordered_map<Value*, Value*>
+       enclosing_graph_value_to_shape_graph_input_;
+   std::unordered_map<Value*, int64_t> graph_output_to_symbolic_shape_dim_;
+ };
+
+ TORCH_API c10::optional<ShapeComputeGraphMapping>
+ PropagateShapesAndBuildLargeShapeComputeGraph(
+     std::shared_ptr<Graph>& graph,
+     Node* beg,
+     Node* end);
+
+ // don't insert complete tensor shapes in shape compute graphs and instead
+ // rely on our partial evaluation pipeline to propagate information.
+ // this is a good proxy for our ability to propagate non-complete shape
+ // information.
+ TORCH_API bool setSymbolicShapeAnalysisTestMode(bool value);
+ TORCH_API bool symbolicShapeAnalysisTestModeEnabled();
+
+ using SSAInput = std::variant<IValue, c10::SymbolicShape>;
+ TORCH_API c10::optional<std::vector<c10::SymbolicShape>>
+ calculateSymbolicShapesOnOp(
+     const FunctionSchema* schema,
+     const std::vector<SSAInput>& inputs);
+ } // namespace jit
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/symbolic_shape_runtime_fusion.h ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/Export.h>
4
+ #include <torch/csrc/jit/ir/ir.h>
5
+ #include <torch/csrc/jit/passes/symbolic_shape_analysis.h>
6
+
7
+ #include <unordered_map>
8
+
9
+ namespace torch {
10
+ namespace jit {
11
+
12
+ // Takes in a TensorExprGraph of static shapes and generalizes the input shapes
13
+ // to symbolic dimensions. Dimensions of value 1 will be preserved, otherwise
14
+ // dimensions with the same value will be bucketed to the same symbolic shape.
15
+ // E.g. Tensor(5, 3), Tensor(3, 1) -> Tensor(SS(-1), SS(-2)), Tensor(SS(-2), 1)
16
+ // From there, runs symbolic shape inference on the graph, and creates a
17
+ // versioning if in the graph with prim::TensorExprDynamicGuard checking if
18
+ // the inputs at runtime match the Generalized Symbolic Shapes that are inputs
19
+ // to the TE Kernel. The computate to calculate all symbolic dimensions is
20
+ // inlined in to the if block with the TE Kernel. All Sym Dim Value* are
21
+ // appended to the end of the TE Kernel Graph/Node inputs, and the Node is
22
+ // augmented with a integer list attr `symbolic_shape_inputs` that gives the
23
+ // mapping from Value * -> Symbolic Shape int64_t value. For more lengthy IR
24
+ // examples and walkthrough look at ShapeAnalysisTest.DynamicShapesFusion in
25
+ // `test_shape_analysis` Returns True on Success, False on Failure, can fail if
26
+ // shape propagation fails to propagate # of dims or if complete shapes on
27
+ // inputs not set
28
+
29
+ TORCH_API bool GenerateGuard(
30
+ Node* tensorexpr_graph_node,
31
+ bool add_composed_op = false);
32
+
33
+ TORCH_API void runTensorExprDynamicGroup(const Code& code, Stack& stack);
34
+
35
+ enum class StrideInput {
36
+ // Tensors natively store whether they are contiguous or not as a property
37
+ // this makes it faster to query `is_contiguous` or
38
+ // `is_contiguous(memory_format=channels_last)`
39
+ // than looping through the sizes/strides yourself
40
+ // For tensors with these properties, we only store one value:
41
+ TENSOR_CONT,
42
+ TENSOR_CONT_CHANNELS_LAST,
43
+ // now, we describe other cases, where there is one stride enum
44
+ // per dimension
45
+ S_ONE, // STRIDE_ONE: packed
46
+ S_CONT, // STRIDE_CONTIGUOUS: stride[i + 1] * sizes[i + 1]
47
+ S_TRAN_CONT, // STRIDE_TRANSPOSED_CONTIGUOUS: stride[i-1] * sizes[i-1]
48
+ S_AS_ARG, // STRIDE_AS_ARG: stride passed in as runtime value
49
+ };
50
+
51
+ TORCH_API std::string toString(StrideInput si);
52
+ TORCH_API StrideInput strideInputFromString(const std::string& si);
53
+
54
+ } // namespace jit
55
+ } // namespace torch
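A minimal sketch of the StrideInput encoding in use (an editor's addition, not part of the upstream header): the dynamic-shape guard serializes per-input stride descriptions as strings, and `toString`/`strideInputFromString` round-trip that encoding. `GenerateGuard` itself expects a TensorExprGroup node produced by the fuser, so it is deliberately not constructed here; the helper name `stride_input_demo` is an assumption for the example.

// Editor's sketch, assuming a libtorch build that exposes these internal headers.
#include <torch/csrc/jit/passes/symbolic_shape_runtime_fusion.h>
#include <iostream>

void stride_input_demo() {
  using torch::jit::StrideInput;
  // "This tensor is simply contiguous" is stored as a single enum value.
  StrideInput si = StrideInput::TENSOR_CONT;
  std::string s = torch::jit::toString(si);                 // serialize
  StrideInput back = torch::jit::strideInputFromString(s);  // parse back
  std::cout << s << " round-trips: " << (back == si) << "\n";
}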
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/tensorexpr_fuser.h ADDED
@@ -0,0 +1,75 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/Export.h>
4
+ #include <torch/csrc/jit/ir/ir.h>
5
+ #include <memory>
6
+
7
+ namespace torch {
8
+ namespace jit {
9
+
10
+ // Run TensorExpressions-based fuser.
11
+ // If add_composed_op is true, creates a single operation that
12
+ // performs both the runtime check that types align
13
+ // and then the dispatch to the kernel/unoptimized graph
14
+ TORCH_API void FuseTensorExprs(
15
+ std::shared_ptr<Graph>& graph,
16
+ size_t min_group_size = 2,
17
+ bool add_composed_op = false,
18
+ bool fuse_to_dynamic_shapes = false);
19
+
20
+ TORCH_API void setTensorExprFuserEnabled(bool val);
21
+ TORCH_API bool tensorExprFuserEnabled();
22
+ TORCH_API void setTensorExprDynamicShapeFusionEnabled(bool val);
23
+ TORCH_API bool tensorExprDynamicShapeFusionEnabled();
24
+ TORCH_API bool setTexprReductionsEnabled(bool value);
25
+ TORCH_API bool texprReductionsEnabled();
26
+
27
+ TORCH_API void RemoveProfileNodesAndSpecializeTypes(
28
+ std::shared_ptr<Graph>& graph);
29
+ TORCH_API bool hasTensorTypeSpecialization(Value* v);
30
+ TORCH_API void RemoveTensorTypeSpecializations(std::shared_ptr<Graph>& graph);
31
+ TORCH_API void removeTensorTypeSpecializations(Block* block);
32
+
33
+ using tensor_type_converter_t =
34
+ c10::function_ref<TensorTypePtr(const TensorTypePtr& t)>;
35
+
36
+ // inserts a TypeCheck pattern
37
+ //
38
+ // around the guarded node that has a Subgraph attribute; the inserted pattern is:
39
+ //
40
+ // if TypeCheck(...):
41
+ // guarded_node
42
+ // else:
43
+ // FallbackGraph(...)
44
+ //
45
+ // The TypeCheck includes the types of all Tensor inputs to the guarded_node,
46
+ // as processed by the type_converter, a lambda
47
+ // TensorTypePtr(const TensorTypePtr& t). This allows erasing irrelevant
48
+ // aspects of the type.
49
+ //
50
+ // The Fallback graph will have the same subgraph as the guarded node (with the
51
+ // expectation that the guarded_node's subgraph will then be optimized).
52
+ TORCH_API void insertTypeGuard(
53
+ Node* guarded_node,
54
+ tensor_type_converter_t type_converter,
55
+ c10::Symbol kind);
56
+
57
+ TORCH_API bool usedOnlyInSize(Value* v);
58
+ TORCH_API Value* broadcastSizes(at::ArrayRef<Value*> sizes, AliasDb* db);
59
+
60
+ namespace tensorexpr {
61
+ TORCH_API bool isSupported(Node* node);
62
+
63
+ /// Get the modifiable custom operator set object.
64
+ ///
65
+ /// For static shapes, if a custom operator has been added to the custom
66
+ /// operator set, it will be pulled into the NNC fusion group. But it doesn't
67
+ /// work with dynamic shapes unless the shape function is explicitly registered via
68
+ /// `torch::jit::RegisterShapeComputeGraphForSchema` for the custom operator.
69
+ ///
70
+ /// @return Reference to the custom operator set
71
+ ///
72
+ TORCH_API OperatorSet& getCustomOperatorSet();
73
+ } // namespace tensorexpr
74
+ } // namespace jit
75
+ } // namespace torch
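A small sketch of wiring these entry points together (an editor's addition, not part of the upstream header): parse a tiny pointwise graph, enable the fuser, and run `FuseTensorExprs` with a minimum group size. The IR string and helper name `fuse_demo` are illustrative; in practice the pass operates on profiled graphs with tensor type information, so this only demonstrates the call sequence, not a guaranteed fusion.

// Editor's sketch, assuming a libtorch build that exposes these internal headers.
#include <torch/csrc/jit/ir/irparser.h>
#include <torch/csrc/jit/passes/tensorexpr_fuser.h>

void fuse_demo() {
  using namespace torch::jit;
  auto graph = std::make_shared<Graph>();
  parseIR(R"IR(
    graph(%a : Tensor, %b : Tensor):
      %c : Tensor = aten::mul(%a, %b)
      %d : Tensor = aten::relu(%c)
      return (%d))IR", graph.get());
  setTensorExprFuserEnabled(true);
  // min_group_size = 2: only form groups of at least two supported nodes.
  FuseTensorExprs(graph, /*min_group_size=*/2);
  graph->dump();  // inspect whether a prim::TensorExprGroup was created
}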
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/update_differentiable_graph_requires_grad.h ADDED
@@ -0,0 +1,20 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ // Because differentiable graphs detach the gradients of input Tensors,
9
+ // creating and inlining differentiable graphs changes the requires_grad
10
+ // property of tensors in the graph. This pass updates prim::profiles
11
+ // requires_grad to keep profiled properties up to date, it does not update
12
+ // grad properties of other nodes like graph inputs bc the only downstream
13
+ // user of the grad property is the profiling executor, which just uses
14
+ // the types of prim::profiles
15
+ TORCH_API void UpdateDifferentiableGraphRequiresGrad(
16
+ std::shared_ptr<Graph>& diff_forward_graph,
17
+ c10::optional<bool> new_requires_grad);
18
+
19
+ } // namespace jit
20
+ } // namespace torch
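A minimal sketch of calling this pass (an editor's addition, not part of the upstream header): it forces the profiled requires_grad bit in a forward graph to a fixed value. The parsed IR and the helper name `update_requires_grad_demo` are illustrative; real callers pass graphs produced by the autodiff subgraph machinery, which actually contain prim::profile nodes.

// Editor's sketch, assuming a libtorch build that exposes these internal headers.
#include <torch/csrc/jit/ir/irparser.h>
#include <torch/csrc/jit/passes/update_differentiable_graph_requires_grad.h>

void update_requires_grad_demo() {
  using namespace torch::jit;
  auto graph = std::make_shared<Graph>();
  parseIR(R"IR(
    graph(%x : Tensor):
      %y : Tensor = aten::relu(%x)
      return (%y))IR", graph.get());
  // Mark every prim::profile in the graph as not requiring grad.
  UpdateDifferentiableGraphRequiresGrad(graph, false);
}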
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/value_refinement_utils.h ADDED
@@ -0,0 +1,81 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/jit_type.h>
4
+ #include <torch/csrc/jit/ir/alias_analysis.h>
5
+ #include <torch/csrc/jit/ir/ir_views.h>
6
+ #include <torch/csrc/jit/jit_log.h>
7
+ #include <torch/csrc/jit/passes/dead_code_elimination.h>
8
+ #include <torch/csrc/jit/passes/peephole.h>
9
+ #include <torch/csrc/jit/passes/peephole_list_idioms.h>
10
+ #include <torch/csrc/jit/runtime/graph_executor.h>
11
+
12
+ namespace torch {
13
+ namespace jit {
14
+
15
+ // Refine from Value of type List -> len of list
16
+ // If a refinement mapping of List Value * -> len is present in a block
17
+ // the list is guaranteed to be that length
18
+ // TODO: vector may be faster
19
+ using ListRefinement = std::unordered_map<Value*, int64_t>;
20
+
21
+ TORCH_API ListRefinement
22
+ intersectRefinements(const ListRefinement& ref1, const ListRefinement& ref2);
23
+
24
+ TORCH_API ListRefinement
25
+ unionRefinements(const ListRefinement& ref1, const ListRefinement& ref2);
26
+
27
+ // Represents the refinement information that can be carried on a boolean
28
+ struct BooleanRefinementMapping {
29
+ BooleanRefinementMapping(
30
+ ListRefinement true_refine,
31
+ ListRefinement false_refine)
32
+ : true_refine_(std::move(true_refine)),
33
+ false_refine_(std::move(false_refine)){};
34
+ BooleanRefinementMapping() = default; // empty
35
+
36
+ static BooleanRefinementMapping FalseRefinements(
37
+ ListRefinement false_refine) {
38
+ return BooleanRefinementMapping({}, std::move(false_refine));
39
+ }
40
+
41
+ static BooleanRefinementMapping TrueRefinements(ListRefinement true_refine) {
42
+ return BooleanRefinementMapping(std::move(true_refine), {});
43
+ }
44
+
45
+ BooleanRefinementMapping intersectBooleanRefinementMapping(
46
+ BooleanRefinementMapping& other) {
47
+ return BooleanRefinementMapping(
48
+ intersectRefinements(true_refine_, other.true_refine()),
49
+ intersectRefinements(false_refine_, other.false_refine()));
50
+ }
51
+
52
+ ListRefinement& true_refine() {
53
+ return true_refine_;
54
+ }
55
+
56
+ ListRefinement& false_refine() {
57
+ return false_refine_;
58
+ }
59
+
60
+ private:
61
+ ListRefinement true_refine_;
62
+ ListRefinement false_refine_;
63
+ };
64
+
65
+ TORCH_API void joinIfRefinements(
66
+ Node* if_node,
67
+ std::unordered_set<Block*>& throwing_blocks,
68
+ ListRefinement& curr_block_refinements,
69
+ ListRefinement& true_block_refinements,
70
+ ListRefinement& false_block_refinements,
71
+ std::unordered_map<Value*, BooleanRefinementMapping>& info);
72
+
73
+ // handles adding blocks to throwing blocks and propagating refinements via
74
+ // boolean comparisons
75
+ TORCH_API bool handleCommonRefinentOperators(
76
+ Node* n,
77
+ std::unordered_set<Block*>& throwing_blocks,
78
+ std::unordered_map<Value*, BooleanRefinementMapping>& info);
79
+
80
+ } // namespace jit
81
+ } // namespace torch
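A short sketch of how list-length refinements combine (an editor's addition, not part of the upstream header): facts learned in both branches of an if survive an intersection, and a boolean can carry separate true/false refinement sets. The helper name `refinement_demo` and the stand-in graph inputs are assumptions for the example.

// Editor's sketch, assuming a libtorch build that exposes these internal headers.
#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/passes/value_refinement_utils.h>
#include <iostream>

void refinement_demo() {
  using namespace torch::jit;
  Graph graph;
  Value* a = graph.addInput();
  Value* b = graph.addInput();
  // True branch learned len(a) == 2 and len(b) == 3; false branch only len(a) == 2.
  ListRefinement true_branch{{a, 2}, {b, 3}};
  ListRefinement false_branch{{a, 2}};
  // After the if joins, only facts common to both branches remain.
  ListRefinement joined = intersectRefinements(true_branch, false_branch);
  std::cout << "joined facts: " << joined.size() << "\n";  // expect 1
  // A boolean (e.g. the result of len(a) == 2) can carry refinements too.
  auto mapping = BooleanRefinementMapping::TrueRefinements({{a, 2}});
  std::cout << mapping.true_refine().size() << "\n";
}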
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/xnnpack_rewrite.h ADDED
@@ -0,0 +1,21 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/api/module.h>
4
+ #include <torch/csrc/jit/ir/ir.h>
5
+ #include <torch/csrc/jit/passes/mobile_optimizer_type.h>
6
+
7
+ namespace torch {
8
+ namespace jit {
9
+
10
+ TORCH_API void transformConv1dToConv2d(std::shared_ptr<Graph>& graph);
11
+ TORCH_API void transformConv1dToConv2d(script::Module& module);
12
+ TORCH_API void insertPrePackedOps(std::shared_ptr<Graph>& graph);
13
+ TORCH_API void insertPrePackedOps(script::Module& module);
14
+ TORCH_API void fusePrePackedLinearConvWithClamp(script::Module& module);
15
+ TORCH_API void FoldPrePackingOps(script::Module& module);
16
+ TORCH_API script::Module optimizeForMobile(
17
+ const script::Module& module,
18
+ const std::set<MobileOptimizerType>& optimization_blocklist = {},
19
+ const std::vector<std::string>& preserved_methods = {});
20
+ } // namespace jit
21
+ } // namespace torch
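A sketch of a typical mobile export flow built on these declarations (an editor's addition, not part of the upstream header): load a scripted module, run `optimizeForMobile` with an empty blocklist so prepacking and clamp fusion are allowed to run, and save the result. The file paths, the preserved method name, and the helper name `mobile_optimize_demo` are placeholders.

// Editor's sketch, assuming a libtorch build that exposes these internal headers.
#include <torch/csrc/jit/api/module.h>
#include <torch/csrc/jit/passes/xnnpack_rewrite.h>
#include <torch/csrc/jit/serialization/import.h>

void mobile_optimize_demo() {
  using namespace torch::jit;
  script::Module m = load("model.pt");  // placeholder path
  // Empty blocklist: every optimization in the mobile pipeline may run.
  script::Module optimized = optimizeForMobile(
      m, /*optimization_blocklist=*/{}, /*preserved_methods=*/{"forward"});
  optimized.save("model_mobile.pt");    // placeholder path
}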
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/argument_spec.h ADDED
@@ -0,0 +1,511 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/jit_type.h>
4
+ #include <ATen/core/stack.h>
5
+ #include <c10/util/hash.h>
6
+ #include <c10/util/irange.h>
7
+ #include <torch/csrc/Export.h>
8
+ #include <torch/csrc/autograd/variable.h>
9
+ #include <torch/csrc/jit/ir/ir.h>
10
+ #include <ostream>
11
+ #include <vector>
12
+
13
+ C10_CLANG_DIAGNOSTIC_PUSH()
14
+ #if C10_CLANG_HAS_WARNING("-Wshorten-64-to-32")
15
+ C10_CLANG_DIAGNOSTIC_IGNORE("-Wshorten-64-to-32")
16
+ #endif
17
+
18
+ namespace torch::jit {
19
+
20
+ // GraphExecutor creates specializations of Graphs for different
21
+ // dimensionalitities and types of inputs.
22
+
23
+ struct ArgumentInfo {
24
+ friend struct ArgumentSpec;
25
+ using plain_data_type = uint64_t;
26
+
27
+ bool defined() const {
28
+ return defined_;
29
+ }
30
+ at::Device device() const {
31
+ return at::Device(DeviceType(dev_type_), device_);
32
+ }
33
+ // XXX: It is guaranteed that this will return false when called on non-tensor
34
+ // arguments
35
+ bool requires_grad() const {
36
+ return requires_grad_;
37
+ }
38
+ int dim() const {
39
+ // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
40
+ return dim_;
41
+ }
42
+ at::ScalarType type() const {
43
+ return at::ScalarType(type_);
44
+ }
45
+ TypePtr toType() const {
46
+ if (!defined())
47
+ return TensorType::get();
48
+
49
+ return TensorType::create(
50
+ type(), device(), c10::optional<size_t>(dim()), requires_grad());
51
+ }
52
+ operator TypePtr() const {
53
+ return toType();
54
+ }
55
+
56
+ private:
57
+ unsigned defined_ : 1;
58
+ unsigned requires_grad_ : 1;
59
+ unsigned : 5;
60
+ unsigned dim_ : 8;
61
+ unsigned device_ : 8;
62
+ unsigned type_ : 8;
63
+ unsigned dev_type_ : 16;
64
+ unsigned : 16;
65
+ };
66
+
67
+ static_assert(
68
+ std::is_standard_layout<ArgumentInfo>::value,
69
+ "ArgumentInfo is to be a POD struct");
70
+ static_assert(
71
+ sizeof(ArgumentInfo) == sizeof(ArgumentInfo::plain_data_type),
72
+ "ArgumentInfo is expected to be a 32-bit struct");
73
+
74
+ struct ArgumentSpec {
75
+ ArgumentSpec(size_t num_flat_tensor_inputs, size_t num_flat_optional_inputs)
76
+ : hash_code(c10::hash_combine(
77
+ num_flat_tensor_inputs,
78
+ num_flat_optional_inputs)) {
79
+ tensor_args.reserve(num_flat_tensor_inputs);
80
+ optional_presence.reserve(num_flat_optional_inputs);
81
+ }
82
+
83
+ void addOptional(const IValue& input) {
84
+ bool is_present = !input.isNone();
85
+ optional_presence.push_back(is_present);
86
+ hash_code = c10::hash_combine(hash_code, is_present);
87
+ }
88
+
89
+ void addTensor(const IValue& input, bool with_grad) {
90
+ AT_ASSERT(input.isTensor(), "Expected Tensor but found ", input.tagKind());
91
+ tensor_args.emplace_back();
92
+ auto& arg = tensor_args.back();
93
+ // Initialize all fields to 0. This is convenient, because e.g.
94
+ // requires_grad() can be checked even on tensors AND will make
95
+ // padding bits all 0s.
96
+ std::memset(&arg, 0, sizeof(ArgumentInfo));
97
+
98
+ // [argspec refcounting] reinterpret the IValue to avoid having to refcount
99
+ // the Tensor microbenchmarks
100
+ // https://github.com/zdevito/pytorch/commit/21e7200a0a0fc456bea2f10e95b1781f83933d10
101
+ // show overhead in extra refcounting along this path
102
+ const at::Tensor* t = reinterpret_cast<const at::Tensor*>(&input);
103
+ arg.defined_ = t->defined();
104
+ if (arg.defined_) {
105
+ arg.requires_grad_ = with_grad && autograd::Variable(*t).requires_grad();
106
+ arg.dim_ = t->dim();
107
+ // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
108
+ at::Device device = t->device();
109
+ arg.dev_type_ =
110
+ // NOLINTNEXTLINE(bugprone-signed-char-misuse)
111
+ static_cast<std::underlying_type<DeviceType>::type>(device.type());
112
+ // NOLINTNEXTLINE(bugprone-signed-char-misuse)
113
+ arg.device_ = device.index();
114
+ arg.type_ = static_cast<unsigned>(t->scalar_type());
115
+ }
116
+ combineHash(arg);
117
+ }
118
+
119
+ void combineHash(const ArgumentInfo& arg) {
120
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
121
+ ArgumentInfo::plain_data_type arg_data;
122
+ std::memcpy(&arg_data, &arg, sizeof(ArgumentInfo));
123
+ hash_code = c10::hash_combine(hash_code, arg_data);
124
+ }
125
+
126
+ // equality is fast: check ninputs, and then check the raw array data,
127
+ // there are no size/stride indirections
128
+ // hopefully std::vector<bool> has fast equality
129
+ bool operator==(const ArgumentSpec& spec) const {
130
+ if (optional_presence != spec.optional_presence) {
131
+ return false;
132
+ }
133
+ if (tensor_args.size() != spec.tensor_args.size())
134
+ return false;
135
+ // NB: we need to break out early when there are no elements, because
136
+ // passing a nullptr to memcmp is UB.
137
+ if (tensor_args.empty())
138
+ return true;
139
+ return std::memcmp(
140
+ tensor_args.data(),
141
+ spec.tensor_args.data(),
142
+ tensor_args.size() * sizeof(ArgumentInfo)) == 0;
143
+ }
144
+ bool operator!=(const ArgumentSpec& spec) const {
145
+ return !(*this == spec);
146
+ }
147
+ size_t numTensors() const {
148
+ return tensor_args.size();
149
+ }
150
+ const ArgumentInfo& tensorAt(size_t i) const {
151
+ return tensor_args[i];
152
+ }
153
+ size_t numOptionals() const {
154
+ return optional_presence.size();
155
+ }
156
+ bool isPresent(size_t i) const {
157
+ return optional_presence[i];
158
+ }
159
+ size_t hashCode() const {
160
+ return hash_code;
161
+ }
162
+
163
+ private:
164
+ size_t hash_code; // precomputed on construction
165
+ std::vector<ArgumentInfo> tensor_args;
166
+ std::vector<bool> optional_presence;
167
+ };
168
+
169
+ namespace {
170
+ static constexpr size_t ARG_SPEC_DEPTH_LIMIT = 128;
171
+ }
172
+
173
+ // ArgumentSpecCreator takes an initial graph and comes up with a set
174
+ // of simple instructions to compute the ArgumentSpec given a set of
175
+ // input tensors.
176
+ struct TORCH_API ArgumentSpecCreator {
177
+ // instructs acts on a stack of a list of input IValues
178
+ // at the beginning the stack contains a single list of the inputs to the
179
+ // function the ENTER_ instructs descend into subobjects and push new lists
180
+ // onto the stack
181
+ enum Inst : char {
182
+ ENTER_TUPLE, // consume a tuple ivalue from the top-most list, and push the
183
+ // list of its elements onto the stack as a new list
184
+ ENTER_OBJECT, // same as ENTER_TUPLE, but the input is a class
185
+ LEAVE, // pop the top-most list from the stack
186
+ SKIP, // consume an element from the top-most list, and discard
187
+ SPECIALIZE_OPTIONAL_TENSOR, // consume a optional tensor for the top-most
188
+ // list, and add it to the ArgSpec key being
189
+ // created
190
+ SPECIALIZE_TENSOR, // consume a tensor for the top-most
191
+ // list, and add it to the ArgSpec key being created
192
+ SPECIALIZE_OPTIONAL,
193
+ // consume a nontensor optional from the top-most list,
194
+ // and add it to the ArgSpec key being created
195
+ };
196
+ ArgumentSpecCreator(Graph& graph);
197
+ ArgumentSpec create(bool with_grad, const Stack& stack) const;
198
+ void specializeTypes(Graph& g, const ArgumentSpec& spec) const;
199
+ void dump() const;
200
+ using WrittenSlots = std::unordered_set<std::string>;
201
+
202
+ private:
203
+ void scan(
204
+ const TypePtr& typ,
205
+ size_t depth,
206
+ const WrittenSlots& written_slots);
207
+ size_t num_inputs_;
208
+ size_t num_tensors_ = 0;
209
+ size_t num_optionals_ = 0;
210
+ std::vector<Inst> instructions_;
211
+ };
212
+
213
+ // CompleteArgumentSpec represents one particular specialization.
214
+ // It is designed so that it can be created, hashed, and compared quickly
215
+ // since it is used along the hot-path of the JIT to check if the code
216
+ // we have created is valid for the given inputs.
217
+
218
+ // COmpleteArgumentInfoPOD is only used internally in CompleteArgumentSpec
219
+ // API users should use ArgumentInfo
220
+ struct CompleteArgumentInfoPOD {
221
+ // total size is 64-bit
222
+ unsigned is_tensor : 8; // all other fields are invalid if this is false
223
+ unsigned type : 8; // scalar type
224
+ unsigned defined : 1;
225
+ unsigned requires_grad : 1;
226
+ signed device : 14;
227
+ unsigned dev_type : 16;
228
+ unsigned
229
+ total_dims : 16; // all TensorInfoPODs are in CompleteArgumentSpec's
230
+ // tensor_info() array. total_dims is the total number of
231
+ // dimensions seen so far in all previous members of
232
+ // tensor_info(), including this tensor 2*total_dims
233
+ // becomes the offset into the sizes_strides list for the
234
+ // _next_ tensor in the tensor_info array for tensor 0,
235
+ // the offset is always 0
236
+ };
237
+
238
+ static_assert(
239
+ sizeof(CompleteArgumentInfoPOD) == sizeof(int64_t),
240
+ "CompleteArgumentInfoPOD must be 64-bit struct for CompleteArgumentSpec encoding to work");
241
+
242
+ struct CompleteArgumentInfo;
243
+
244
+ struct CompleteArgumentSpec {
245
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
246
+ CompleteArgumentSpec(bool with_grad, at::ArrayRef<IValue> inputs)
247
+ : hash_code(0), ninputs(inputs.size()) {
248
+ int32_t all_dims = 0;
249
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
250
+ const int32_t num_inputs = inputs.size();
251
+ for (const auto i : c10::irange(num_inputs)) {
252
+ if (!inputs[i].isTensor())
253
+ continue;
254
+ auto& tensor = inputs[i].toTensor();
255
+ all_dims += tensor.defined() ? tensor.ndimension() : 0;
256
+ }
257
+ // allocate enough room for all TensorPODs and dimensions
258
+ data.resize(ninputs + all_dims * 2);
259
+
260
+ // and reinterpret our data array as these structs
261
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
262
+ auto* pods = reinterpret_cast<CompleteArgumentInfoPOD*>(data.data());
263
+ int64_t* next_dim = sizes_strides();
264
+ int32_t total_dims = 0;
265
+ for (const auto i : c10::irange(num_inputs)) {
266
+ auto& pod = pods[i];
267
+ pod.is_tensor = static_cast<uint32_t>(inputs[i].isTensor());
268
+ if (pod.is_tensor) {
269
+ at::Tensor t = inputs[i].toTensor();
270
+ pod.defined = t.defined();
271
+ if (pod.defined) {
272
+ pod.type = static_cast<int>(t.scalar_type());
273
+ // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
274
+ at::Device device = t.device();
275
+ // NOLINTNEXTLINE(bugprone-signed-char-misuse)
276
+ pod.dev_type = static_cast<std::underlying_type<DeviceType>::type>(
277
+ device.type());
278
+ // NOLINTNEXTLINE(bugprone-signed-char-misuse)
279
+ pod.device = device.index();
280
+ pod.requires_grad = with_grad && t.requires_grad();
281
+ total_dims += t.ndimension();
282
+ auto sizes = t.sizes();
283
+ std::copy(sizes.begin(), sizes.end(), next_dim);
284
+ next_dim += sizes.size();
285
+ auto strides = t.strides();
286
+ std::copy(strides.begin(), strides.end(), next_dim);
287
+ next_dim += strides.size();
288
+ }
289
+ }
290
+ // each POD has a running tally of all dimensions including its own
291
+ TORCH_CHECK(
292
+ total_dims < std::numeric_limits<uint16_t>::max(),
293
+ "The number of dims cannot be packed into CompleteArgumentSpec:",
294
+ total_dims);
295
+ pod.total_dims = total_dims;
296
+ }
297
+ // we precompute the hash_code to minimize the time inside of hash
298
+ // table operations where we may need to hold a compiler cache lock.
299
+ hash_code = c10::hash_combine(0, ninputs);
300
+ for (auto d : data) {
301
+ hash_code = c10::hash_combine(hash_code, d);
302
+ }
303
+ }
304
+
305
+ // equality is fast: check ninputs, and then check the raw array data,
306
+ // there are no size/stride indirections
307
+ bool operator==(const CompleteArgumentSpec& spec) const {
308
+ return ninputs == spec.ninputs && data == spec.data;
309
+ }
310
+ bool operator!=(const CompleteArgumentSpec& spec) const {
311
+ return !(*this == spec);
312
+ }
313
+ friend struct CompleteArgumentInfo;
314
+ CompleteArgumentInfo at(size_t i) const;
315
+ size_t size() const {
316
+ return ninputs;
317
+ }
318
+ size_t hashCode() const {
319
+ return hash_code;
320
+ }
321
+
322
+ private:
323
+ ArrayRef<CompleteArgumentInfoPOD> tensor_info() const {
324
+ return ArrayRef<CompleteArgumentInfoPOD>(
325
+ reinterpret_cast<const CompleteArgumentInfoPOD*>(data.data()), ninputs);
326
+ }
327
+ // the start of the sizes_strides information, which comes after the
328
+ // CompleteArgumentInfoPOD list.
329
+ const int64_t* sizes_strides() const {
330
+ return data.data() + ninputs;
331
+ }
332
+ int64_t* sizes_strides() {
333
+ return data.data() + ninputs;
334
+ }
335
+ size_t hash_code; // precomputed on construction
336
+ size_t ninputs;
337
+ // layout is ninputs of TensorPOD (each 64-bit) followed by their size and
338
+ // stride info for 3 tensors:
339
+ // [t0POD][t1POD][t2POD]...
340
+ // [t0 sizes][t0 strides][t1 sizes][t1 strides][t2 sizes][t2 strides]
341
+ std::vector<int64_t> data;
342
+ };
343
+
344
+ // public view of compressed CompleteArgumentInfo
345
+ struct CompleteArgumentInfo {
346
+ CompleteArgumentInfo(const CompleteArgumentSpec& spec, const int i)
347
+ : spec(spec), i(i) {}
348
+ bool isTensor() const {
349
+ return pod(i).is_tensor;
350
+ }
351
+ at::ScalarType type() const {
352
+ return at::ScalarType(pod(i).type);
353
+ }
354
+ bool defined() const {
355
+ return pod(i).defined;
356
+ }
357
+ bool requires_grad() const {
358
+ return pod(i).requires_grad;
359
+ }
360
+ at::Device device() const {
361
+ return at::Device(
362
+ DeviceType(pod(i).dev_type),
363
+ static_cast<c10::DeviceIndex>(pod(i).device));
364
+ }
365
+ int ndimension() const {
366
+ // See [valid range], it is always valid to ask for offset for (i + 1)
367
+ return (sizes_strides_offset(i + 1) - sizes_strides_offset(i)) / 2;
368
+ }
369
+ at::IntArrayRef sizes() const {
370
+ return at::IntArrayRef(
371
+ spec.sizes_strides() + sizes_strides_offset(i), ndimension());
372
+ }
373
+ at::IntArrayRef strides() const {
374
+ int ndim = ndimension();
375
+ return at::IntArrayRef(
376
+ spec.sizes_strides() + sizes_strides_offset(i) + ndim, ndim);
377
+ }
378
+ operator TypePtr() const {
379
+ if (!defined())
380
+ return TensorType::get();
381
+ return TensorType::create(
382
+ type(),
383
+ device(),
384
+ c10::VaryingShape<int64_t>{sizes()},
385
+ c10::VaryingShape<int64_t>{strides()},
386
+ requires_grad());
387
+ }
388
+
389
+ private:
390
+ // offsetinto sizes_strides() array where the sizes start for tensor j
391
+ // [valid range] valid range is [0, ninputs]
392
+ // (i.e. you can ask for the offset at ninputs, which would be the offset of
393
+ // the next tensor if it existed)
394
+ int sizes_strides_offset(int j) const {
395
+ if (j == 0)
396
+ return 0;
397
+ // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
398
+ return 2 * pod(j - 1).total_dims;
399
+ }
400
+ const CompleteArgumentInfoPOD& pod(int j) const {
401
+ return spec.tensor_info().at(j);
402
+ }
403
+ const CompleteArgumentSpec& spec;
404
+ const int i;
405
+ };
406
+
407
+ inline std::ostream& operator<<(std::ostream& out, const ArgumentInfo& info) {
408
+ if (!info.defined()) {
409
+ return out << "<undefined>";
410
+ }
411
+ out << "Tensor(device=" << info.device() << ", type=" << toString(info.type())
412
+ << ", requires_grad=" << info.requires_grad() << ", dims=" << info.dim()
413
+ << ")";
414
+ return out;
415
+ }
416
+
417
+ inline std::ostream& operator<<(std::ostream& out, const ArgumentSpec& spec) {
418
+ out << "{";
419
+ for (const auto i : c10::irange(spec.numTensors())) {
420
+ if (i > 0)
421
+ out << ", ";
422
+ out << spec.tensorAt(i);
423
+ }
424
+ out << "; ";
425
+ for (const auto i : c10::irange(spec.numOptionals())) {
426
+ if (i > 0)
427
+ out << ", ";
428
+ out << spec.isPresent(i);
429
+ }
430
+ out << "}";
431
+ return out;
432
+ }
433
+
434
+ inline std::ostream& operator<<(
435
+ std::ostream& out,
436
+ const CompleteArgumentInfo& info) {
437
+ if (!info.defined()) {
438
+ return out << "<undefined>";
439
+ }
440
+ out << "Tensor(device=" << info.device() << ", type=" << toString(info.type())
441
+ << ", requires_grad=" << info.requires_grad()
442
+ << ", sizes=" << info.sizes() << ", strides=" << info.strides() << ")";
443
+ return out;
444
+ }
445
+
446
+ inline std::ostream& operator<<(
447
+ std::ostream& out,
448
+ const CompleteArgumentSpec& spec) {
449
+ out << "{";
450
+ for (const auto i : c10::irange(spec.size())) {
451
+ if (i > 0)
452
+ out << ", ";
453
+ out << spec.at(i);
454
+ }
455
+ out << "}";
456
+ return out;
457
+ }
458
+
459
+ inline CompleteArgumentInfo CompleteArgumentSpec::at(size_t i) const {
460
+ return CompleteArgumentInfo(*this, i);
461
+ }
462
+
463
+ inline c10::optional<int8_t> convertOptional(
464
+ c10::optional<c10::ScalarType> const& from) {
465
+ return (from) ? c10::optional<int8_t>(static_cast<int8_t>(*from))
466
+ : c10::optional<int8_t>{};
467
+ }
468
+
469
+ } // namespace torch::jit
470
+
471
+ namespace std {
472
+
473
+ template <typename T>
474
+ struct hash<c10::VaryingShape<T>> {
475
+ size_t operator()(const c10::VaryingShape<T>& vs) const {
476
+ return c10::get_hash(
477
+ vs.size(),
478
+ vs.size() ? vs.sizes().value() : std::vector<c10::optional<T>>());
479
+ }
480
+ };
481
+
482
+ template <>
483
+ struct hash<c10::TensorType> {
484
+ size_t operator()(const c10::TensorType& ptt) const {
485
+ return c10::get_hash<
486
+ c10::optional<int8_t>,
487
+ c10::VaryingShape<int64_t>,
488
+ c10::VaryingShape<int64_t>,
489
+ c10::optional<bool>>(
490
+ torch::jit::convertOptional(ptt.scalarType()),
491
+ ptt.sizes(),
492
+ ptt.strides(),
493
+ ptt.requiresGrad());
494
+ }
495
+ };
496
+
497
+ template <>
498
+ struct hash<torch::jit::ArgumentSpec> {
499
+ size_t operator()(const torch::jit::ArgumentSpec& spec) const {
500
+ return spec.hashCode();
501
+ }
502
+ };
503
+ template <>
504
+ struct hash<torch::jit::CompleteArgumentSpec> {
505
+ size_t operator()(const torch::jit::CompleteArgumentSpec& spec) const {
506
+ return spec.hashCode();
507
+ }
508
+ };
509
+ } // namespace std
510
+
511
+ C10_CLANG_DIAGNOSTIC_POP()
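A brief sketch of using `CompleteArgumentSpec` as a cache key (an editor's addition, not part of the upstream header): two specs built from identical inputs hash and compare equal, so they can index an unordered_map of compiled plans, which is essentially how the graph executor uses them. The tensor sizes, the cached value, and the helper name `argument_spec_demo` are illustrative.

// Editor's sketch, assuming a libtorch build that exposes these internal headers.
#include <ATen/ATen.h>
#include <torch/csrc/jit/runtime/argument_spec.h>
#include <unordered_map>
#include <iostream>

void argument_spec_demo() {
  using namespace torch::jit;
  std::vector<c10::IValue> inputs{at::rand({2, 3}), at::rand({2, 3})};
  CompleteArgumentSpec spec(/*with_grad=*/false, inputs);
  CompleteArgumentSpec same(/*with_grad=*/false, inputs);
  std::cout << (spec == same) << "\n";   // 1: same dtypes, sizes, strides
  std::cout << spec.hashCode() << "\n";  // precomputed on construction
  // Typical use: key a cache of compiled execution plans on the spec.
  std::unordered_map<CompleteArgumentSpec, int> plan_cache;
  plan_cache[spec] = 42;
  std::cout << plan_cache.count(same) << "\n";  // 1
}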
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/autodiff.h ADDED
@@ -0,0 +1,94 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/Export.h>
4
+ #include <torch/csrc/jit/ir/ir.h>
5
+
6
+ #include <memory>
7
+ #include <vector>
8
+
9
+ namespace torch::jit {
10
+
11
+ using value_list = std::vector<Value*>;
12
+ // clang-format off
13
+ // Example showcasing how Gradient is constructed:
14
+ //
15
+ // Let's assume we have a function f, `m` and `n` do not require grad
16
+ // (`n` can depend only on `m`):
17
+ // y, n = f(x, m)
18
+ //
19
+ // Now, let's assume that the reverse of f (called f') needs to use values of `x`, `t` and `y`.
20
+ // `t` is an intermediate value produced in the body of f, and let's assume that it requires
21
+ // grad too.
22
+ //
23
+ // In this case differentiate(f) will return this:
24
+ // y, n, t = f(x, m) // `t` is appended to the output list
25
+ // dx = f'(dy, dt, x, t, y) // No `dm` or `dn` because they do not require gradient
26
+ // // All needed values from f are prepended to the input list
27
+ //
28
+ // f_real_outputs = 2 // Only first two outputs were present in f originally
29
+ // df_input_vjps = {0, 2} // i.e. connect grad_fn of y and t variables produced by f,
30
+ // y t // with y's output_nr = 0 and t's output_nr = 1
31
+ // df_input_captures = {I0, O2, O0} // Order matches the prefix of inputs to df
32
+ // x t y
33
+ // df_output_vjps = {0} // i.e. connect next_edge[0] of grad_fn to x's (grad_fn, output_nr).
34
+ //
35
+ // Terminology: vjp = vector-jacobian product
36
+ // clang-format on
37
+
38
+ struct Gradient {
39
+ explicit operator bool() const {
40
+ return df != nullptr;
41
+ }
42
+ std::shared_ptr<Graph> f;
43
+ std::shared_ptr<Graph> df;
44
+
45
+ // Describes how to construct outputs of f from what its graph will return.
46
+ // This is necessary because some trailing outputs are intermediates produced
47
+ // only to be saved for df (and should be ignored).
48
+ size_t f_real_outputs = 0; // initialized for safety.
49
+
50
+ // df inputs are split into two sections: vjps (aka grad_outputs) and
51
+ // captures. VJPs are "seeds" for the gradient computation given for each
52
+ // input capture of an Output kind. Captures are values that need to be saved
53
+ // when f is run. We handle inputs specially, because this allows us to avoid
54
+ // adding extra vjps as df inputs.
55
+
56
+ std::vector<size_t> df_input_vjps; // Offsets into f's outputs.
57
+ // capture can come from inputs or outputs
58
+ std::vector<size_t> df_input_captured_inputs; // Offsets into f's inputs
59
+ std::vector<size_t> df_input_captured_outputs; // Offsets into f's outputs
60
+
61
+ // df will produce vjps for a subset of inputs of f that required grad.
62
+ // df_output_vjps[idx] == inp_idx means that idx-th output of df produces a
63
+ // vjp for inp_idx-th input of f.
64
+ std::vector<size_t> df_output_vjps; // Offsets into f's inputs.
65
+
66
+ // How to use gradient to implement a differentiable autograd function:
67
+ // When running f:
68
+ // - Unwrap input Variables
69
+ // - Run f's graph
70
+ // - Create grad_fn
71
+ // - Wrap outputs in Variables (assume we have a tensor_outputs array):
72
+ // outputs = map(Variable, tensor_output)
73
+ // for i, offset in enumerate(df_input_vjps):
74
+ // outputs[offset].set_grad_fn(grad_fn, output_nr=i)
75
+ // - Use df_output_vjps to connect next_edges of grad_fn:
76
+ // for idx in df_output_vjps:
77
+ // grad_fn.add_next_edge(inputs[idx].gradient_edge())
78
+ // - Save captures for df (care needs to be taken to use SavedVariables for
79
+ // inputs and outputs that we will actually return)
80
+ // - Return outputs[:f_real_outputs]
81
+ //
82
+ // When running df:
83
+ // - Concatenate received vjps and captured Variables
84
+ // - Interpret df
85
+ // - Wrap outputs of df into Variables (that don't require grad)
86
+ };
87
+ TORCH_API Gradient differentiate(std::shared_ptr<Graph>& graph);
88
+
89
+ // can we take a derivative of this node symbolically?
90
+ TORCH_API bool isDifferentiable(const Node* n);
91
+ TORCH_API bool isDifferentiable(Graph& g);
92
+ TORCH_API bool isZero(Value* v);
93
+
94
+ } // namespace torch::jit
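A small sketch of splitting a graph with `differentiate` (an editor's addition, not part of the upstream header): parse a tiny graph, check it is symbolically differentiable, and inspect the resulting forward/backward pair. The IR and the helper name `autodiff_demo` are illustrative; real callers run this on differentiable subgraphs carved out by the profiling executor, which also supply requires_grad information.

// Editor's sketch, assuming a libtorch build that exposes these internal headers.
#include <torch/csrc/jit/ir/irparser.h>
#include <torch/csrc/jit/runtime/autodiff.h>
#include <iostream>

void autodiff_demo() {
  using namespace torch::jit;
  auto graph = std::make_shared<Graph>();
  parseIR(R"IR(
    graph(%x : Tensor, %y : Tensor):
      %z : Tensor = aten::mul(%x, %y)
      return (%z))IR", graph.get());
  if (isDifferentiable(*graph)) {
    Gradient grad = differentiate(graph);
    std::cout << "real outputs: " << grad.f_real_outputs << "\n";
    grad.df->dump();  // the generated backward ("df") graph
  }
}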
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/calculate_necessary_args.h ADDED
@@ -0,0 +1,69 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/Export.h>
4
+ #include <torch/csrc/jit/frontend/schema_matching.h>
5
+ #include <cstddef>
6
+
7
+ namespace torch::jit {
8
+
9
+ // Calculates the number of args that need to be passed in.
10
+ // Fewer args may be needed if defaults are provided.
11
+ // Returns: {number args needed, number of out args}
12
+ inline std::pair<int64_t, int64_t> CalculateNecessaryArgs(
13
+ const std::vector<Argument>& schema_args,
14
+ at::ArrayRef<Value*> actual_inputs,
15
+ bool allow_trailing_out_args) {
16
+ if (schema_args.empty()) {
17
+ return std::make_pair(0, 0);
18
+ }
19
+
20
+ // count number of out arguments
21
+ int64_t schema_idx = static_cast<int64_t>(schema_args.size()) - 1;
22
+ if (allow_trailing_out_args) {
23
+ // skip over out arguments in the end.
24
+ while (schema_idx >= 0) {
25
+ const auto& current_arg = schema_args.at(schema_idx);
26
+ if (!current_arg.is_out()) {
27
+ break;
28
+ }
29
+ schema_idx--;
30
+ }
31
+ }
32
+
33
+ int64_t num_out = static_cast<int64_t>(schema_args.size()) - schema_idx - 1;
34
+
35
+ if (schema_args.size() < actual_inputs.size()) {
36
+ return std::make_pair(actual_inputs.size(), num_out);
37
+ }
38
+
39
+ // if trailing out args are not allowed, reset the index to the last element
40
+ if (!allow_trailing_out_args) {
41
+ schema_idx = schema_args.size() - 1;
42
+ }
43
+ // keeps track of trailing unnecessary args
44
+ while (schema_idx >= 0) {
45
+ // this means it is not a default argument, so it is necessary
46
+ if (!schema_args.at(schema_idx).default_value().has_value()) {
47
+ return std::make_pair(schema_idx + 1, num_out);
48
+ } else {
49
+ auto schema_value =
50
+ schema_args.at(schema_idx).default_value().value().toIValue();
51
+ // non-const value will become nullptr here, so will be marked necessary
52
+ // non-const would include prim::ListConstruct, prim::DictConstruct as
53
+ // well.
54
+ auto actual_value = toIValue(actual_inputs[schema_idx]);
55
+ if (!actual_value.has_value()) {
56
+ return std::make_pair(schema_idx + 1, num_out);
57
+ }
58
+ // if the IR has same value as default value of the schema,
59
+ // it is not necessary argument.
60
+ if (schema_value != actual_value.value()) {
61
+ return std::make_pair(schema_idx + 1, num_out);
62
+ }
63
+ }
64
+ schema_idx--;
65
+ }
66
+ return std::make_pair(0, num_out);
67
+ }
68
+
69
+ } // namespace torch::jit
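A short sketch of how an emitter might call `CalculateNecessaryArgs` (an editor's addition, not part of the upstream header): for an aten::add node whose alpha input matches the schema default of 1, only the first two arguments are necessary. The parsed IR and the helper name `necessary_args_demo` are illustrative assumptions.

// Editor's sketch, assuming a libtorch build that exposes these internal headers.
#include <torch/csrc/jit/ir/irparser.h>
#include <torch/csrc/jit/runtime/calculate_necessary_args.h>
#include <iostream>

void necessary_args_demo() {
  using namespace torch::jit;
  auto graph = std::make_shared<Graph>();
  parseIR(R"IR(
    graph(%x : Tensor, %y : Tensor):
      %one : int = prim::Constant[value=1]()
      %z : Tensor = aten::add(%x, %y, %one)
      return (%z))IR", graph.get());
  Node* add = graph->outputs()[0]->node();
  auto counts = CalculateNecessaryArgs(
      add->schema().arguments(), add->inputs(), /*allow_trailing_out_args=*/false);
  // alpha equals its default (1), so only 2 of the 3 inputs are necessary.
  std::cout << counts.first << " args needed, " << counts.second << " out args\n";
}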