applied-ai-018 committed
Commit b292467 · verified · 1 Parent(s): 19dba1c

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the complete set.
Files changed (50):
  1. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_debug_handler.h +140 -0
  2. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/add_if_then_else.h +11 -0
  3. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/autocast.h +15 -0
  4. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/bailout_graph.h +34 -0
  5. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/batch_mm.h +11 -0
  6. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/common_subexpression_elimination.h +11 -0
  7. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/constant_pooling.h +11 -0
  8. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/dead_code_elimination.h +42 -0
  9. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/decompose_ops.h +11 -0
  10. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/dtype_analysis.h +17 -0
  11. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fixup_trace_scope_blocks.h +47 -0
  12. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fold_conv_bn.h +37 -0
  13. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_graph_optimizations.h +22 -0
  14. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_linear_transpose.h +13 -0
  15. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fuse_linear.h +24 -0
  16. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/graph_rewrite_helper.h +54 -0
  17. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/guard_elimination.h +19 -0
  18. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/hoist_conv_packed_params.h +12 -0
  19. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inline_fork_wait.h +16 -0
  20. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/insert_guards.h +21 -0
  21. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lift_closures.h +12 -0
  22. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/liveness.h +23 -0
  23. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_graph.h +22 -0
  24. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/metal_rewrite.h +17 -0
  25. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/mkldnn_rewrite.h +34 -0
  26. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/normalize_ops.h +18 -0
  27. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/onednn_graph_fuser.h +64 -0
  28. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_alias_sensitive.h +17 -0
  29. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_dict_idioms.h +38 -0
  30. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_non_tensor.h +14 -0
  31. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/dedup_module_uses.h +28 -0
  32. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/finalize.h +63 -0
  33. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/fusion_passes.h +9 -0
  34. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/helper.h +216 -0
  35. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/insert_observers.h +68 -0
  36. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/insert_quant_dequant.h +46 -0
  37. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/quantization_patterns.h +1272 -0
  38. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/quantization_type.h +15 -0
  39. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/register_packed_params.h +20 -0
  40. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/refine_tuple_types.h +12 -0
  41. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_exceptions.h +23 -0
  42. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_expands.h +11 -0
  43. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_redundant_profiles.h +11 -0
  44. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/replacement_of_old_operators.h +16 -0
  45. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/restore_mutation.h +63 -0
  46. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/specialize_autogradzero.h +21 -0
  47. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/symbolic_shape_cache.h +57 -0
  48. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/utils/check_alias_annotation.h +22 -0
  49. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/utils/memory_dag.h +175 -0
  50. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/utils/op_registry.h +31 -0
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_debug_handler.h ADDED
@@ -0,0 +1,140 @@
1
+ #pragma once
2
+ #include <ATen/core/ivalue.h>
3
+
4
+ #include <torch/csrc/jit/backends/backend_detail.h>
5
+ #include <torch/csrc/jit/ir/ir.h>
6
+ #include <torch/csrc/jit/ir/scope.h>
7
+
8
+ #include <atomic>
9
+
10
+ namespace torch {
11
+ namespace jit {
12
+
13
+ /*
14
+ * BackendDebugHandleManager is responsible for issuing debug handles to
15
+ * backends. Debug handles are associated with nodes of a graph.
16
+ * BackendDebugHandleManager also maintains a map
17
+ * [debug-handle, DebugInfoTuple = {source range, inlined callstack ptr]} that
18
+ * will help generate a callstack for exception raised using debug handles.
19
+ * Effectively debug handles are something that is given to backend and later
20
+ * when an exception occurs in the backend, backend can tell, using debug
21
+ * handle, that an exception occurred here. Then the runtime can generate
22
+ * callstack corresponding to the exception.
23
+ * There are two parts to BackendDebugHandleManager:
24
+ * 1. static std::atomic debug_handle
25
+ * 2. Map of [debug-handle, DebugInfoTuple]
26
+ *
27
+ * About 1:
28
+ * Why do they have to be unique. The reason is that by ensuring
29
+ * uniqueness of debug handles, we remove the burden of another layer of
30
+ * mapping where we need to say this set of debug handles were generated for
31
+ * this lowered module or this bytecode function. This simplifies the API for
32
+ * serialization since debug handles can uniquely identify DebugInfoTuple.
33
+ * Thus simplifies the runtime API for throwing exception. Exception throwing
34
+ * only needs to know debug_handle and not which module or method threw it.
35
+ * There are 2 issues to keep in mind, though, for static std::atomic
36
+ * debug_handle: A. Performance implications of using atomic variable. However
37
+ * this is only used for compilation so we assume to absorb some of that
38
+ * penalty. Plus if there is no contention then we should have less to worry
39
+ * about. B. If repeated compilation is part of a long running process then we
40
+ * may overflow int64_t. We may detect and fail on this. For now this is not
41
+ * done.
42
+ *
43
+ * Now about 2:
44
+ * There are two usecases for [debug-handle, DebugInfoTuple]
45
+ * A. During bytecode generation the DebugInfoTuple corresponding to the nodes
46
+ * of the inlined graph being serialized, are stored in this object and a
47
+ * unique debug handle is returned. This unique debug handle is stored in
48
+ * mobile_debug info for pytorch lite models. It will be used for raising
49
+ * exceptions as well as profiling. B. During backend lowering, each backend's
50
+ * preprocess/compile method can compile method's graph and serialize those
51
+ * methods. Once the method is lowered to backend, graph is essentially lost.
52
+ * Without access to graph it is hard to generate model level debug info. Thus
53
+ * the debug handles provide a way to map nodes of the graph to the model level
54
+ * debug info.
55
+ *
56
+ * During byte-code model serialization, [debug-handle, DebugInfoTuple] is
57
+ * serialized. Now we know a. debug handles and b. how to map debug handles to
58
+ * model source code. Thus we can either do eager symbolication by converting
59
+ * debug handles to corresponding source code at runtime, or do lazy
60
+ * symbolication offline.
61
+ *
62
+ * Note that it is not necessary to serialize [debug-handle, DebugInfoTuple]
63
+ * corresponding to lowered backend if the lowering process, that is
64
+ * preprocess/compile, and execution happens in the same session, then eager
65
+ * symbolication can be employed.
66
+ *
67
+ * Now how does BackendDebugHandleManager capture all of the above?
68
+ * By providing two API.
69
+ * 1. getNextDebugHandle which given a Node* returns a unique debug handle,
70
+ * that will uniquely identify DebugInfoTuple.
71
+ * and
72
+ * 2. getCallStackPtrMap which returns the map
73
+ * [debug-handle, DebugInfoTuple]
74
+ *
75
+ * 1 provides debug handles to backends and 2 provides runtime a way to map
76
+ * debug handles to source level debug info.
77
+ *
78
+ * So why does debug handle map to DebugInfoTuple = {source range and inlined
79
+ * cs}? {debug_handle, source_range_tag, serialized_callstack} Take this
80
+ * example: class L(nn.Module): def __init__(self):
81
+ * ...
82
+ * def forward(self, x):
83
+ * return x * 5
84
+ * class M(nn.Module):
85
+ * def __init__(self):
86
+ * ...
87
+ * def forward(self, x):
88
+ * return x - 2
89
+ * class N(nn.Module):
90
+ * def __init__(self):
91
+ * self.m = M()
92
+ * def forward(self, x):
93
+ * return self.m(x) + 3
94
+ * m = torch.jit.script(N())
95
+ * Once you inline m's forward method, m.forward.graph will look something
96
+ * like this
97
+ * graph(%self...):
98
+ * %x = aten::mul(..)
99
+ * %x = aten::sub(x, ..)
100
+ * %y = aten::add(x, ..)
101
+ * ..
102
+ * Inlined callstack ptr for these two nodes will look like:
103
+ * aten::mul's inlined CS (callstack): [N.forward, source range] -> [M.forward,
104
+ * source range] aten::sub's inlined CS (callstack): [N.forward, source range]
105
+ * aten::add's inlined CS: null
106
+ * mul node's inlined CS contains only information about the callsites' source
107
+ * range The information about mul node's source range ('return x * 5') is not
108
+ * available in its inlined CS. It is rather part of node's source range
109
+ * instead of inlined CS. Thus to get full stack: [N.forward, source range] ->
110
+ * [M.forward, source range] -> [aten::mul's source range] We need to track
111
+ * mul's source range and inlined CS both.
112
+ */
113
+
114
+ using BackendDebugInfoMapType =
115
+ std::unordered_map<torch::jit::DebugHandleType, DebugInfoTuple>;
116
+
117
+ /*
118
+ * This class is used to generate debug info map.
119
+ * backend's preprocess will call generate_debug_handles (see
120
+ * backend_detail.cpp), which uses debug_handle_manager to generate debug
121
+ * handles. When lowering process finishes, calling stopRecording will
122
+ * return debug info map from debug_handle_manager
123
+ */
124
+ class TORCH_API BackendDebugInfoRecorder {
125
+ public:
126
+ BackendDebugInfoRecorder() = default;
127
+ int64_t getNextDebugHandle(const Node* node);
128
+ // Reason this is not done as RAII is that work done in stopRecording
129
+ // can throw, and throwing with dtor will call terminate and thus voids any
130
+ // exception catching at a higher level.
131
+ BackendDebugInfoMapType stopRecording();
132
+ NodeToDebugHandle generate_debug_handles(const std::shared_ptr<Graph>& graph);
133
+
134
+ private:
135
+ static std::atomic<DebugHandleType> unique_debug_handle_;
136
+ BackendDebugInfoMapType handles_to_inlined_callstack_ptrs_;
137
+ };
138
+
139
+ } // namespace jit
140
+ } // namespace torch
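
The recorder described above is a two-step API: hand out a handle per node while lowering, then collect the handle-to-debug-info map at the end. A minimal sketch of how a backend's preprocess step might drive it, using only the declarations in this header (the `graph` variable and what is done with each handle are assumptions for illustration):

    // Assumes `graph` is the inlined method graph being lowered to a backend.
    BackendDebugInfoRecorder recorder;
    for (Node* node : graph->nodes()) {
      int64_t handle = recorder.getNextDebugHandle(node);
      // ... record `handle` next to whatever the backend emits for `node` ...
    }
    // Deliberately not RAII (see the comment on stopRecording): it can throw.
    BackendDebugInfoMapType debug_info_map = recorder.stopRecording();
    // debug_info_map maps each handle to its {source range, inlined callstack}.
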
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/add_if_then_else.h ADDED
@@ -0,0 +1,11 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ TORCH_API bool AddIfThenElseOp(std::shared_ptr<Graph>& graph);
9
+
10
+ } // namespace jit
11
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/autocast.h ADDED
@@ -0,0 +1,15 @@
1
+
2
+ #pragma once
3
+
4
+ #include <torch/csrc/jit/ir/ir.h>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+
9
+ TORCH_API void Autocast(const std::shared_ptr<Graph>& graph);
10
+
11
+ TORCH_API bool setAutocastMode(bool value);
12
+ TORCH_API bool autocastEnabled();
13
+
14
+ } // namespace jit
15
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/bailout_graph.h ADDED
@@ -0,0 +1,34 @@
1
+ #pragma once
2
+
3
+ #include <ATen/ATen.h>
4
+ #include <ATen/core/ivalue.h>
5
+ #include <ATen/core/jit_type.h>
6
+ #include <ATen/core/stack.h>
7
+ #include <torch/csrc/Export.h>
8
+ #include <torch/csrc/jit/ir/ir.h>
9
+
10
+ #include <list>
11
+ #include <vector>
12
+
13
+ namespace torch {
14
+ namespace jit {
15
+
16
+ // Replaces prim::Guard nodes with prim::BailOut nodes and
17
+ // computes sets of inputs needed to resume execution at
18
+ // bailout points
19
+ TORCH_API void InsertBailOuts(std::shared_ptr<Graph> graph);
20
+
21
+ // Builds a bailout graph into `target` (which is an empty graph)
22
+ // for a given bailout point `bailout_index`
23
+ // from the original graph `orig` (the original unoptimized graph)
24
+ // BailOut graphs allow Interpreter to resume
25
+ // execution of the (un/de)optimized graph (i.e.
26
+ // a graph that doesn't rely on any assumptions derived from
27
+ // on profiling information) from a given BailOut point
28
+ // should any of the assumptions fail for an actual input.
29
+ TORCH_API std::shared_ptr<Graph> BuildBailOutGraphFrom(
30
+ int64_t bailout_index,
31
+ const std::shared_ptr<Graph>& orig,
32
+ const std::shared_ptr<Graph>& target);
33
+ } // namespace jit
34
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/batch_mm.h ADDED
@@ -0,0 +1,11 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ TORCH_API void BatchMM(std::shared_ptr<Graph>& graph);
9
+
10
+ }
11
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/common_subexpression_elimination.h ADDED
@@ -0,0 +1,11 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ TORCH_API bool EliminateCommonSubexpression(
9
+ const std::shared_ptr<Graph>& graph);
10
+ }
11
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/constant_pooling.h ADDED
@@ -0,0 +1,11 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ TORCH_API void ConstantPooling(const std::shared_ptr<Graph>& graph);
9
+
10
+ }
11
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/dead_code_elimination.h ADDED
@@ -0,0 +1,42 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ // If given a top-level graph, DCE will construct and use alias analysis, which allows
9
+ // for "smarter" dead code elimination (we will eliminate mutable ops if we can
10
+ // prove the mutated values are not used). Otherwise, we will not allow DCE to
11
+ // eliminate mutable ops.
12
+ //
13
+ // So, prefer to use the graph version if you can.
14
+ enum class DCESideEffectPolicy : uint8_t {
15
+ // default behavior: dead code elimination will check if a node has side
16
+ // effects
17
+ // and not delete it if it does.
18
+ DONT_DELETE_NODES_WITH_SIDE_EFFECTS,
19
+ // with this flag, dead code elimination will not check if a node has side
20
+ // effects and treat nodes with side effects like any other node,
21
+ // i.e. delete them if their outputs aren't used anywhere.
22
+ ALLOW_DELETING_NODES_WITH_SIDE_EFFECTS
23
+ };
24
+
25
+ TORCH_API void EliminateDeadCode(
26
+ const std::shared_ptr<Graph>& graph,
27
+ DCESideEffectPolicy sideEffectPolicy =
28
+ DCESideEffectPolicy::DONT_DELETE_NODES_WITH_SIDE_EFFECTS);
29
+ TORCH_API void EliminateDeadCode(
30
+ Block* block,
31
+ bool recurse = true,
32
+ DCESideEffectPolicy sideEffectPolicy =
33
+ DCESideEffectPolicy::DONT_DELETE_NODES_WITH_SIDE_EFFECTS);
34
+
35
+ // Invoke the user-provided callback on all live values before deleting anything
36
+ TORCH_API void EliminateDeadCode(
37
+ Block* block,
38
+ std::function<void(const std::unordered_set<const Value*>&)> cb,
39
+ DCESideEffectPolicy sideEffectPolicy =
40
+ DCESideEffectPolicy::DONT_DELETE_NODES_WITH_SIDE_EFFECTS);
41
+ } // namespace jit
42
+ } // namespace torch
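
The policy enum above is the only knob; a short hedged sketch (the `graph` variable is illustrative) of how the graph-level and block-level overloads differ:

    // Default policy: nodes with side effects are never deleted.
    EliminateDeadCode(graph);

    // Block-level overload, opting in to deleting side-effecting nodes
    // whose outputs are unused.
    EliminateDeadCode(
        graph->block(),
        /*recurse=*/true,
        DCESideEffectPolicy::ALLOW_DELETING_NODES_WITH_SIDE_EFFECTS);
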
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/decompose_ops.h ADDED
@@ -0,0 +1,11 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ TORCH_API void DecomposeOps(std::shared_ptr<Graph>& graph);
9
+
10
+ }
11
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/dtype_analysis.h ADDED
@@ -0,0 +1,17 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/Export.h>
4
+ #include <torch/csrc/jit/ir/ir.h>
5
+ #include <memory>
6
+
7
+ namespace torch {
8
+ namespace jit {
9
+ struct Graph;
10
+
11
+ // Propagate tensor properties (e.g., dtype, device, is_contiguous, layout)
12
+ // propagation on all tensor objects. Currently, we only support dtype
13
+ // propagation
14
+ TORCH_API bool DtypePropagation(std::shared_ptr<Graph>& graph);
15
+
16
+ } // namespace jit
17
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fixup_trace_scope_blocks.h ADDED
@@ -0,0 +1,47 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/api/module.h>
4
+ #include <torch/csrc/jit/ir/ir.h>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+
9
+ // Directly after tracing, we have an ill-formed graph with blocks inserted.
10
+ // Example:
11
+ //
12
+ // graph(%self : ClassType<Module>,
13
+ // %input.1 : Float(3, 4)):
14
+ // %1 : ClassType<Module> = prim::GetAttr[name="relu1"](%self)
15
+ // %2 : ClassType<Module> = prim::GetAttr[name="relu2"](%self)
16
+ // %3 : ClassType<Module> = prim::GetAttr[name="rrr"](%2)
17
+ // = prim::TracedModuleForward[scope="__module.relu1"]()
18
+ // block0():
19
+ // %input : Float(3, 4) = aten::relu(%input.1),
20
+ // -> ()
21
+ // = prim::TracedModuleForward[scope="__module.relu2"](),
22
+ // block0():
23
+ // = prim::TracedModuleForward[scope="__module.relu2.rrr"](),
24
+ // block0():
25
+ // %6 : Float(3, 4) = aten::relu(%input),
26
+ // -> ()
27
+ // -> ()
28
+ // return (%6)
29
+ //
30
+ // In this pass, we:
31
+ // 1) Lift Value defs to as high of a scope as needed to ensure that
32
+ // they dominate all their uses. For example, `input` in the above
33
+ // graph needs to be lifted to the top-level block so that its use
34
+ // in the second `relu` operator is dominated.
35
+ // 2) Lambda lift the blocks. This ensures that all values used within
36
+ // each scope have their defs captured.
37
+ // 3) Convert the scope blocks into methods on their respective Modules,
38
+ // and convert TracedModuleForward nodes to CallMethod nodes into those
39
+ // methods.
40
+ //
41
+ // Then, we'll have a well-formed graph with proper method calls.
42
+ TORCH_API void FixupTraceScopeBlocks(
43
+ std::shared_ptr<Graph>& graph,
44
+ Module* self);
45
+
46
+ } // namespace jit
47
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fold_conv_bn.h ADDED
@@ -0,0 +1,37 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/api/module.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ /** \brief Fold Conv2d-BatchNorm2d into Conv2d in all methods of this
9
+ * module and all its submodules, forward is included by default.
10
+ *
11
+ * The weight and bias of the Conv2d are correspondingly updated. Should only be
12
+ * used on modules in eval mode.
13
+ */
14
+ TORCH_API Module FoldConvBatchNorm(const Module& module);
15
+
16
+ struct TORCH_API ConvBNParameters {
17
+ at::Tensor conv_w;
18
+ at::Tensor conv_b;
19
+ at::Tensor bn_rm;
20
+ at::Tensor bn_rv;
21
+ double bn_eps = 0.0;
22
+ at::Tensor bn_w;
23
+ at::Tensor bn_b;
24
+ };
25
+
26
+ /**
27
+ * Given the current weight and bias tensors of a Conv module and parameters
28
+ * of the BatchNorm module we're folding with, compute the updated values
29
+ * for the weight and bias.
30
+ *
31
+ * The function is basically copied from torch/nn/utils/fusion.py
32
+ */
33
+ TORCH_API std::tuple<at::Tensor, at::Tensor> computeUpdatedConvWeightAndBias(
34
+ const ConvBNParameters& p);
35
+
36
+ } // namespace jit
37
+ } // namespace torch
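
The update that computeUpdatedConvWeightAndBias performs is the usual conv-batchnorm folding algebra (the same math as torch/nn/utils/fusion.py). A sketch of that math in terms of the ConvBNParameters fields, assuming a 2d convolution for the broadcast shape:

    // bn_var_rsqrt = 1 / sqrt(running_var + eps)
    at::Tensor bn_var_rsqrt = at::rsqrt(p.bn_rv + p.bn_eps);
    // Scale each output channel of the conv weight by gamma / sqrt(var + eps).
    at::Tensor new_w = p.conv_w * (p.bn_w * bn_var_rsqrt).reshape({-1, 1, 1, 1});
    // Shift the bias so the folded conv reproduces conv followed by batchnorm.
    at::Tensor new_b = (p.conv_b - p.bn_rm) * bn_var_rsqrt * p.bn_w + p.bn_b;
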
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_graph_optimizations.h ADDED
@@ -0,0 +1,22 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ /** \brief Runs a set of Optimizations that Optimize Frozen Graphs
6
+ *
7
+ * Currently this set of optimizations is:
8
+ * - FoldFrozenConvBatchnorm
9
+ * - FoldFrozenConvAddOrSub
10
+ * - FoldFrozenConvMulOrDiv
11
+ * - FoldFrozenLinearBatchnorm
12
+ */
13
+
14
+ namespace torch {
15
+ namespace jit {
16
+
17
+ TORCH_API void OptimizeFrozenGraph(
18
+ std::shared_ptr<Graph>& graph,
19
+ bool optimize_numerics = true);
20
+
21
+ } // namespace jit
22
+ } // namespace torch
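
These folds assume the weights are already constants in the graph, so the pass is meant to run on a frozen module's graph. A hedged sketch (the `frozen` module is assumed to come from torch::jit::freeze_module or an equivalent freezing step):

    std::shared_ptr<Graph> g = frozen.get_method("forward").graph();
    // Folds conv+batchnorm, conv+add/sub, conv+mul/div and linear+batchnorm.
    OptimizeFrozenGraph(g, /*optimize_numerics=*/true);
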
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_linear_transpose.h ADDED
@@ -0,0 +1,13 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ // Transposes the weight matrix for frozen linear modules.
9
+ // and converts it into a matmul
10
+ TORCH_API bool FrozenLinearTranspose(std::shared_ptr<Graph>& graph);
11
+
12
+ } // namespace jit
13
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fuse_linear.h ADDED
@@ -0,0 +1,24 @@
1
+ /** \brief Fusing linear patterns as single at::linear for easier pattern
2
+ * matching in later passes
3
+ */
4
+ #pragma once
5
+
6
+ #include <torch/csrc/jit/ir/ir.h>
7
+
8
+ namespace torch {
9
+ namespace jit {
10
+
11
+ /** \brief Match the at::linear pattern and fuse it into a single at::linear
12
+ * This pass fuses the addmm or matmul + add generated by the JIT back to linear
13
+ * This pass can be deleted once the JIT can emit the aten::linear in the future
14
+ */
15
+ TORCH_API void FuseLinear(std::shared_ptr<Graph>& graph);
16
+
17
+ /** Swap functional linear CallFunctions to aten::linear
18
+ */
19
+ TORCH_API void SwapFunctionalLinear(std::shared_ptr<Graph>& graph);
20
+ /** Swap all functional linear CallFunctions in module
21
+ */
22
+ TORCH_API void SwapFunctionalLinear(Module& module);
23
+ } // namespace jit
24
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/graph_rewrite_helper.h ADDED
@@ -0,0 +1,54 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+ #include <torch/csrc/jit/ir/irparser.h>
5
+ #include <torch/csrc/jit/ir/subgraph_matcher.h>
6
+ #include <torch/csrc/jit/passes/subgraph_rewrite.h>
7
+
8
+ namespace torch {
9
+ namespace jit {
10
+ namespace graph_rewrite_helper {
11
+
12
+ std::string getFuncName(Value* func_value);
13
+ Value* getValue(
14
+ const std::string& name,
15
+ const std::unordered_map<const Value*, Value*>& match_vmap,
16
+ const std::unordered_map<std::string, Value*>& vmap);
17
+ c10::optional<IValue> getIValue(
18
+ const std::string& name,
19
+ const std::unordered_map<const Value*, Value*>& match_vmap,
20
+ const std::unordered_map<std::string, Value*>& vmap);
21
+ TORCH_API void replaceConvolutionWithAtenConv(std::shared_ptr<Graph>& graph);
22
+
23
+ bool isClampFusable(
24
+ const Match& match,
25
+ const std::unordered_map<std::string, Value*>& vmap);
26
+
27
+ // This struct contains a compiled IR patterns slated for use in the
28
+ // findPatternMatches function. The struct encapsulates the common
29
+ // information from parseIR that is used in conjunction with the
30
+ // pattern matching facility. A const instance of this struct can
31
+ // also be stored away to cache the compiled IR pattern and reduce
32
+ // runtime cost
33
+ struct PatternInfo {
34
+ std::string pattern_string;
35
+ std::unique_ptr<Graph> pattern_graph;
36
+ std::unordered_map<std::string, Value*> vmap;
37
+ std::vector<MatchFilter> filters;
38
+
39
+ static PatternInfo parse_from_str(
40
+ std::string pattern_string,
41
+ const std::vector<MatchFilter>& filters = {}) {
42
+ PatternInfo rv{
43
+ std::move(pattern_string),
44
+ std::make_unique<Graph>(),
45
+ decltype(vmap){},
46
+ filters};
47
+ parseIR(rv.pattern_string, rv.pattern_graph.get(), rv.vmap);
48
+ return rv;
49
+ }
50
+ };
51
+
52
+ } // namespace graph_rewrite_helper
53
+ } // namespace jit
54
+ } // namespace torch
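
Because PatternInfo::parse_from_str parses the IR once and stores the resulting graph and value map, callers typically build it as a static and reuse it. A small illustrative sketch (the pattern is a made-up single-op example, not one of the real rewrite patterns):

    static const PatternInfo relu_pattern = PatternInfo::parse_from_str(R"IR(
    graph(%input):
      %r = aten::relu(%input)
      return (%r))IR");
    // relu_pattern.pattern_graph and relu_pattern.vmap are now populated and
    // can be handed to the subgraph matcher on every rewrite invocation.
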
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/guard_elimination.h ADDED
@@ -0,0 +1,19 @@
1
+ #pragma once
2
+
3
+ #include <ATen/ATen.h>
4
+ #include <ATen/core/ivalue.h>
5
+ #include <ATen/core/jit_type.h>
6
+ #include <ATen/core/stack.h>
7
+ #include <torch/csrc/Export.h>
8
+ #include <torch/csrc/jit/ir/ir.h>
9
+
10
+ #include <list>
11
+ #include <vector>
12
+
13
+ namespace torch {
14
+ namespace jit {
15
+
16
+ TORCH_API void EliminateRedundantGuards(std::shared_ptr<Graph> graph);
17
+
18
+ } // namespace jit
19
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/hoist_conv_packed_params.h ADDED
@@ -0,0 +1,12 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/api/module.h>
4
+ #include <torch/csrc/jit/ir/ir.h>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+
9
+ void HoistConvPackedParams(script::Module& m);
10
+
11
+ } // namespace jit
12
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inline_fork_wait.h ADDED
@@ -0,0 +1,16 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ // Inline Fork and Wait calls. This is used, for example, in ONNX export, where
9
+ // we do not support the explicit parallelism structures and would rather
10
+ // just have a flat graph. This inlines the forked section in the fork()
11
+ // callsite and replaces uses of the result of wait() calls with the values
12
+ // produced from the (now-inlined) forked section.
13
+ TORCH_API void InlineForkWait(const std::shared_ptr<Graph>& graph);
14
+
15
+ } // namespace jit
16
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/insert_guards.h ADDED
@@ -0,0 +1,21 @@
1
+ #pragma once
2
+
3
+ #include <ATen/ATen.h>
4
+ #include <ATen/core/ivalue.h>
5
+ #include <ATen/core/jit_type.h>
6
+ #include <ATen/core/stack.h>
7
+ #include <torch/csrc/Export.h>
8
+ #include <torch/csrc/jit/ir/ir.h>
9
+
10
+ #include <list>
11
+ #include <vector>
12
+
13
+ namespace torch {
14
+ namespace jit {
15
+
16
+ TORCH_API void InsertGuards(std::shared_ptr<Graph> graph);
17
+
18
+ TORCH_API void RemoveProfilingNodes(const std::shared_ptr<Graph>& graph);
19
+
20
+ } // namespace jit
21
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lift_closures.h ADDED
@@ -0,0 +1,12 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/Export.h>
4
+ #include <torch/csrc/jit/ir/ir.h>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+
9
+ TORCH_API void liftClosures(const std::shared_ptr<Graph>& graph);
10
+
11
+ } // namespace jit
12
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/liveness.h ADDED
@@ -0,0 +1,23 @@
1
+ #pragma once
2
+
3
+ #include <ATen/ATen.h>
4
+ #include <ATen/core/ivalue.h>
5
+ #include <ATen/core/jit_type.h>
6
+ #include <ATen/core/stack.h>
7
+ #include <c10/util/sparse_bitset.h>
8
+ #include <torch/csrc/Export.h>
9
+ #include <torch/csrc/jit/ir/ir.h>
10
+ #include <list>
11
+ #include <unordered_map>
12
+ #include <vector>
13
+ namespace torch {
14
+ namespace jit {
15
+
16
+ using SparseBitVector = ::c10::SparseBitVector<256>;
17
+
18
+ // BuildLivenessSets computes "bailout" liveness which is equivalent to
19
+ // "{LIVE_IN} or {GEN}" or "{LIVE_OUT} - {KILL}"
20
+ TORCH_API std::unordered_map<Node*, std::vector<Value*>> BuildLivenessSets(
21
+ std::shared_ptr<Graph> graph);
22
+ } // namespace jit
23
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_graph.h ADDED
@@ -0,0 +1,22 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ using ModulePtr = c10::intrusive_ptr<c10::ivalue::Object>;
9
+
10
+ // Given the graph of a method whose first argument is %self, lower it to a
11
+ // graph where all attributes accesses are replaced with explicit inputs of the
12
+ // graph (rather than results of prim::GetAttr executed on %self).
13
+ //
14
+ // Returns a tuple (graph, parameters) where the last module.parameters.size()
15
+ // inputs to the graph are the trainable parameters used in this method. The
16
+ // remaining inputs are the true inputs to the function.
17
+ TORCH_API std::pair<std::shared_ptr<Graph>, std::vector<IValue>> LowerGraph(
18
+ Graph& graph,
19
+ const ModulePtr& self);
20
+
21
+ } // namespace jit
22
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/metal_rewrite.h ADDED
@@ -0,0 +1,17 @@
1
+ #pragma once
2
+ #include <torch/csrc/jit/api/module.h>
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+ #include <string>
5
+ #include <vector>
6
+
7
+ namespace torch {
8
+ namespace jit {
9
+ TORCH_API void metalInsertPrePackedOps(std::shared_ptr<Graph>& graph);
10
+ TORCH_API void metalInsertPrePackedOps(script::Module& module);
11
+ TORCH_API void metalFusePrePackedConvWithClamp(script::Module& module);
12
+ TORCH_API void metalFoldPrePackingOps(script::Module& module);
13
+ TORCH_API script::Module metalOptimizeForMobile(
14
+ const script::Module& module,
15
+ const std::vector<std::string>& preserved_methods);
16
+ } // namespace jit
17
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/mkldnn_rewrite.h ADDED
@@ -0,0 +1,34 @@
1
+ #pragma once
2
+
3
+ #include <ATen/Config.h>
4
+ #include <torch/csrc/jit/api/module.h>
5
+ #include <torch/csrc/jit/ir/ir.h>
6
+ #include <torch/csrc/jit/passes/subgraph_rewrite.h>
7
+
8
+ #if AT_MKLDNN_ENABLED()
9
+
10
+ #include <ideep/tensor.hpp>
11
+
12
+ #endif // AT_MKLDNN_ENABLED()
13
+
14
+ namespace torch {
15
+ namespace jit {
16
+
17
+ #if AT_MKLDNN_ENABLED()
18
+
19
+ namespace mkldnn {
20
+
21
+ const static std::map<std::string, std::vector<torch::jit::MatchFilter>>
22
+ fusion_rewrite_map = {
23
+ {"none", {}},
24
+ {"relu", {}},
25
+ };
26
+
27
+ } // namespace mkldnn
28
+
29
+ #endif // AT_MKLDNN_ENABLED()
30
+
31
+ void FuseConvWithEltwise(std::shared_ptr<Graph>& graph);
32
+
33
+ } // namespace jit
34
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/normalize_ops.h ADDED
@@ -0,0 +1,18 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ // This pass converts aten ops to a normalized form. It is
9
+ // run immediately after IR generation in both the tracer and compiler,
10
+ // so downstream consumers of the IR do not need to handle ops in their
11
+ // pre-normalized form.
12
+ // Currently only handles normalization of op aliases.
13
+ TORCH_API void NormalizeOps(const std::shared_ptr<Graph>& graph);
14
+
15
+ const std::unordered_map<Symbol, Symbol>& getOperatorAliasMap();
16
+
17
+ } // namespace jit
18
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/onednn_graph_fuser.h ADDED
@@ -0,0 +1,64 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+ #include <torch/csrc/jit/passes/pass_manager.h>
5
+
6
+ #include <ATen/Config.h>
7
+
8
+ namespace torch {
9
+ namespace jit {
10
+ namespace fuser {
11
+ namespace onednn {
12
+
13
+ static std::atomic<bool> onednn_enabled{true};
14
+
15
+ static std::atomic<bool>& getLlgaEnabled() {
16
+ return onednn_enabled;
17
+ }
18
+
19
+ TORCH_API void fuseGraph(std::shared_ptr<Graph>& g);
20
+
21
+ } // namespace onednn
22
+ } // namespace fuser
23
+
24
+ struct C10_EXPORT RegisterLlgaFuseGraph
25
+ : public PassManager<RegisterLlgaFuseGraph> {
26
+ static bool setEnabled(bool enabled) {
27
+ TORCH_CHECK(
28
+ AT_MKLDNN_ENABLED(),
29
+ "Running oneDNN Graph fuser is only supported with MKLDNN builds.");
30
+ bool oldState = fuser::onednn::getLlgaEnabled();
31
+ fuser::onednn::getLlgaEnabled() = enabled;
32
+ if (enabled) {
33
+ registerPass(fuser::onednn::fuseGraph);
34
+ } else {
35
+ clearPass();
36
+ }
37
+ return oldState;
38
+ }
39
+
40
+ static bool isEnabled() {
41
+ return fuser::onednn::getLlgaEnabled();
42
+ }
43
+
44
+ // override PassManager::registerPass to register pre-pass
45
+ static bool registerPass(GraphPass p) {
46
+ if (!isRegistered()) {
47
+ passID(registerPrePass(std::move(p)), true);
48
+ isRegistered(true);
49
+ return false;
50
+ }
51
+ return true;
52
+ }
53
+
54
+ // override PassManager::clearPass to clear pre-pass
55
+ static void clearPass() {
56
+ if (isRegistered()) {
57
+ clearPrePass(passID());
58
+ isRegistered(true);
59
+ }
60
+ }
61
+ };
62
+
63
+ } // namespace jit
64
+ } // namespace torch
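
setEnabled above both flips the atomic flag and registers or clears the fusion pre-pass, and it returns the previous state, so it can be used as a scoped toggle. A hedged usage sketch (an MKLDNN-enabled build is assumed, since setEnabled TORCH_CHECKs otherwise):

    bool was_enabled = RegisterLlgaFuseGraph::setEnabled(true);
    // ... run scripted models so the registered pre-pass can fuse subgraphs ...
    RegisterLlgaFuseGraph::setEnabled(was_enabled);  // restore the prior state
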
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_alias_sensitive.h ADDED
@@ -0,0 +1,17 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ // Peephole Optimizes alias sensitive peepholes
9
+ // Currently this is invoked as part of PeepholeOptimize
10
+ // return true if graph is modified
11
+ // Optimizes on TensorType if shape_peepholes is true
12
+ TORCH_API bool PeepholeOptimizeAliasSensitive(
13
+ const std::shared_ptr<Graph>& graph,
14
+ bool shape_peepholes);
15
+
16
+ } // namespace jit
17
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_dict_idioms.h ADDED
@@ -0,0 +1,38 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ // Peephole Optimizes Dict Ops such as len() and __getitem__
9
+ // 1. getitem optimizations
10
+ // Given a function like this:
11
+ // def foo():
12
+ // d = {0 : 1}
13
+ // x = d[0]
14
+ // return x
15
+ // This pass produces (after dead code elimination):
16
+ // def foo(a, b):
17
+ // return 1
18
+ //
19
+ // This optimization can only happen if the dict is not modified
20
+ // and the dict has constant, non overlapping keys.
21
+ //
22
+ // 2. len optimizations
23
+ // Given a function like this:
24
+ // def foo():
25
+ // d = {0 : 1}
26
+ // return len(d)
27
+ // This pass produces (after dead code elimination):
28
+ // def foo():
29
+ // return 1
30
+ //
31
+ // This has the same requirements as the getitem optimizations.
32
+ //
33
+ // Currently this is invoked as part of PeepholeOptimize
34
+ // return true if graph is modified.
35
+ TORCH_API bool PeepholeOptimizeDictIdioms(const std::shared_ptr<Graph>& graph);
36
+
37
+ } // namespace jit
38
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_non_tensor.h ADDED
@@ -0,0 +1,14 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ // return true if graph is modified
9
+ // Optimizing General Graph Patterns that
10
+ // are not covered in peephole.cpp and peephole_list_idioms
11
+ TORCH_API bool PeepholeOptimizeNonTensor(const std::shared_ptr<Graph>& graph);
12
+
13
+ } // namespace jit
14
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/dedup_module_uses.h ADDED
@@ -0,0 +1,28 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/api/module.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ /** Recursively deduplicate multiple uses of the same module by
9
+ * creating an instance clone for each use of the module, which means
10
+ * the type will be the same as before and all the attributes will be
11
+ * copied, then we'll change the use of the original module to the use
12
+ * of cloned module in the Graph.
13
+ *
14
+ * This is done to ensure that modules can survive destructive passes
15
+ * without changing model behavior. For example, here:
16
+ *
17
+ * x = self.conv1(x)
18
+ * x = self.relu(x)
19
+ * x = self.conv2(x)
20
+ * x = self.relu(x)
21
+ *
22
+ * self.relu needs to be deduplicated for potential future destructive passes
23
+ * to work properly.
24
+ */
25
+ TORCH_API void DedupModuleUses(Module& module);
26
+
27
+ } // namespace jit
28
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/finalize.h ADDED
@@ -0,0 +1,63 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/api/module.h>
4
+ #include <torch/csrc/jit/ir/ir.h>
5
+ #include <torch/csrc/jit/passes/quantization/quantization_type.h>
6
+
7
+ namespace torch {
8
+ namespace jit {
9
+
10
+ /** \brief Backend specific pass to fuse dequantize - op - quantize calls
11
+ * as quantized_op calls.
12
+ *
13
+ * Right now this is a fusion for fbgemm backend and only works for quantized
14
+ * conv op, we'll extend to more ops and more backends in the future.
15
+ *
16
+ * Currently supported fusion:
17
+ * q(conv2d(dq(a), dq(w), dq(b))) --> to_nchw(fbgemm_conv2d(prepack(to_nhwc(a)),
18
+ * prepack(to_nhwc(w)),
19
+ * prepack(to_nhwc(b))))
20
+ *
21
+ * q(linear(dq(a), dq(w), dq(b))) --> to_nchw(fbgemm_linear(prepack(to_nhwc(a)),
22
+ * prepack(to_nhwc(w)),
23
+ * prepack(to_nhwc(b))))
24
+ *
25
+ * \param graph the graph we want to apply fusion
26
+ */
27
+ TORCH_API void QuantFusion(
28
+ std::shared_ptr<Graph>& graph,
29
+ QuantType quant_type = QuantType::STATIC);
30
+
31
+ /** \brief Insert prepack and unpack function in graph
32
+ * We want add pack/unpack functions for quantized weight because later we want
33
+ * to fold the packed weight as an attribute of the module, in order to reduce
34
+ * the cost of packing the weight on the fly in quantized models.
35
+ *
36
+ * Each quantized op has it's corresponding prepack/unpack function,
37
+ * right now, we only need to do prepack/unpack for quantized::linear
38
+ * and quantized::conv2d.
39
+ */
40
+ TORCH_API void InsertPrepackUnpack(std::shared_ptr<Graph>& graph);
41
+
42
+ /** \brief Insert pack and unpack function in all graphs
43
+ * of module
44
+ *
45
+ * Go through graphs of all the methods of all child modules
46
+ * and call InsertPrepackUnpack on the graph.
47
+ */
48
+ TORCH_API void InsertPrepackUnpack(Module& module);
49
+
50
+ TORCH_API script::Module Finalize(
51
+ script::Module& module,
52
+ QuantType quant_type = QuantType::STATIC,
53
+ const std::vector<std::string>& preserved_attrs =
54
+ std::vector<std::string>());
55
+
56
+ TORCH_API void FoldQuantizedPrepackingOps(Module& module);
57
+
58
+ TORCH_API Module FinalizeOnDevicePTQ(
59
+ Module& module,
60
+ QuantType quant_type,
61
+ const std::string& method_name);
62
+ } // namespace jit
63
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/fusion_passes.h ADDED
@@ -0,0 +1,9 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+ TORCH_API void FuseQuantizedAddRelu(std::shared_ptr<Graph>& graph);
8
+ } // namespace jit
9
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/helper.h ADDED
@@ -0,0 +1,216 @@
1
+ #pragma once
2
+ #include <torch/csrc/jit/api/module.h>
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+ #include <torch/csrc/jit/ir/subgraph_matcher.h>
5
+ #include <torch/csrc/jit/passes/graph_rewrite_helper.h>
6
+ #include <torch/csrc/jit/passes/quantization/quantization_type.h>
7
+
8
+ #include <functional>
9
+ #include <regex>
10
+
11
+ namespace torch {
12
+ namespace jit {
13
+
14
+ using graph_rewrite_helper::getFuncName;
15
+
16
+ // Vector of a module and the name of its method
17
+ using ModuleMethodVector = std::vector<std::pair<Module, std::string>>;
18
+ // Map of quantization parameter name and value
19
+ // for example _scale, _zero_point,
20
+ // _scalar_type and _axis(for per channel quantization)
21
+ using QParamVector = std::vector<std::pair<std::string, IValue>>;
22
+
23
+ // =========== helper functions for Value =========
24
+ // Check if a value is weight, since we need to use weight observer
25
+ // for weight
26
+ TORCH_API bool isWeight(Value* v);
27
+
28
+ // Check if a value is bias for conv and linear, which we do not
29
+ // quantize
30
+ TORCH_API bool isBiasOfConvOrLinear(Value* v);
31
+
32
+ TORCH_API bool isEmbeddingBagNonInput(Value* v);
33
+
34
+ // Get the use as scalar input of clamp ops for the input value
35
+ c10::optional<Use> getClampScalarInputUse(Value* v);
36
+
37
+ // For a given value `v`, get the list of values that we need to check
38
+ // if they are observed/quantized or not, if so, we can say the
39
+ // `v` is also observed/quantized, since we can derive
40
+ // the quantization parameters for `v` given the list of values
41
+ TORCH_API std::vector<Value*> getPassThroughInputs(Value* v);
42
+
43
+ // Clones the method by the name of orig_method_name into new_method_name method
44
+ TORCH_API void cloneMethod(
45
+ Module& module,
46
+ const std::string& orig_method_name,
47
+ const std::string& new_method_name);
48
+
49
+ // Check if a value in the graph is a Scalar value
50
+ TORCH_API bool isScalar(Value* v);
51
+
52
+ // Check if value is the input of the graph
53
+ TORCH_API bool hitGraphInput(Value* value);
54
+
55
+ // Converts a mangled name, such as
56
+ // __torch__.torch.ao.nn.quantized.modules.conv.___torch_mangle_7.Conv2d
57
+ // into an unmangled name, such as
58
+ // __torch__.torch.ao.nn.quantized.modules.conv.Conv2d
59
+ TORCH_API std::string removeTorchMangle(const std::string& orig_name);
60
+
61
+ // Return the module name that corresponds to the value.
62
+ TORCH_API c10::optional<std::string> getModuleName(Value* value);
63
+
64
+ // =========== helper functions for Node =========
65
+ TORCH_API bool isSingleInputGeneralShapeAtenFunction(Node* n);
66
+
67
+ TORCH_API bool isSingleInputGeneralValueAtenFunction(Node* n);
68
+
69
+ TORCH_API bool isSingleInputGeneralCallFunction(Node* n);
70
+
71
+ TORCH_API bool isSingleInputGeneralAtenFunction(Node* n);
72
+
73
+ TORCH_API bool isClamp(Node* n);
74
+
75
+ // Check if the node will produce the same result regardless of whether
76
+ // the input tensor is quantized or not, example: aten::size
77
+ TORCH_API bool isTensorInfoNode(Node* n);
78
+
79
+ // Check if this is the propagate op that has a single input, e.g. aten::cat
80
+ TORCH_API bool isPropagateQuantSingleInputOp(Node* n);
81
+
82
+ // Check if this is the propagate op that has two inputs, e.g. aten::add
83
+ TORCH_API bool isPropagateQuantBinaryOp(Node* n);
84
+
85
+ // Check if this is the node that we'll quantize or not quantize depending on
86
+ // whether the input of the node is quantized, example: aten::cat
87
+ TORCH_API bool isPropagateQuantOp(Node* n);
88
+
89
+ // Check if the node is a binary op like aten::add and aten::mul and
90
+ // if the input 1 is a scalar, these ops will be quantized to
91
+ // quantized::{op}_scalar
92
+ TORCH_API bool isBinaryOpWithScalarInput(Node* n);
93
+
94
+ TORCH_API c10::optional<std::tuple<c10::QScheme, QParamVector>> getFixedQParams(
95
+ Node* n);
96
+
97
+ // We don't want to analyze the graph for some `builtin` CallFunctions
98
+ // like `linear` because we want to preserve the op boundary
99
+ TORCH_API bool userDefinedCallFunction(Node* n);
100
+
101
+ // Check if the node has scalar input
102
+ TORCH_API bool hasScalarInput(Node* n);
103
+
104
+ // Check if a node is quantizable
105
+ TORCH_API bool nodeQuantizable(
106
+ Node* n,
107
+ QuantType quant_type = QuantType::STATIC);
108
+
109
+ // Nodes which only require quantization of weight value, eg. embedding_bag
110
+ bool isWeightOnlyStaticQuantOp(Node* n);
111
+
112
+ // Check if a use of the value is quantizable, this depends on
113
+ // both the use node and the offset
114
+ TORCH_API bool useQuantizable(const Use& use, QuantType quant_type);
115
+
116
+ // Given a CallFunction node, extract the graph of the called function
117
+ TORCH_API std::shared_ptr<Graph> getCallFunctionGraph(Node* n);
118
+
119
+ // Check if `use` is a CallFunction of name `func_name` and if value
120
+ // `v` is the nth argument (if provided) of the function
121
+ bool matchCallFuncToUse(
122
+ const Use& use,
123
+ const std::string& func_name,
124
+ c10::optional<int> nth_arg);
125
+
126
+ // Check if `use` is a AtenFunction of name `func_name` and if value
127
+ // `v` is the nth argument (if provided) of the function
128
+ bool matchAtenFuncToUse(
129
+ const Use& use,
130
+ const std::string& func_name,
131
+ c10::optional<int> nth_arg);
132
+
133
+ // =========== helper functions for Block =========
134
+ // checks if a block will always raise an Exception
135
+ TORCH_API bool alwaysRaisesException(Block* block);
136
+
137
+ // =========== helper functions for Module ==========
138
+ // TODO: remove
139
+ TORCH_API std::vector<std::string> getModuleAccessPath(
140
+ Value* instance,
141
+ Value* self);
142
+ // TODO: remove
143
+ TORCH_API Module
144
+ findChildModule(const Module& module, const std::vector<std::string>& path);
145
+
146
+ // Given an CallMethod node, get the module instance corresponding
147
+ // to the instance Value
148
+ // TODO: refactor all current uses of this function to the Opt one
149
+ TORCH_API Module getInvokedModule(Module& module, Node* n, Value* self);
150
+
151
+ // Given an CallMethod node, get the module instance corresponding
152
+ // to the instance Value if the instance is a module, otherwise return
153
+ // c10::nullopt
154
+ c10::optional<Module> getInvokedModuleOpt(
155
+ const Module& module,
156
+ Node* n,
157
+ Value* self);
158
+
159
+ // ==================== filter functions for matches ==============
160
+ // filter to check Value `vname` is a constant of int value `value`
161
+ bool is_int_constant(
162
+ const Match& match,
163
+ const std::unordered_map<std::string, Value*>& vmap,
164
+ const std::string& vname,
165
+ int value);
166
+
167
+ // filter to check if the %alpha argument of aten::add is constant 1
168
+ bool aten_add_alpha_is_one(
169
+ const Match& match,
170
+ const std::unordered_map<std::string, Value*>& vmap);
171
+
172
+ // filter to check if the functional in CallFunction is relu
173
+ bool is_functional_relu(
174
+ const Match& match,
175
+ const std::unordered_map<std::string, Value*>& vmap);
176
+
177
+ // filter to check if the module is torch.nn.ReLU
178
+ bool is_relu_module(
179
+ const Match& match,
180
+ const std::unordered_map<std::string, Value*>& vmap);
181
+
182
+ bool is_linear_module(
183
+ const Match& match,
184
+ const std::unordered_map<std::string, Value*>& vmap);
185
+
186
+ // TODO: add a macro to declare the filters
187
+ bool is_conv1d_module(
188
+ const Match& match,
189
+ const std::unordered_map<std::string, Value*>& vmap);
190
+
191
+ bool is_conv2d_module(
192
+ const Match& match,
193
+ const std::unordered_map<std::string, Value*>& vmap);
194
+
195
+ bool is_conv3d_module(
196
+ const Match& match,
197
+ const std::unordered_map<std::string, Value*>& vmap);
198
+
199
+ bool is_conv_transpose1d_module(
200
+ const Match& match,
201
+ const std::unordered_map<std::string, Value*>& vmap);
202
+
203
+ bool is_conv_transpose2d_module(
204
+ const Match& match,
205
+ const std::unordered_map<std::string, Value*>& vmap);
206
+
207
+ bool is_batchnorm2d_module(
208
+ const Match& match,
209
+ const std::unordered_map<std::string, Value*>& vmap);
210
+
211
+ bool is_batchnorm3d_module(
212
+ const Match& match,
213
+ const std::unordered_map<std::string, Value*>& vmap);
214
+
215
+ } // namespace jit
216
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/insert_observers.h ADDED
@@ -0,0 +1,68 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/api/module.h>
4
+ #include <torch/csrc/jit/passes/quantization/quantization_type.h>
5
+
6
+ namespace std {
7
+
8
+ template <>
9
+ struct hash<torch::jit::Module> {
10
+ inline size_t operator()(const torch::jit::Module& arg) const {
11
+ return std::hash<c10::intrusive_ptr<c10::ivalue::Object>>()(arg._ivalue());
12
+ }
13
+ };
14
+
15
+ } // namespace std
16
+
17
+ namespace torch {
18
+ namespace jit {
19
+
20
+ using QConfig = std::tuple<Module, Module>;
21
+ using QConfigDict = std::unordered_map<std::string, c10::optional<QConfig>>;
22
+
23
+ /** \brief Insert observer module and observer function call for
24
+ * the Tensors that needs to be observed.
25
+ *
26
+ * For each Tensor that needs to be observed in the method, insert observer
27
+ * module to the input module and add forward calls of observer to the specified
28
+ * method.
29
+ *
30
+ * \param module the input module
31
+ * \param method_name the method we want to insert observers for
32
+ * \param qconfig_dict the qconfig dictionary that specifies how
33
+ * each module is going to be quantized
34
+ * \param inplace whether we want to do inplace modification to the input module
35
+ * or clone the module
36
+ * \param is_dynamic whether the dynamic quantization script is being used.
37
+ */
38
+ TORCH_API Module InsertObservers(
39
+ Module& module,
40
+ const std::string& method_name,
41
+ const QConfigDict& qconfig_dict,
42
+ bool inplace,
43
+ QuantType quant_type = QuantType::STATIC);
44
+
45
+ /** \brief Insert observer module and observer method for
46
+ * the Tensors that needs to be observed.
47
+ *
48
+ * For each Tensor that needs to be observed in the method, insert observer
49
+ * module to the input module and observe_<method-name> methods to the module.
50
+ * This method is a clone of method_name with forward calls of the observer added.
51
+ *
52
+ * \param module the input module
53
+ * \param method_name the method we want to insert observers for
54
+ * \param qconfig_dict the qconfig dictionary that specifies how
55
+ * each module is going to be quantized
56
+ * \param inplace whether we want to do inplace modification to the input module
57
+ * or clone the module
58
+ * \param is_dynamic whether the dynamic quantization script is being used.
59
+ */
60
+ TORCH_API Module InsertObserversForOnDevicePTQ(
61
+ Module& module,
62
+ const std::string& method_name,
63
+ const QConfigDict& qconfig_dict,
64
+ bool inplace,
65
+ QuantType quant_type = QuantType::STATIC);
66
+
67
+ } // namespace jit
68
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/insert_quant_dequant.h ADDED
@@ -0,0 +1,46 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/api/module.h>
4
+ #include <torch/csrc/jit/ir/ir.h>
5
+ #include <torch/csrc/jit/passes/quantization/quantization_type.h>
6
+
7
+ namespace torch {
8
+ namespace jit {
9
+
10
+ /** Replicate quantize node for prim::If blocks, so that we can match
11
+ * quantization patterns in prim::If blocks
12
+ */
13
+ TORCH_API void ReplicateQuant(std::shared_ptr<Graph>& graph);
14
+
15
+ /** Replicate dequantize node for each use, so that we can match
16
+ * quantization patterns
17
+ */
18
+ TORCH_API void ReplicateDeQuant(std::shared_ptr<Graph>& graph);
19
+
20
+ /** \brief Insert quantize - dequantize calls to the Tensors
21
+ * that are observed in insert_observers pass
22
+ *
23
+ * For each Tensor that is observed, get the observer module and call
24
+ * calculate_qparam on the observer module to get quantization parameters
25
+ * and add quantize - int_repr - dequantize function calls using these
26
+ * parameters we also have special handling for quantizing "bias" right now.
27
+ *
28
+ * \param module the input module
29
+ * \param method_name the method we want to insert quantization calls for
30
+ */
31
+ TORCH_API Module InsertQuantDeQuant(
32
+ Module& module,
33
+ const std::string& method_name,
34
+ bool inplace,
35
+ bool debug,
36
+ QuantType quant_type = QuantType::STATIC);
37
+
38
+ TORCH_API Module InsertQuantDeQuantOnDevicePTQ(
39
+ Module& module,
40
+ const std::string& method_name,
41
+ bool inplace,
42
+ bool debug,
43
+ QuantType quant_type = QuantType::STATIC);
44
+
45
+ } // namespace jit
46
+ } // namespace torch
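
Together with insert_observers.h and finalize.h from this same commit, these declarations form the graph-mode static post-training quantization flow. A hedged end-to-end sketch (the `model`, `qconfig_dict`, and calibration inputs are placeholders):

    // 1. Attach observers to the tensors that need quantization statistics.
    Module observed = InsertObservers(
        model, "forward", qconfig_dict, /*inplace=*/false, QuantType::STATIC);
    // 2. ... run representative inputs through observed.forward() to calibrate ...
    // 3. Replace observers with quantize/dequantize calls using the collected qparams.
    Module quantized = InsertQuantDeQuant(
        observed, "forward", /*inplace=*/false, /*debug=*/false, QuantType::STATIC);
    // 4. Fuse dequantize-op-quantize patterns into quantized ops and fold packed weights.
    quantized = Finalize(quantized, QuantType::STATIC);
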
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/quantization_patterns.h ADDED
@@ -0,0 +1,1272 @@
1
+ #pragma once
2
+
3
+ #include <c10/util/irange.h>
4
+ #include <torch/csrc/jit/ir/ir.h>
5
+ #include <torch/csrc/jit/ir/subgraph_matcher.h>
6
+ #include <torch/csrc/jit/jit_log.h>
7
+ #include <torch/csrc/jit/passes/quantization/helper.h>
8
+ #include <torch/csrc/jit/passes/subgraph_rewrite.h>
9
+ #include <string>
10
+ #include <unordered_map>
11
+ #include <utility>
12
+
13
+ namespace torch {
14
+ namespace jit {
15
+
16
+ struct QuantFusionInfo {
17
+ std::string quantized_op_name;
18
+ std::string pattern;
19
+ std::string replacement;
20
+ std::vector<MatchFilter> filters = {};
21
+ };
22
+
23
+ namespace {
24
+ std::string getExtraArgList(std::vector<std::string> extra_args) {
25
+ return std::accumulate(
26
+ extra_args.begin(),
27
+ extra_args.end(),
28
+ std::string(),
29
+ [](std::string acc, const std::string& arg) { return acc + ", " + arg; });
30
+ }
31
+
32
+ // Get the pattern we want to replace the match with
33
+ std::string getAtenOpPattern(
34
+ const std::string& graph_header,
35
+ const std::string& op_name,
36
+ const std::vector<std::string>& extra_op_args,
37
+ bool scalar_args = false) {
38
+ std::vector<std::string> _extra_op_args = extra_op_args;
39
+ std::string aten_op_pattern = graph_header;
40
+ if (scalar_args) {
41
+ for (const auto& extra_arg : _extra_op_args) {
42
+ aten_op_pattern
43
+ .append(R"(
44
+ )")
45
+ .append(extra_arg)
46
+ .append("_scalar = aten::item(")
47
+ .append(extra_arg)
48
+ .append(")");
49
+ }
50
+
51
+ for (auto& _extra_op_arg : _extra_op_args) {
52
+ _extra_op_arg.append("_scalar");
53
+ }
54
+ }
55
+ const auto& extra_op_arg_list = getExtraArgList(std::move(_extra_op_args));
56
+ aten_op_pattern += R"(
57
+ %r = )";
58
+ aten_op_pattern += op_name + "(" + "%a_quant" + extra_op_arg_list + ")";
59
+ aten_op_pattern += R"(
60
+ return (%r) )";
61
+ return aten_op_pattern;
62
+ }
63
+
64
+ // generate ops for quantize pattern for a scalar value
65
+ std::string getQuantizeForScalar(const std::string& value) {
66
+ // 6 is `torch.float` ScalarType, we are creating a float scalar
67
+ // tensor from a scalar value
68
+ std::string quantize_pattern = R"(
69
+ )" +
70
+ value + "_float_scalar_type : int = prim::Constant[value=6]()";
71
+ quantize_pattern += R"(
72
+ )" +
73
+ value + "_none : None = prim::Constant()";
74
+ quantize_pattern += R"(
75
+ )" +
76
+ value + "_tensor : Tensor = aten::scalar_tensor(" + value + ", " + value +
77
+ "_float_scalar_type";
78
+ for (const auto i : c10::irange(3)) {
79
+ (void)i; // Suppress unused variable warning
80
+ quantize_pattern += ", " + value + "_none";
81
+ }
82
+ quantize_pattern += ")";
83
+ quantize_pattern +=
84
+ R"(
85
+ )" +
86
+ value + "_quant = aten::quantize_per_tensor(" + value + "_tensor" +
87
+ getExtraArgList(
88
+ {value + "_scale", value + "_zero_point", value + "_dtype"}) +
89
+ ")";
90
+ return quantize_pattern;
91
+ }
92
+
93
+ std::string getDequantize(const std::string& value) {
94
+ return R"(
95
+ )" +
96
+ value + "_dequant = aten::dequantize(" + value + "_quant)";
97
+ }
98
+
99
+ std::string getItem(const std::string& value) {
100
+ return R"(
101
+ )" +
102
+ value + "_scalar : float = aten::item(" + value + "_dequant)";
103
+ }
104
+
105
+ // Patterns for ops that inherit quantization parameters from the input tensor
106
+ std::string getInputTensorQParamOpPattern(
107
+ const std::string& op_name,
108
+ const std::vector<std::string>& extra_op_args) {
109
+ const auto& extra_op_arg_list = getExtraArgList(extra_op_args);
110
+ std::string op_pattern = "graph(%a_quant" + extra_op_arg_list + "):" + R"(
111
+ %a_dequant = aten::dequantize(%a_quant)
112
+ %r = )" +
113
+ op_name + "(" + "%a_dequant" + extra_op_arg_list + ")" + R"(
114
+ %r_scale : float = aten::q_scale(%a_quant)
115
+ %r_zero_point : int = aten::q_zero_point(%a_quant)
116
+ %r_dtype : int = prim::dtype(%a_quant)
117
+ %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype)
118
+ return (%r_quant) )";
119
+ return op_pattern;
120
+ }
121
+
122
+ // QuantFusionInfo for ops that inherit quantization parameters from the input tensor
123
+ QuantFusionInfo getInputTensorQParamOpFusionInfo(
124
+ const std::string& op_name,
125
+ const std::vector<std::string>& extra_op_args) {
126
+ std::string op_pattern =
127
+ getInputTensorQParamOpPattern(op_name, extra_op_args);
128
+ const auto& extra_op_arg_list = getExtraArgList(extra_op_args);
129
+ std::string graph_header = "graph(%a_quant" + extra_op_arg_list + "):";
130
+ std::string op_replacement =
131
+ getAtenOpPattern(graph_header, op_name, extra_op_args);
132
+
133
+ return {op_name, std::move(op_pattern), std::move(op_replacement)};
134
+ }
135
+
136
+ // quant fusion for ops like `quantized::add_scalar`, `quantized::mul_scalar`
137
+ QuantFusionInfo getBinaryOpScalarFusionInfo(
138
+ const std::string& op_name,
139
+ const std::vector<std::string>& extra_op_args,
140
+ const std::string& quantized_op_name,
141
+ const std::vector<std::string>& extra_quantized_op_args,
142
+ const std::vector<MatchFilter>& filters = {}) {
143
+ std::string op_pattern =
144
+ getInputTensorQParamOpPattern(op_name, extra_op_args);
145
+
146
+ const auto& extra_op_arg_list = getExtraArgList(extra_op_args);
147
+ std::string graph_header = "graph(%a_quant" + extra_op_arg_list + "):";
148
+ std::string op_replacement = getAtenOpPattern(
149
+ graph_header, quantized_op_name, extra_quantized_op_args);
150
+
151
+ return {op_name, std::move(op_pattern), std::move(op_replacement), filters};
152
+ }
153
+
154
+ QuantFusionInfo getClampOpFusionInfo(
155
+ const std::string& op_name,
156
+ const std::vector<std::string>& extra_op_args) {
157
+ std::vector<std::string> header_args = extra_op_args;
158
+ std::vector<std::string> input_qparams = {"_scale", "_zero_point", "_dtype"};
159
+ for (const auto& arg : extra_op_args) {
160
+ for (const auto& qparam : input_qparams) {
161
+ header_args.push_back(arg + qparam);
162
+ }
163
+ }
164
+ for (const auto& qparam : input_qparams) {
165
+ header_args.push_back("%r" + qparam);
166
+ }
167
+ const auto& extra_header_arg_list = getExtraArgList(std::move(header_args));
168
+ std::string graph_header = "graph(%a_quant" + extra_header_arg_list + "):";
169
+ std::string op_pattern = graph_header;
170
+ for (const auto& arg : extra_op_args) {
171
+ op_pattern += getQuantizeForScalar(arg);
172
+ op_pattern += getDequantize(arg);
173
+ op_pattern += getItem(arg);
174
+ }
175
+ op_pattern += getDequantize("%a");
176
+ op_pattern += R"(
177
+ %r = )";
178
+ std::vector<std::string> scalar_extra_args;
179
+ scalar_extra_args.reserve(extra_op_args.size());
180
+ for (const auto& arg : extra_op_args) {
181
+ scalar_extra_args.push_back(arg + "_scalar");
182
+ }
183
+ op_pattern += op_name + "(" + "%a_dequant" +
184
+ getExtraArgList(std::move(scalar_extra_args)) + ")";
185
+ // IR pattern common to all ops that inherit qparam from input
186
+ op_pattern += R"(
187
+ %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype)
188
+ return (%r_quant) )";
189
+
190
+ std::string aten_op_pattern =
191
+ getAtenOpPattern(graph_header, op_name, extra_op_args);
192
+
193
+ return {op_name, std::move(op_pattern), std::move(aten_op_pattern)};
194
+ }
195
+
196
+ // Patterns for the ops that have fixed quantization parameters
197
+ QuantFusionInfo getFixedQParamOpFusionInfo(
198
+ const std::string& op_name,
199
+ const std::vector<std::string>& extra_op_args,
200
+ bool is_symmetric) {
201
+ const auto& extra_op_arg_list = getExtraArgList(extra_op_args);
202
+ std::string graph_header = "graph(%a_quant" + extra_op_arg_list + "):";
203
+ std::string op_pattern = graph_header;
204
+ op_pattern += R"(
205
+ %a_dequant = aten::dequantize(%a_quant)
206
+ %r = )";
207
+ op_pattern += op_name + "(" + "%a_dequant" + extra_op_arg_list + ")";
208
+ // IR pattern common to all ops with fixed quantization parameters for
+ // asymmetric quantization
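+ // (For reference: scale 1/256 = 0.00390625 with zero_point 0 covers roughly
+ // [0, 1), which suits sigmoid-like outputs; the symmetric variant below uses
+ // scale 1/128 = 0.0078125 with zero_point 128 to cover roughly [-1, 1) for
+ // tanh-like outputs. The dtype value 13 corresponds to c10::ScalarType::QUInt8.)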
210
+ std::string asym_fixed_qparam_op_suffix = R"(
211
+ %r_scale : float = prim::Constant[value=0.00390625]()
212
+ %r_zero_point : int = prim::Constant[value=0]()
213
+ %r_dtype : int = prim::Constant[value=13]()
214
+ %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype)
215
+ return (%r_quant) )";
216
+
217
+ std::string sym_fixed_qparam_op_suffix = R"(
218
+ %r_scale : float = prim::Constant[value=0.0078125]()
219
+ %r_zero_point : int = prim::Constant[value=128]()
220
+ %r_dtype : int = prim::Constant[value=13]()
221
+ %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype)
222
+ return (%r_quant) )";
223
+ op_pattern +=
224
+ is_symmetric ? sym_fixed_qparam_op_suffix : asym_fixed_qparam_op_suffix;
225
+
226
+ std::string aten_op_pattern =
227
+ getAtenOpPattern(graph_header, op_name, extra_op_args);
228
+
229
+ return {op_name, std::move(op_pattern), std::move(aten_op_pattern)};
230
+ }
231
+
232
+ // filter that checks %b_scalar is a scalar
233
+ bool input_b_is_scalar(
234
+ const Match& match,
235
+ const std::unordered_map<std::string, Value*>& vmap) {
236
+ const auto& match_vmap = match.values_map;
237
+ auto b_scalar = match_vmap.at(vmap.at("b_scalar"));
238
+ return isScalar(b_scalar);
239
+ }
240
+
241
+ // Patterns for ops that require observation for output quantization parameters
242
+ // Example:
243
+ //
244
+ // before fusion:
245
+ //
246
+ // graph(%a_quant, %r_scale, %r_zero_point, %r_dtype):
247
+ // %a_dequant = aten::dequantize(%a_quant)
248
+ // %r = {op_name}(%a_dequant, {extra_args})
249
+ // %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point,
250
+ // %r_dtype) return (%r_quant)
251
+ //
252
+ // after fusion:
253
+ //
254
+ // graph(%a_quant, %r_scale, %r_zero_point, %r_dtype):
255
+ // %r_quant = {quantized_op_name}(%a_quant, {extra_args}, %r_scale,
256
+ // %r_zero_point) return (%r_quant)
257
+ QuantFusionInfo getObservedQParamOpFusionInfo(
258
+ const std::string& fp_op_name,
259
+ const std::string& q_op_name,
260
+ const std::vector<std::string>& fp_extra_args,
261
+ const std::vector<std::string>& q_extra_args) {
262
+ const auto& fp_extra_arg_list = getExtraArgList(fp_extra_args);
263
+ const auto& q_extra_arg_list = getExtraArgList(q_extra_args);
264
+
265
+ std::string op_pattern = "graph(%a_quant" + fp_extra_arg_list +
266
+ ", %r_scale, %r_zero_point, %r_dtype):" + R"(
267
+ %a_dequant = aten::dequantize(%a_quant)
268
+ %r = )" +
269
+ fp_op_name + "(" + "%a_dequant" + fp_extra_arg_list + ")" + R"(
270
+ %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype)
271
+ return (%r_quant) )";
272
+
273
+ std::string aten_op_pattern = "graph(%a_quant" + fp_extra_arg_list +
274
+ ", %r_scale, %r_zero_point, %r_dtype):" + R"(
275
+ %r_quant = )" +
276
+ q_op_name + "(%a_quant" + q_extra_arg_list +
277
+ ", %r_scale, %r_zero_point)" + R"(
278
+ return (%r_quant) )";
279
+
280
+ return {q_op_name, std::move(op_pattern), std::move(aten_op_pattern)};
281
+ }
282
+
283
+ } // namespace
284
+
285
+ static std::vector<QuantFusionInfo> quant_fusion_pattern_and_replacements() {
286
+ // aten::conv1d
287
+ std::string conv1d = R"(
288
+ graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups):
289
+ %a_dequant = aten::dequantize(%a_quant)
290
+ %w_quant : Tensor, %b : Tensor? = quantized::conv1d_unpack(%packed_params)
291
+ %w_dequant = aten::dequantize(%w_quant)
292
+ %r = aten::conv1d(%a_dequant, %w_dequant, %b, %stride, %padding, %dilation, %groups)
293
+ %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype)
294
+ return (%r_quant) )";
295
+
296
+ // aten::conv1d - aten::relu
297
+ std::string conv1d_relu = R"(
298
+ graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups):
299
+ %a_dequant = aten::dequantize(%a_quant)
300
+ %w_quant : Tensor, %b : Tensor? = quantized::conv1d_unpack(%packed_params)
301
+ %w_dequant = aten::dequantize(%w_quant)
302
+ %conv_out = aten::conv1d(%a_dequant, %w_dequant, %b, %stride, %padding, %dilation, %groups)
303
+ %r = aten::relu(%conv_out)
304
+ %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype)
305
+ return (%r_quant) )";
306
+
307
+ // aten::conv1d - aten::relu_
308
+ std::string conv1d_inplace_relu = R"(
309
+ graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups):
310
+ %a_dequant = aten::dequantize(%a_quant)
311
+ %w_quant : Tensor, %b : Tensor? = quantized::conv1d_unpack(%packed_params)
312
+ %w_dequant = aten::dequantize(%w_quant)
313
+ %conv_out = aten::conv1d(%a_dequant, %w_dequant, %b, %stride, %padding, %dilation, %groups)
314
+ %r = aten::relu_(%conv_out)
315
+ %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype)
316
+ return (%r_quant) )";
317
+
318
+ // quantized::conv1d
319
+ std::string quantized_conv1d = R"(
320
+ graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups):
321
+ %r_quant = quantized::conv1d(%a_quant, %packed_params, %r_scale, %r_zero_point)
322
+ return (%r_quant) )";
323
+
324
+ // quantized::conv1d_relu
325
+ std::string quantized_conv1d_relu = R"(
326
+ graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups):
327
+ %r_quant = quantized::conv1d_relu(%a_quant, %packed_params, %r_scale, %r_zero_point)
328
+ return (%r_quant) )";
329
+
330
+ // aten::conv2d
331
+ std::string conv2d = R"(
332
+ graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups):
333
+ %a_dequant = aten::dequantize(%a_quant)
334
+ %w_quant : Tensor, %b : Tensor? = quantized::conv2d_unpack(%packed_params)
335
+ %w_dequant = aten::dequantize(%w_quant)
336
+ %r = aten::conv2d(%a_dequant, %w_dequant, %b, %stride, %padding, %dilation, %groups)
337
+ %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype)
338
+ return (%r_quant) )";
339
+
340
+ // aten::conv2d - aten::relu
341
+ std::string conv2d_relu = R"(
342
+ graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups):
343
+ %a_dequant = aten::dequantize(%a_quant)
344
+ %w_quant : Tensor, %b : Tensor? = quantized::conv2d_unpack(%packed_params)
345
+ %w_dequant = aten::dequantize(%w_quant)
346
+ %conv_out = aten::conv2d(%a_dequant, %w_dequant, %b, %stride, %padding, %dilation, %groups)
347
+ %r = aten::relu(%conv_out)
348
+ %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype)
349
+ return (%r_quant) )";
350
+
351
+ // aten::conv2d - aten::relu_
352
+ std::string conv2d_inplace_relu = R"(
353
+ graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups):
354
+ %a_dequant = aten::dequantize(%a_quant)
355
+ %w_quant : Tensor, %b : Tensor? = quantized::conv2d_unpack(%packed_params)
356
+ %w_dequant = aten::dequantize(%w_quant)
357
+ %conv_out = aten::conv2d(%a_dequant, %w_dequant, %b, %stride, %padding, %dilation, %groups)
358
+ %r = aten::relu_(%conv_out)
359
+ %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype)
360
+ return (%r_quant) )";
361
+
362
+ // quantized::conv2d
363
+ std::string quantized_conv2d = R"(
364
+ graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups):
365
+ %r_quant = quantized::conv2d(%a_quant, %packed_params, %r_scale, %r_zero_point)
366
+ return (%r_quant) )";
367
+
368
+ // quantized::conv2d_relu
369
+ std::string quantized_conv2d_relu = R"(
370
+ graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups):
371
+ %r_quant = quantized::conv2d_relu(%a_quant, %packed_params, %r_scale, %r_zero_point)
372
+ return (%r_quant) )";
373
+
374
+ // aten::conv3d
375
+ std::string conv3d = R"(
376
+ graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups):
377
+ %a_dequant = aten::dequantize(%a_quant)
378
+ %w_quant : Tensor, %b : Tensor? = quantized::conv3d_unpack(%packed_params)
379
+ %w_dequant = aten::dequantize(%w_quant)
380
+ %r = aten::conv3d(%a_dequant, %w_dequant, %b, %stride, %padding, %dilation, %groups)
381
+ %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype)
382
+ return (%r_quant) )";
383
+
384
+ // aten::conv3d - aten::relu
385
+ std::string conv3d_relu = R"(
386
+ graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups):
387
+ %a_dequant = aten::dequantize(%a_quant)
388
+ %w_quant : Tensor, %b : Tensor? = quantized::conv3d_unpack(%packed_params)
389
+ %w_dequant = aten::dequantize(%w_quant)
390
+ %conv_out = aten::conv3d(%a_dequant, %w_dequant, %b, %stride, %padding, %dilation, %groups)
391
+ %r = aten::relu(%conv_out)
392
+ %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype)
393
+ return (%r_quant) )";
394
+
395
+ // aten::conv3d - aten::relu_
396
+ std::string conv3d_inplace_relu = R"(
397
+ graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups):
398
+ %a_dequant = aten::dequantize(%a_quant)
399
+ %w_quant : Tensor, %b : Tensor? = quantized::conv3d_unpack(%packed_params)
400
+ %w_dequant = aten::dequantize(%w_quant)
401
+ %conv_out = aten::conv3d(%a_dequant, %w_dequant, %b, %stride, %padding, %dilation, %groups)
402
+ %r = aten::relu_(%conv_out)
403
+ %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype)
404
+ return (%r_quant) )";
405
+
406
+ // quantized::conv3d
407
+ std::string quantized_conv3d = R"(
408
+ graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups):
409
+ %r_quant = quantized::conv3d(%a_quant, %packed_params, %r_scale, %r_zero_point)
410
+ return (%r_quant) )";
411
+
412
+ // quantized::conv3d_relu
413
+ std::string quantized_conv3d_relu = R"(
414
+ graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups):
415
+ %r_quant = quantized::conv3d_relu(%a_quant, %packed_params, %r_scale, %r_zero_point)
416
+ return (%r_quant) )";
417
+
418
+ // aten::conv_transpose1d
419
+ std::string conv_transpose1d = R"(
420
+ graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %output_padding, %groups, %dilation):
421
+ %a_dequant = aten::dequantize(%a_quant)
422
+ %w_quant : Tensor, %b : Tensor? = quantized::conv_transpose1d_unpack(%packed_params)
423
+ %w_dequant = aten::dequantize(%w_quant)
424
+ %r = aten::conv_transpose1d(%a_dequant, %w_dequant, %b, %stride, %padding, %output_padding, %groups, %dilation)
425
+ %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype)
426
+ return (%r_quant) )";
427
+
428
+ // quantized::conv_transpose1d
429
+ std::string quantized_conv_transpose1d = R"(
430
+ graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %output_padding, %groups, %dilation):
431
+ %r_quant = quantized::conv_transpose1d(%a_quant, %packed_params, %r_scale, %r_zero_point)
432
+ return (%r_quant) )";
433
+
434
+ // aten::conv_transpose2d
435
+ std::string conv_transpose2d = R"(
436
+ graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %output_padding, %groups, %dilation):
437
+ %a_dequant = aten::dequantize(%a_quant)
438
+ %w_quant : Tensor, %b : Tensor? = quantized::conv_transpose2d_unpack(%packed_params)
439
+ %w_dequant = aten::dequantize(%w_quant)
440
+ %r = aten::conv_transpose2d(%a_dequant, %w_dequant, %b, %stride, %padding, %output_padding, %groups, %dilation)
441
+ %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype)
442
+ return (%r_quant) )";
443
+
444
+ // quantized::conv_transpose2d
445
+ std::string quantized_conv_transpose2d = R"(
446
+ graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %output_padding, %groups, %dilation):
447
+ %r_quant = quantized::conv_transpose2d(%a_quant, %packed_params, %r_scale, %r_zero_point)
448
+ return (%r_quant) )";
449
+
450
+ std::string add_relu = R"(
451
+ graph(%a_quant, %b_quant, %alpha, %scale, %zero_point, %dtype):
452
+ %a_dequant = aten::dequantize(%a_quant)
453
+ %b_dequant = aten::dequantize(%b_quant)
454
+ %r_add = aten::add(%a_dequant, %b_dequant, %alpha)
455
+ %r_relu = aten::relu(%r_add)
456
+ %r = aten::quantize_per_tensor(%r_relu, %scale, %zero_point, %dtype)
457
+ return (%r) )";
458
+
459
+ std::string add_inplace_relu = R"(
460
+ graph(%a_quant, %b_quant, %alpha, %scale, %zero_point, %dtype):
461
+ %a_dequant = aten::dequantize(%a_quant)
462
+ %b_dequant = aten::dequantize(%b_quant)
463
+ %r_add = aten::add(%a_dequant, %b_dequant, %alpha)
464
+ %r_relu = aten::relu_(%r_add)
465
+ %r = aten::quantize_per_tensor(%r_relu, %scale, %zero_point, %dtype)
466
+ return (%r) )";
467
+
468
+ std::string inplace_add_relu = R"(
469
+ graph(%a_quant, %b_quant, %alpha, %scale, %zero_point, %dtype):
470
+ %a_dequant = aten::dequantize(%a_quant)
471
+ %b_dequant = aten::dequantize(%b_quant)
472
+ %r_add = aten::add_(%a_dequant, %b_dequant, %alpha)
473
+ %r_relu = aten::relu(%r_add)
474
+ %r = aten::quantize_per_tensor(%r_relu, %scale, %zero_point, %dtype)
475
+ return (%r) )";
476
+
477
+ std::string inplace_add_inplace_relu = R"(
478
+ graph(%a_quant, %b_quant, %alpha, %scale, %zero_point, %dtype):
479
+ %a_dequant = aten::dequantize(%a_quant)
480
+ %b_dequant = aten::dequantize(%b_quant)
481
+ %r_add = aten::add_(%a_dequant, %b_dequant, %alpha)
482
+ %r_relu = aten::relu_(%r_add)
483
+ %r = aten::quantize_per_tensor(%r_relu, %scale, %zero_point, %dtype)
484
+ return (%r) )";
485
+
486
+ std::string quantized_add_relu = R"(
487
+ graph(%a_quant, %b_quant, %alpha, %scale, %zero_point, %dtype):
488
+ %r = quantized::add_relu(%a_quant, %b_quant, %scale, %zero_point)
489
+ return (%r) )";
490
+
491
+ // aten::linear
492
+ std::string linear = R"(
493
+ graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype):
494
+ %a_dequant = aten::dequantize(%a_quant)
495
+ %w_quant : Tensor, %b : Tensor? = quantized::linear_unpack(%packed_params)
496
+ %w_dequant = aten::dequantize(%w_quant)
497
+ %r = aten::linear(%a_dequant, %w_dequant, %b)
498
+ %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype)
499
+ return (%r_quant) )";
500
+
501
+ std::string linear_relu = R"(
502
+ graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype):
503
+ %a_dequant = aten::dequantize(%a_quant)
504
+ %w_quant : Tensor, %b : Tensor? = quantized::linear_unpack(%packed_params)
505
+ %w_dequant = aten::dequantize(%w_quant)
506
+ %linear_out = aten::linear(%a_dequant, %w_dequant, %b)
507
+ %r = aten::relu(%linear_out)
508
+ %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype)
509
+ return (%r_quant) )";
510
+
511
+ std::string linear_inplace_relu = R"(
512
+ graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype):
513
+ %a_dequant = aten::dequantize(%a_quant)
514
+ %w_quant : Tensor, %b : Tensor? = quantized::linear_unpack(%packed_params)
515
+ %w_dequant = aten::dequantize(%w_quant)
516
+ %linear_out = aten::linear(%a_dequant, %w_dequant, %b)
517
+ %r = aten::relu_(%linear_out)
518
+ %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype)
519
+ return (%r_quant) )";
520
+
521
+ // quantized::linear
522
+ std::string quantized_linear = R"(
523
+ graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype):
524
+ %r = quantized::linear(%a_quant, %packed_params, %r_scale, %r_zero_point)
525
+ return (%r) )";
526
+
527
+ std::string quantized_linear_relu = R"(
528
+ graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype):
529
+ %r = quantized::linear_relu(%a_quant, %packed_params, %r_scale, %r_zero_point)
530
+ return (%r) )";
531
+
532
+ std::string cat = R"(
533
+ graph(%input_quant, %dim, %r_scale, %r_zero_point, %r_dtype):
534
+ %input_dequant = aten::dequantize(%input_quant)
535
+ %r = aten::cat(%input_dequant, %dim)
536
+ %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype)
537
+ return (%r_quant) )";
538
+
539
+ std::string quantized_cat = R"(
540
+ graph(%input_quant, %dim, %r_scale, %r_zero_point, %r_dtype):
541
+ %r_quant = quantized::cat(%input_quant, %dim, %r_scale, %r_zero_point)
542
+ return (%r_quant) )";
543
+
544
+ // aten::add
545
+ std::string add = R"(
546
+ graph(%a_quant, %b_quant, %alpha, %scale, %zero_point, %dtype):
547
+ %a_dequant = aten::dequantize(%a_quant)
548
+ %b_dequant = aten::dequantize(%b_quant)
549
+ %r_add = aten::add(%a_dequant, %b_dequant, %alpha)
550
+ %r = aten::quantize_per_tensor(%r_add, %scale, %zero_point, %dtype)
551
+ return (%r) )";
552
+
553
+ // TODO: add %dtype once https://github.com/pytorch/pytorch/issues/34351
554
+ // is fixed
555
+ // quantized::add
556
+ std::string quantized_add = R"(
557
+ graph(%a_quant, %b_quant, %alpha, %scale, %zero_point, %dtype):
558
+ %r = quantized::add(%a_quant, %b_quant, %scale, %zero_point)
559
+ return (%r) )";
560
+
561
+ // aten::add_
562
+ std::string inplace_add = R"(
563
+ graph(%a_quant, %b_quant, %alpha, %scale, %zero_point, %dtype):
564
+ %a_dequant = aten::dequantize(%a_quant)
565
+ %b_dequant = aten::dequantize(%b_quant)
566
+ %r_add = aten::add_(%a_dequant, %b_dequant, %alpha)
567
+ %r = aten::quantize_per_tensor(%r_add, %scale, %zero_point, %dtype)
568
+ return (%r) )";
569
+
570
+ auto add_scalar = getBinaryOpScalarFusionInfo(
571
+ "aten::add",
572
+ {"%b_scalar", "%alpha"},
573
+ "quantized::add_scalar",
574
+ {"%b_scalar"},
575
+ {aten_add_alpha_is_one, input_b_is_scalar});
576
+
577
+ auto add_scalar_out = getBinaryOpScalarFusionInfo(
578
+ "aten::add_",
579
+ {"%b_scalar", "%alpha"},
580
+ "quantized::add_scalar_out",
581
+ {"%b_scalar", "%a_quant"},
582
+ {aten_add_alpha_is_one, input_b_is_scalar});
583
+
584
+ // quantized::add_scalar_relu -- fusing quantized::add_scalar
585
+ // and aten::relu
586
+ auto quantized_add_scalar_relu_pattern = R"(
587
+ graph(%a_quant, %b_scalar):
588
+ %r_add = quantized::add_scalar(%a_quant, %b_scalar)
589
+ %r = aten::relu(%r_add)
590
+ return (%r) )";
591
+
592
+ auto quantized_add_scalar_inplace_relu_pattern = R"(
593
+ graph(%a_quant, %b_scalar):
594
+ %r_add = quantized::add_scalar(%a_quant, %b_scalar)
595
+ %r = aten::relu_(%r_add)
596
+ return (%r) )";
597
+
598
+ auto quantized_add_scalar_relu_replacement = R"(
599
+ graph(%a_quant, %b_scalar):
600
+ %r = quantized::add_scalar_relu(%a_quant, %b_scalar)
601
+ return (%r) )";
602
+
603
+ // quantized::add_scalar_relu_out -- fusing quantized::add_scalar_out
604
+ // and aten::relu
605
+ auto quantized_add_scalar_relu_out_pattern = R"(
606
+ graph(%a_quant, %b_scalar):
607
+ %r_add = quantized::add_scalar_out(%a_quant, %b_scalar, %a_quant)
608
+ %r = aten::relu(%r_add)
609
+ return (%r) )";
610
+
611
+ auto quantized_add_scalar_inplace_relu_out_pattern = R"(
612
+ graph(%a_quant, %b_scalar):
613
+ %r_add = quantized::add_scalar_out(%a_quant, %b_scalar, %a_quant)
614
+ %r = aten::relu_(%r_add)
615
+ return (%r) )";
616
+
617
+ auto quantized_add_scalar_relu_out_replacement = R"(
618
+ graph(%a_quant, %b_scalar):
619
+ %r = quantized::add_scalar_relu_out(%a_quant, %b_scalar, %a_quant)
620
+ return (%r) )";
621
+
622
+ // quantized::batch_norm
623
+ std::string batch_norm = R"(
624
+ graph(%a_quant, %weight, %bias, %mean, %var, %training, %eaf, %eps, %7, %scale, %zero_point, %scalar_type):
625
+ %a_dequant = aten::dequantize(%a_quant)
626
+ %r_bn = aten::batch_norm(%a_dequant, %weight, %bias, %mean, %var, %training, %eaf, %eps, %7)
627
+ %r = aten::quantize_per_tensor(%r_bn, %scale, %zero_point, %scalar_type)
628
+ return (%r) )";
629
+ std::string quantized_batch_norm = R"(
630
+ graph(%a_quant, %weight, %bias, %mean, %var, %training, %eaf, %eps, %7, %scale, %zero_point, %scalar_type):
631
+ %r = quantized::batch_norm(%a_quant, %weight, %bias, %mean, %var, %eps, %scale, %zero_point)
632
+ return (%r) )";
633
+
634
+ std::string batch_norm_relu = R"(
635
+ graph(%a_quant, %weight, %bias, %mean, %var, %training, %eaf, %eps, %7, %scale, %zero_point, %scalar_type):
636
+ %a_dequant = aten::dequantize(%a_quant)
637
+ %bn_out = aten::batch_norm(%a_dequant, %weight, %bias, %mean, %var, %training, %eaf, %eps, %7)
638
+ %relu = aten::relu(%bn_out)
639
+ %r = aten::quantize_per_tensor(%relu, %scale, %zero_point, %scalar_type)
640
+ return (%r) )";
641
+ std::string batch_norm_inplace_relu = R"(
642
+ graph(%a_quant, %weight, %bias, %mean, %var, %training, %eaf, %eps, %7, %scale, %zero_point, %scalar_type):
643
+ %a_dequant = aten::dequantize(%a_quant)
644
+ %bn_out = aten::batch_norm(%a_dequant, %weight, %bias, %mean, %var, %training, %eaf, %eps, %7)
645
+ %relu = aten::relu_(%bn_out)
646
+ %r = aten::quantize_per_tensor(%relu, %scale, %zero_point, %scalar_type)
647
+ return (%r) )";
648
+
649
+ std::string quantized_batch_norm_relu = R"(
650
+ graph(%a_quant, %weight, %bias, %mean, %var, %training, %eaf, %eps, %7, %scale, %zero_point, %scalar_type):
651
+ %r = quantized::batch_norm_relu(%a_quant, %weight, %bias, %mean, %var, %eps, %scale, %zero_point)
652
+ return (%r) )";
653
+
654
+ // aten::mul
655
+ std::string mul = R"(
656
+ graph(%a_quant, %b_quant, %scale, %zero_point, %dtype):
657
+ %a_dequant = aten::dequantize(%a_quant)
658
+ %b_dequant = aten::dequantize(%b_quant)
659
+ %r_mul = aten::mul(%a_dequant, %b_dequant)
660
+ %r = aten::quantize_per_tensor(%r_mul, %scale, %zero_point, %dtype)
661
+ return (%r) )";
662
+
663
+ // aten::mul_
664
+ std::string inplace_mul = R"(
665
+ graph(%a_quant, %b_quant, %scale, %zero_point, %dtype):
666
+ %a_dequant = aten::dequantize(%a_quant)
667
+ %b_dequant = aten::dequantize(%b_quant)
668
+ %r_mul = aten::mul_(%a_dequant, %b_dequant)
669
+ %r = aten::quantize_per_tensor(%r_mul, %scale, %zero_point, %dtype)
670
+ return (%r) )";
671
+
672
+ // quantized::mul
673
+ std::string quantized_mul = R"(
674
+ graph(%a_quant, %b_quant, %scale, %zero_point, %dtype):
675
+ %r = quantized::mul(%a_quant, %b_quant, %scale, %zero_point)
676
+ return (%r) )";
677
+
678
+ auto mul_scalar = getBinaryOpScalarFusionInfo(
679
+ "aten::mul",
680
+ {"%b_scalar"},
681
+ "quantized::mul_scalar",
682
+ {"%b_scalar"},
683
+ {input_b_is_scalar});
684
+
685
+ auto mul_scalar_out = getBinaryOpScalarFusionInfo(
686
+ "aten::mul_",
687
+ {"%b_scalar"},
688
+ "quantized::mul_scalar_out",
689
+ {"%b_scalar", "%a_quant"},
690
+ {input_b_is_scalar});
691
+
692
+ // quantized::mul_relu
693
+ std::string mul_relu = R"(
694
+ graph(%a_quant, %b_quant, %scale, %zero_point, %dtype):
695
+ %a_dequant = aten::dequantize(%a_quant)
696
+ %b_dequant = aten::dequantize(%b_quant)
697
+ %r_mul = aten::mul(%a_dequant, %b_dequant)
698
+ %r_relu = aten::relu(%r_mul)
699
+ %r = aten::quantize_per_tensor(%r_relu, %scale, %zero_point, %dtype)
700
+ return (%r) )";
701
+
702
+ std::string mul_inplace_relu = R"(
703
+ graph(%a_quant, %b_quant, %scale, %zero_point, %dtype):
704
+ %a_dequant = aten::dequantize(%a_quant)
705
+ %b_dequant = aten::dequantize(%b_quant)
706
+ %r_mul = aten::mul(%a_dequant, %b_dequant)
707
+ %r_relu = aten::relu_(%r_mul)
708
+ %r = aten::quantize_per_tensor(%r_relu, %scale, %zero_point, %dtype)
709
+ return (%r) )";
710
+
711
+ std::string inplace_mul_relu = R"(
712
+ graph(%a_quant, %b_quant, %scale, %zero_point, %dtype):
713
+ %a_dequant = aten::dequantize(%a_quant)
714
+ %b_dequant = aten::dequantize(%b_quant)
715
+ %r_mul = aten::mul_(%a_dequant, %b_dequant)
716
+ %r_relu = aten::relu(%r_mul)
717
+ %r = aten::quantize_per_tensor(%r_relu, %scale, %zero_point, %dtype)
718
+ return (%r) )";
719
+
720
+ std::string inplace_mul_inplace_relu = R"(
721
+ graph(%a_quant, %b_quant, %scale, %zero_point, %dtype):
722
+ %a_dequant = aten::dequantize(%a_quant)
723
+ %b_dequant = aten::dequantize(%b_quant)
724
+ %r_mul = aten::mul_(%a_dequant, %b_dequant)
725
+ %r_relu = aten::relu_(%r_mul)
726
+ %r = aten::quantize_per_tensor(%r_relu, %scale, %zero_point, %dtype)
727
+ return (%r) )";
728
+
729
+ std::string quantized_mul_relu = R"(
730
+ graph(%a_quant, %b_quant, %scale, %zero_point, %dtype):
731
+ %r = quantized::mul_relu(%a_quant, %b_quant, %scale, %zero_point)
732
+ return (%r) )";
733
+
734
+ // quantized::mul_scalar_relu -- fusing quantized::mul_scalar
735
+ // and aten::relu
736
+ auto quantized_mul_scalar_relu_pattern = R"(
737
+ graph(%a_quant, %b_scalar):
738
+ %r_mul = quantized::mul_scalar(%a_quant, %b_scalar)
739
+ %r = aten::relu(%r_mul)
740
+ return (%r) )";
741
+
742
+ auto quantized_mul_scalar_inplace_relu_pattern = R"(
743
+ graph(%a_quant, %b_scalar):
744
+ %r_mul = quantized::mul_scalar(%a_quant, %b_scalar)
745
+ %r = aten::relu_(%r_mul)
746
+ return (%r) )";
747
+
748
+ auto quantized_mul_scalar_relu_replacement = R"(
749
+ graph(%a_quant, %b_scalar):
750
+ %r = quantized::mul_scalar_relu(%a_quant, %b_scalar)
751
+ return (%r) )";
752
+
753
+ // quantized::mul_scalar_relu_out -- fusing quantized::mul_scalar_out
754
+ // and aten::relu
755
+ auto quantized_mul_scalar_relu_out_pattern = R"(
756
+ graph(%a_quant, %b_scalar):
757
+ %r_mul = quantized::mul_scalar_out(%a_quant, %b_scalar, %a_quant)
758
+ %r = aten::relu(%r_mul)
759
+ return (%r) )";
760
+
761
+ auto quantized_mul_scalar_inplace_relu_out_pattern = R"(
762
+ graph(%a_quant, %b_scalar):
763
+ %r_mul = quantized::mul_scalar_out(%a_quant, %b_scalar, %a_quant)
764
+ %r = aten::relu_(%r_mul)
765
+ return (%r) )";
766
+
767
+ auto quantized_mul_scalar_relu_out_replacement = R"(
768
+ graph(%a_quant, %b_scalar):
769
+ %r = quantized::mul_scalar_relu_out(%a_quant, %b_scalar, %a_quant)
770
+ return (%r) )";
771
+
772
+ // quantized::elu
773
+ std::string elu = R"(
774
+ graph(%a_quant, %alpha, %scale, %input_scale, %r_scale, %r_zero_point, %r_dtype):
775
+ %a_dequant = aten::dequantize(%a_quant)
776
+ %r = aten::elu(%a_dequant, %alpha, %scale, %input_scale)
777
+ %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype)
778
+ return (%r_quant) )";
779
+
780
+ std::string quantized_elu = R"(
781
+ graph(%a_quant, %alpha, %scale, %input_scale, %r_scale, %r_zero_point, %r_dtype):
782
+ %r_quant = quantized::elu(%a_quant, %r_scale, %r_zero_point, %alpha, %scale, %input_scale)
783
+ return (%r_quant) )";
784
+
785
+ std::string elu_ = R"(
786
+ graph(%a_quant, %alpha, %scale, %input_scale, %r_scale, %r_zero_point, %r_dtype):
787
+ %a_dequant = aten::dequantize(%a_quant)
788
+ %r = aten::elu_(%a_dequant, %alpha, %scale, %input_scale)
789
+ %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype)
790
+ return (%r_quant) )";
791
+
792
+ // ============= General Ops that inherit quantization parameters from input
793
+ // tensor =============
794
+ auto avg_pool1d = getInputTensorQParamOpFusionInfo(
795
+ "aten::avg_pool1d",
796
+ {"%kernel_size",
797
+ "%stride",
798
+ "%padding",
799
+ "%ceil_mode",
800
+ "%count_include_pad"});
801
+
802
+ auto avg_pool2d = getInputTensorQParamOpFusionInfo(
803
+ "aten::avg_pool2d",
804
+ {"%kernel_size",
805
+ "%stride",
806
+ "%padding",
807
+ "%ceil_mode",
808
+ "%count_include_pad",
809
+ "%divisor_override"});
810
+
811
+ std::string common_general_value_op = R"(
812
+ %r_scale : float = aten::q_scale(%a_quant)
813
+ %r_zero_point : int = aten::q_zero_point(%a_quant)
814
+ %r_dtype : int = prim::dtype(%a_quant)
815
+ %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype)
816
+ return (%r_quant) )";
817
+
818
+ auto avg_pool3d = getInputTensorQParamOpFusionInfo(
819
+ "aten::avg_pool3d",
820
+ {"%kernel_size",
821
+ "%stride",
822
+ "%padding",
823
+ "%ceil_mode",
824
+ "%count_include_pad",
825
+ "%divisor_override"});
826
+
827
+ auto adaptive_avg_pool1d = getInputTensorQParamOpFusionInfo(
828
+ "aten::adaptive_avg_pool1d", {"%output_size"});
829
+
830
+ auto adaptive_avg_pool2d = getInputTensorQParamOpFusionInfo(
831
+ "aten::adaptive_avg_pool2d", {"%output_size"});
832
+
833
+ auto adaptive_avg_pool3d = getInputTensorQParamOpFusionInfo(
834
+ "aten::adaptive_avg_pool3d", {"%output_size"});
835
+
836
+ auto mean1 = getInputTensorQParamOpFusionInfo("aten::mean", {"%dim"});
837
+
838
+ auto mean2 = getInputTensorQParamOpFusionInfo(
839
+ "aten::mean", {"%dim", "%keepdim", "%out"});
840
+
841
+ auto upsample_nearest1d_vec = getInputTensorQParamOpFusionInfo(
842
+ "aten::upsample_nearest1d", {"%output_size", "%scale_factors"});
843
+
844
+ auto upsample_nearest2d_vec = getInputTensorQParamOpFusionInfo(
845
+ "aten::upsample_nearest2d", {"%output_size", "%scale_factors"});
846
+
847
+ auto upsample_nearest3d_vec = getInputTensorQParamOpFusionInfo(
848
+ "aten::upsample_nearest3d", {"%output_size", "%scale_factors"});
849
+
850
+ auto upsample_linear1d_vec = getInputTensorQParamOpFusionInfo(
851
+ "aten::upsample_linear1d",
852
+ {"%output_size", "%align_corners", "%scale_factors"});
853
+
854
+ auto upsample_bilinear2d_vec = getInputTensorQParamOpFusionInfo(
855
+ "aten::upsample_bilinear2d",
856
+ {"%output_size", "%align_corners", "%scale_factors"});
857
+
858
+ auto upsample_trilinear3d_vec = getInputTensorQParamOpFusionInfo(
859
+ "aten::upsample_trilinear3d",
860
+ {"%output_size", "%align_corners", "%scale_factors"});
861
+
862
+ auto upsample_nearest1d = getInputTensorQParamOpFusionInfo(
863
+ "aten::upsample_nearest1d", {"%output_size", "%scales"});
864
+
865
+ auto upsample_nearest2d = getInputTensorQParamOpFusionInfo(
866
+ "aten::upsample_nearest2d", {"%output_size", "%scale_h", "%scale_w"});
867
+
868
+ auto upsample_nearest3d = getInputTensorQParamOpFusionInfo(
869
+ "aten::upsample_nearest3d",
870
+ {"%output_size", "%scale_d", "%scale_h", "%scale_w"});
871
+
872
+ auto upsample_linear1d = getInputTensorQParamOpFusionInfo(
873
+ "aten::upsample_linear1d", {"%output_size", "%align_corners", "%scales"});
874
+
875
+ auto upsample_bilinear2d = getInputTensorQParamOpFusionInfo(
876
+ "aten::upsample_bilinear2d",
877
+ {"%output_size", "%align_corners", "%scale_h", "%scale_w"});
878
+
879
+ auto upsample_trilinear3d = getInputTensorQParamOpFusionInfo(
880
+ "aten::upsample_trilinear3d",
881
+ {"%output_size", "%align_corners", "%scale_d", "%scale_h", "%scale_w"});
882
+
883
+ auto clamp = getClampOpFusionInfo("aten::clamp", {"%min", "%max"});
884
+
885
+ auto hardtanh = getClampOpFusionInfo("aten::hardtanh", {"%min", "%max"});
886
+
887
+ auto hardtanh_ = getClampOpFusionInfo("aten::hardtanh_", {"%min", "%max"});
888
+
889
+ auto leaky_relu =
890
+ getInputTensorQParamOpFusionInfo("aten::leaky_relu", {"%negative_slope"});
891
+
892
+ auto leaky_relu_ = getInputTensorQParamOpFusionInfo(
893
+ "aten::leaky_relu_", {"%negative_slope"});
894
+
895
+ // Ops with fixed quantization parameters
896
+ auto hardsigmoid = getFixedQParamOpFusionInfo("aten::hardsigmoid", {}, false);
897
+
898
+ auto hardsigmoid_ =
899
+ getFixedQParamOpFusionInfo("aten::hardsigmoid_", {}, false);
900
+
901
+ auto sigmoid = getFixedQParamOpFusionInfo("aten::sigmoid", {}, false);
902
+
903
+ auto sigmoid_ = getFixedQParamOpFusionInfo("aten::sigmoid_", {}, false);
904
+
905
+ auto tanh = getFixedQParamOpFusionInfo("aten::tanh", {}, true);
906
+
907
+ auto tanh_ = getFixedQParamOpFusionInfo("aten::tanh_", {}, true);
908
+
909
+ auto hardswish = getObservedQParamOpFusionInfo(
910
+ "aten::hardswish", "quantized::hardswish", {}, {});
911
+
912
+ auto hardswish_ = getObservedQParamOpFusionInfo(
913
+ "aten::hardswish_", "quantized::hardswish", {}, {});
914
+
915
+ auto layer_norm = getObservedQParamOpFusionInfo(
916
+ "aten::layer_norm",
917
+ "quantized::layer_norm",
918
+ {"%normalized_shape", "%weight", "%bias", "%eps", "%cudnn_enabled"},
919
+ {"%normalized_shape", "%weight", "%bias", "%eps"});
920
+
921
+ auto group_norm = getObservedQParamOpFusionInfo(
922
+ "aten::group_norm",
923
+ "quantized::group_norm",
924
+ {"%num_groups", "%weight", "%bias", "%eps", "%cudnn_enabled"},
925
+ {"%num_groups", "%weight", "%bias", "%eps"});
926
+
927
+ auto instance_norm = getObservedQParamOpFusionInfo(
928
+ "aten::instance_norm",
929
+ "quantized::instance_norm",
930
+ {"%weight",
931
+ "%bias",
932
+ "%running_mean",
933
+ "%running_var",
934
+ "%use_input_stats",
935
+ "%momentum",
936
+ "%eps",
937
+ "%cudnn_enabled"},
938
+ {"%weight", "%bias", "%eps"});
939
+
940
+ return {
941
+ {"quantized::conv1d", std::move(conv1d), std::move(quantized_conv1d)},
942
+ {"quantized::conv1d_relu", std::move(conv1d_relu), quantized_conv1d_relu},
943
+ {"quantized::conv1d_relu",
944
+ std::move(conv1d_inplace_relu),
945
+ std::move(quantized_conv1d_relu)},
946
+ {"quantized::conv2d", std::move(conv2d), std::move(quantized_conv2d)},
947
+ {"quantized::conv2d_relu", std::move(conv2d_relu), quantized_conv2d_relu},
948
+ {"quantized::conv2d_relu",
949
+ std::move(conv2d_inplace_relu),
950
+ std::move(quantized_conv2d_relu)},
951
+ {"quantized::conv3d", std::move(conv3d), std::move(quantized_conv3d)},
952
+ {"quantized::conv3d_relu", std::move(conv3d_relu), quantized_conv3d_relu},
953
+ {"quantized::conv3d_relu",
954
+ std::move(conv3d_inplace_relu),
955
+ std::move(quantized_conv3d_relu)},
956
+ {"quantized::conv_transpose1d",
957
+ std::move(conv_transpose1d),
958
+ std::move(quantized_conv_transpose1d)},
959
+ {"quantized::conv_transpose2d",
960
+ std::move(conv_transpose2d),
961
+ std::move(quantized_conv_transpose2d)},
962
+ {"quantized::linear", std::move(linear), std::move(quantized_linear)},
963
+ {"quantized::linear_relu", std::move(linear_relu), quantized_linear_relu},
964
+ {"quantized::linear_relu",
965
+ std::move(linear_inplace_relu),
966
+ std::move(quantized_linear_relu)},
967
+ {"quantized::add_relu",
968
+ std::move(add_relu),
969
+ quantized_add_relu,
970
+ {aten_add_alpha_is_one}},
971
+ {"quantized::add_relu",
972
+ std::move(add_inplace_relu),
973
+ quantized_add_relu,
974
+ {aten_add_alpha_is_one}},
975
+ {"quantized::add_relu",
976
+ std::move(inplace_add_relu),
977
+ quantized_add_relu,
978
+ {aten_add_alpha_is_one}},
979
+ {"quantized::add_relu",
980
+ std::move(inplace_add_inplace_relu),
981
+ std::move(quantized_add_relu),
982
+ {aten_add_alpha_is_one}},
983
+ std::move(add_scalar),
984
+ std::move(add_scalar_out),
985
+ // note that these must come after quantized::add_scalar and
986
+ // quantized::add_scalar_out patterns
987
+ {"quantized::add_scalar_relu",
988
+ quantized_add_scalar_relu_pattern,
989
+ quantized_add_scalar_relu_replacement},
990
+ {"quantized::add_scalar_relu",
991
+ quantized_add_scalar_inplace_relu_pattern,
992
+ quantized_add_scalar_relu_replacement},
993
+ {"quantized::add_scalar_relu_out",
994
+ quantized_add_scalar_relu_out_pattern,
995
+ quantized_add_scalar_relu_out_replacement},
996
+ {"quantized::add_scalar_relu_out",
997
+ quantized_add_scalar_inplace_relu_out_pattern,
998
+ quantized_add_scalar_relu_out_replacement},
999
+ {"quantized::add",
1000
+ std::move(add),
1001
+ quantized_add,
1002
+ {aten_add_alpha_is_one}},
1003
+ {"quantized::add",
1004
+ std::move(inplace_add),
1005
+ std::move(quantized_add),
1006
+ {aten_add_alpha_is_one}},
1007
+ {"quantized::cat", std::move(cat), std::move(quantized_cat)},
1008
+ {"quantized::batch_norm",
1009
+ std::move(batch_norm),
1010
+ std::move(quantized_batch_norm)},
1011
+ {"quantized::batch_norm_relu",
1012
+ std::move(batch_norm_relu),
1013
+ quantized_batch_norm_relu},
1014
+ {"quantized::batch_norm_relu",
1015
+ std::move(batch_norm_inplace_relu),
1016
+ std::move(quantized_batch_norm_relu)},
1017
+ std::move(mul_scalar),
1018
+ std::move(mul_scalar_out),
1019
+ // note that these must come after quantized::mul_scalar and
1020
+ // quantized::mul_scalar_out patterns
1021
+ {"quantized::mul_scalar_relu",
1022
+ quantized_mul_scalar_relu_pattern,
1023
+ quantized_mul_scalar_relu_replacement},
1024
+ {"quantized::mul_scalar_relu",
1025
+ quantized_mul_scalar_inplace_relu_pattern,
1026
+ quantized_mul_scalar_relu_replacement},
1027
+ {"quantized::mul_scalar_relu_out",
1028
+ quantized_mul_scalar_relu_out_pattern,
1029
+ quantized_mul_scalar_relu_out_replacement},
1030
+ {"quantized::mul_scalar_relu_out",
1031
+ quantized_mul_scalar_inplace_relu_out_pattern,
1032
+ quantized_mul_scalar_relu_out_replacement},
1033
+ {"quantized::mul_relu", std::move(mul_relu), quantized_mul_relu},
1034
+ {"quantized::mul_relu", std::move(mul_inplace_relu), quantized_mul_relu},
1035
+ {"quantized::mul_relu", std::move(inplace_mul_relu), quantized_mul_relu},
1036
+ {"quantized::mul_relu",
1037
+ std::move(inplace_mul_inplace_relu),
1038
+ std::move(quantized_mul_relu)},
1039
+ {"quantized::mul", std::move(mul), quantized_mul},
1040
+ {"quantized::mul", std::move(inplace_mul), std::move(quantized_mul)},
1041
+ std::move(hardswish),
1042
+ std::move(hardswish_),
1043
+ std::move(layer_norm),
1044
+ std::move(group_norm),
1045
+ std::move(instance_norm),
1046
+ {"quantized::elu", std::move(elu), quantized_elu},
1047
+ {"quantized::elu_", std::move(elu_), std::move(quantized_elu)},
1048
+ std::move(avg_pool1d),
1049
+ std::move(avg_pool2d),
1050
+ std::move(avg_pool3d),
1051
+ std::move(adaptive_avg_pool1d),
1052
+ std::move(adaptive_avg_pool2d),
1053
+ std::move(adaptive_avg_pool3d),
1054
+ std::move(mean1),
1055
+ std::move(mean2),
1056
+ std::move(upsample_nearest1d),
1057
+ std::move(upsample_nearest2d),
1058
+ std::move(upsample_nearest3d),
1059
+ std::move(upsample_linear1d),
1060
+ std::move(upsample_bilinear2d),
1061
+ std::move(upsample_trilinear3d),
1062
+ std::move(upsample_nearest1d_vec),
1063
+ std::move(upsample_nearest2d_vec),
1064
+ std::move(upsample_nearest3d_vec),
1065
+ std::move(upsample_linear1d_vec),
1066
+ std::move(upsample_bilinear2d_vec),
1067
+ std::move(upsample_trilinear3d_vec),
1068
+ std::move(clamp),
1069
+ std::move(hardtanh),
1070
+ std::move(hardtanh_),
1071
+ std::move(leaky_relu),
1072
+ std::move(leaky_relu_),
1073
+ // fixed qparam ops
1074
+ std::move(hardsigmoid),
1075
+ std::move(hardsigmoid_),
1076
+ std::move(sigmoid),
1077
+ std::move(sigmoid_),
1078
+ std::move(tanh),
1079
+ std::move(tanh_),
1080
+ };
1081
+ }
1082
+
1083
+ inline std::vector<QuantFusionInfo>
1084
+ dynamic_quantized_linear_pattern_and_replacements() {
1085
+ std::string linear_dynamic = R"(
1086
+ graph(%packed_params, %a):
1087
+ %w_quant : Tensor, %b : Tensor? = quantized::linear_unpack(%packed_params)
1088
+ %w_dequant = aten::dequantize(%w_quant)
1089
+ %r = aten::linear(%a, %w_dequant, %b)
1090
+ return (%r) )";
1091
+
1092
+ // This pattern ignores reduce range
1093
+ // Set the reduce range to default to true, since qnnpack backend ignores this
1094
+ // argument.
1095
+ std::string quantized_linear_dynamic = R"(
1096
+ graph(%packed_params, %a):
1097
+ %reduce_range : bool = prim::Constant[value=1]()
1098
+ %r = quantized::linear_dynamic(%a, %packed_params, %reduce_range)
1099
+ return (%r) )";
1100
+
1101
+ return {
1102
+ {"quantized::linear_dynamic",
1103
+ std::move(linear_dynamic),
1104
+ std::move(quantized_linear_dynamic)},
1105
+ };
1106
+ }
1107
+
1108
+ static std::vector<QuantFusionInfo>
1109
+ dynamic_quant_fusion_pattern_and_replacements() {
1110
+ std::string linear_dynamic = R"(
1111
+ graph(%packed_params, %a, %reduce_range, %a_dtype):
1112
+ %a_scale : float, %a_zero_point : int = aten::_choose_qparams_per_tensor(%a, %reduce_range)
1113
+ %a_quant = aten::quantize_per_tensor(%a, %a_scale, %a_zero_point, %a_dtype)
1114
+ %a_dequant = aten::dequantize(%a_quant)
1115
+ %w_quant : Tensor, %b : Tensor? = quantized::linear_unpack(%packed_params)
1116
+ %w_dequant = aten::dequantize(%w_quant)
1117
+ %r = aten::linear(%a_dequant, %w_dequant, %b)
1118
+ return (%r) )";
1119
+
1120
+ std::string quantized_linear_dynamic = R"(
1121
+ graph(%packed_params, %a, %reduce_range, %a_dtype):
1122
+ %r = quantized::linear_dynamic(%a, %packed_params, %reduce_range)
1123
+ return (%r) )";
1124
+
1125
+ std::string linear_dynamic_fp16 = R"(
1126
+ graph(%packed_params, %a):
1127
+ %w_unpacked : Tensor, %b : Tensor? = quantized::linear_unpack_fp16(%packed_params)
1128
+ %r = aten::linear(%a, %w_unpacked, %b)
1129
+ return (%r) )";
1130
+
1131
+ std::string quantized_linear_dynamic_fp16 = R"(
1132
+ graph(%packed_params, %a):
1133
+ %r = quantized::linear_dynamic_fp16(%a, %packed_params)
1134
+ return (%r) )";
1135
+
1136
+ return {
1137
+ {"quantized::linear_dynamic",
1138
+ std::move(linear_dynamic),
1139
+ std::move(quantized_linear_dynamic)},
1140
+ {"quantized::linear_dynamic_fp16",
1141
+ std::move(linear_dynamic_fp16),
1142
+ std::move(quantized_linear_dynamic_fp16)},
1143
+ };
1144
+ }
1145
+
1146
+ static std::vector<QuantFusionInfo> linear_prepack_unpack_patterns() {
1147
+ std::string linear_with_quant = R"(
1148
+ graph(%a_dequant, %w_quant, %b):
1149
+ %w_dequant = aten::dequantize(%w_quant)
1150
+ %r = aten::linear(%a_dequant, %w_dequant, %b)
1151
+ return (%r) )";
1152
+
1153
+ std::string linear_with_quant_prepack = R"(
1154
+ graph(%a_dequant, %w_quant, %b):
1155
+ %packed_params = quantized::linear_prepack(%w_quant, %b)
1156
+ %w_quant_unpacked : Tensor, %b_unpacked : Tensor? = quantized::linear_unpack(%packed_params)
1157
+ %w_dequant = aten::dequantize(%w_quant_unpacked)
1158
+ %r = aten::linear(%a_dequant, %w_dequant, %b_unpacked)
1159
+ return (%r) )";
1160
+ std::string linear_fp16_with_cast = R"(
1161
+ graph(%w, %a_dq, %b):
1162
+ %fp16_tensor = aten::_saturate_weight_to_fp16(%w)
1163
+ %r = aten::linear(%a_dq, %fp16_tensor, %b)
1164
+ return (%r) )";
1165
+ std::string linear_fp16_with_prepack = R"(
1166
+ graph(%w, %a_dq, %b):
1167
+ %packed_params = quantized::linear_prepack_fp16(%w, %b)
1168
+ %w_unpacked : Tensor, %b_unpacked : Tensor? = quantized::linear_unpack_fp16(%packed_params)
1169
+ %r = aten::linear(%a_dq, %w_unpacked, %b_unpacked)
1170
+ return (%r) )";
1171
+
1172
+ return {
1173
+ {"linear_prepack_unpack",
1174
+ std::move(linear_with_quant),
1175
+ std::move(linear_with_quant_prepack)},
1176
+ {"linear_fp16_prepack_unpack",
1177
+ std::move(linear_fp16_with_cast),
1178
+ std::move(linear_fp16_with_prepack)},
1179
+ };
1180
+ }
1181
+
1182
+ static std::vector<QuantFusionInfo> conv_prepack_unpack_patterns() {
1183
+ std::string conv1d_with_quant = R"(
1184
+ graph(%a_dequant, %w_quant, %b, %stride, %padding, %dilation, %groups):
1185
+ %w_dequant = aten::dequantize(%w_quant)
1186
+ %r = aten::conv1d(%a_dequant, %w_dequant, %b, %stride, %padding, %dilation, %groups)
1187
+ return (%r) )";
1188
+
1189
+ std::string conv1d_with_quant_prepack = R"(
1190
+ graph(%a_dequant, %w_quant, %b, %stride, %padding, %dilation, %groups):
1191
+ %packed_params : __torch__.torch.classes.quantized.Conv2dPackedParamsBase = quantized::conv1d_prepack(%w_quant, %b, %stride, %padding, %dilation, %groups)
1192
+ %w_quant_unpacked : Tensor, %b_unpacked : Tensor? = quantized::conv1d_unpack(%packed_params)
1193
+ %w_dequant = aten::dequantize(%w_quant_unpacked)
1194
+ %r = aten::conv1d(%a_dequant, %w_dequant, %b_unpacked, %stride, %padding, %dilation, %groups)
+ return (%r) )";
+
+ std::string conv2d_with_quant = R"(
+ graph(%a_dequant, %w_quant, %b, %stride, %padding, %dilation, %groups):
+ %w_dequant = aten::dequantize(%w_quant)
+ %r = aten::conv2d(%a_dequant, %w_dequant, %b, %stride, %padding, %dilation, %groups)
+ return (%r) )";
+
+ std::string conv2d_with_quant_prepack = R"(
+ graph(%a_dequant, %w_quant, %b, %stride, %padding, %dilation, %groups):
+ %packed_params : __torch__.torch.classes.quantized.Conv2dPackedParamsBase = quantized::conv2d_prepack(%w_quant, %b, %stride, %padding, %dilation, %groups)
+ %w_quant_unpacked : Tensor, %b_unpacked : Tensor? = quantized::conv2d_unpack(%packed_params)
+ %w_dequant = aten::dequantize(%w_quant_unpacked)
+ %r = aten::conv2d(%a_dequant, %w_dequant, %b_unpacked, %stride, %padding, %dilation, %groups)
+ return (%r) )";
+
+ std::string conv3d_with_quant = R"(
+ graph(%a_dequant, %w_quant, %b, %stride, %padding, %dilation, %groups):
+ %w_dequant = aten::dequantize(%w_quant)
+ %r = aten::conv3d(%a_dequant, %w_dequant, %b, %stride, %padding, %dilation, %groups)
+ return (%r) )";
+
+ std::string conv3d_with_quant_prepack = R"(
+ graph(%a_dequant, %w_quant, %b, %stride, %padding, %dilation, %groups):
+ %packed_params : __torch__.torch.classes.quantized.Conv3dPackedParamsBase = quantized::conv3d_prepack(%w_quant, %b, %stride, %padding, %dilation, %groups)
+ %w_quant_unpacked : Tensor, %b_unpacked : Tensor? = quantized::conv3d_unpack(%packed_params)
+ %w_dequant = aten::dequantize(%w_quant_unpacked)
+ %r = aten::conv3d(%a_dequant, %w_dequant, %b_unpacked, %stride, %padding, %dilation, %groups)
+ return (%r) )";
+
+ std::string conv_transpose1d_with_quant = R"(
+ graph(%a_dequant, %w_quant, %b, %stride, %padding, %output_padding, %groups, %dilation):
+ %w_dequant = aten::dequantize(%w_quant)
+ %r = aten::conv_transpose1d(%a_dequant, %w_dequant, %b, %stride, %padding, %output_padding, %groups, %dilation)
+ return (%r) )";
+
+ std::string conv_transpose1d_with_quant_prepack = R"(
+ graph(%a_dequant, %w_quant, %b, %stride, %padding, %output_padding, %groups, %dilation):
+ %packed_params : __torch__.torch.classes.quantized.Conv2dPackedParamsBase = quantized::conv_transpose1d_prepack(%w_quant, %b, %stride, %padding, %output_padding, %dilation, %groups)
+ %w_quant_unpacked : Tensor, %b_unpacked : Tensor? = quantized::conv_transpose1d_unpack(%packed_params)
+ %w_dequant = aten::dequantize(%w_quant_unpacked)
+ %r = aten::conv_transpose1d(%a_dequant, %w_dequant, %b_unpacked, %stride, %padding, %output_padding, %groups, %dilation)
+ return (%r) )";
+
+ std::string conv_transpose2d_with_quant = R"(
+ graph(%a_dequant, %w_quant, %b, %stride, %padding, %output_padding, %groups, %dilation):
+ %w_dequant = aten::dequantize(%w_quant)
+ %r = aten::conv_transpose2d(%a_dequant, %w_dequant, %b, %stride, %padding, %output_padding, %groups, %dilation)
+ return (%r) )";
+
+ std::string conv_transpose2d_with_quant_prepack = R"(
+ graph(%a_dequant, %w_quant, %b, %stride, %padding, %output_padding, %groups, %dilation):
+ %packed_params : __torch__.torch.classes.quantized.Conv2dPackedParamsBase = quantized::conv_transpose2d_prepack(%w_quant, %b, %stride, %padding, %output_padding, %dilation, %groups)
+ %w_quant_unpacked : Tensor, %b_unpacked : Tensor? = quantized::conv_transpose2d_unpack(%packed_params)
+ %w_dequant = aten::dequantize(%w_quant_unpacked)
+ %r = aten::conv_transpose2d(%a_dequant, %w_dequant, %b_unpacked, %stride, %padding, %output_padding, %groups, %dilation)
+ return (%r) )";
+
+ return {
+ {"conv1d_prepack_unpack",
+ std::move(conv1d_with_quant),
+ std::move(conv1d_with_quant_prepack)},
+ {"conv2d_prepack_unpack",
+ std::move(conv2d_with_quant),
+ std::move(conv2d_with_quant_prepack)},
+ {"conv3d_prepack_unpack",
+ std::move(conv3d_with_quant),
+ std::move(conv3d_with_quant_prepack)},
+ {"conv_transpose1d_prepack_unpack",
+ std::move(conv_transpose1d_with_quant),
+ std::move(conv_transpose1d_with_quant_prepack)},
+ {"conv_transpose2d_prepack_unpack",
+ std::move(conv_transpose2d_with_quant),
+ std::move(conv_transpose2d_with_quant_prepack)}};
+ }
+
+ } // namespace jit
+ } // namespace torch
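The (pattern, replacement) string pairs above are plain TorchScript IR; a caller is expected to hand each pair to a subgraph rewriter so that every dequantize-then-conv occurrence is rewritten into its prepack/unpack form. A minimal sketch of that wiring, assuming only the public torch::jit::SubgraphRewriter API from subgraph_rewrite.h and a caller-supplied graph; the helper name is made up for illustration:

    #include <string>
    #include <torch/csrc/jit/ir/ir.h>
    #include <torch/csrc/jit/passes/subgraph_rewrite.h>

    namespace torch {
    namespace jit {

    // Hypothetical helper: rewrite one pattern pair (e.g. conv2d_with_quant ->
    // conv2d_with_quant_prepack) on the given graph.
    void rewriteConvPattern(
        std::shared_ptr<Graph>& graph,
        const std::string& pattern,
        const std::string& replacement) {
      SubgraphRewriter rewriter;
      rewriter.RegisterRewritePattern(pattern, replacement);
      rewriter.runOnGraph(graph);
    }

    } // namespace jit
    } // namespace torch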
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/quantization_type.h ADDED
@@ -0,0 +1,15 @@
+ #pragma once
+ #include <cstdint>
+ #include <ostream>
+
+ namespace torch {
+ namespace jit {
+
+ // Quantization type (dynamic quantization, static quantization).
+ // Should match the Python enum in quantize_jit.py
+ enum QuantType : std::uint8_t { DYNAMIC = 0, STATIC };
+
+ std::ostream& operator<<(std::ostream& os, QuantType t);
+
+ } // namespace jit
+ } // namespace torch
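Only the declaration of the stream operator lives in this header. The sketch below shows what a matching definition could look like; the real one lives in the pass's .cpp file and may print different strings:

    #include <ostream>
    #include <torch/csrc/jit/passes/quantization/quantization_type.h>

    namespace torch {
    namespace jit {

    // Sketch only: map the two enumerators to readable names.
    std::ostream& operator<<(std::ostream& os, QuantType t) {
      switch (t) {
        case QuantType::DYNAMIC:
          return os << "dynamic";
        case QuantType::STATIC:
          return os << "static";
      }
      return os << "unknown";
    }

    } // namespace jit
    } // namespace torch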
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/register_packed_params.h ADDED
@@ -0,0 +1,20 @@
+ #pragma once
+
+ #include <torch/csrc/jit/api/module.h>
+ #include <torch/csrc/jit/ir/ir.h>
+ #include <memory>
+
+ namespace torch {
+ namespace jit {
+
+ using PrePackParamFilterFn = std::function<bool(Node*)>;
+
+ TORCH_API std::unordered_set<std::string> RegisterPrePackParams(
+ Module& m,
+ const std::string& method_name,
+ const PrePackParamFilterFn& is_packed_param,
+ const std::string& attr_prefix);
+
+ TORCH_API std::string joinPaths(const std::vector<std::string>& paths);
+ } // namespace jit
+ } // namespace torch
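A usage sketch for RegisterPrePackParams: register every output of quantized::linear_prepack in a module's forward method as a module attribute. The model path, the chosen prepack op, and the attribute prefix are all made-up example values:

    #include <iostream>
    #include <unordered_set>
    #include <torch/csrc/jit/passes/quantization/register_packed_params.h>
    #include <torch/csrc/jit/serialization/import.h>

    int main() {
      // Hypothetical model file; any scripted module with quantized linears works.
      torch::jit::Module m = torch::jit::load("model.pt");

      torch::jit::PrePackParamFilterFn is_packed_param = [](torch::jit::Node* n) {
        return n->kind() ==
            c10::Symbol::fromQualString("quantized::linear_prepack");
      };

      std::unordered_set<std::string> attrs = torch::jit::RegisterPrePackParams(
          m, "forward", is_packed_param, /*attr_prefix=*/"_packed_");

      for (const auto& name : attrs) {
        std::cout << "registered packed param attribute: " << name << "\n";
      }
      return 0;
    }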
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/refine_tuple_types.h ADDED
@@ -0,0 +1,12 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ // Updates the types of tuples according to the types of their current inputs.
+ TORCH_API void RefineTupleTypes(std::shared_ptr<Graph>& graph);
+
+ } // namespace jit
+ } // namespace torch
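A minimal driver for RefineTupleTypes, assuming the IR parser from irparser.h; the toy graph just constructs a tuple whose type can be refined from its inputs:

    #include <torch/csrc/jit/ir/ir.h>
    #include <torch/csrc/jit/ir/irparser.h>
    #include <torch/csrc/jit/passes/refine_tuple_types.h>

    int main() {
      auto graph = std::make_shared<torch::jit::Graph>();
      torch::jit::parseIR(
          R"IR(
    graph(%a : Tensor, %b : Tensor):
      %t : (Tensor, Tensor) = prim::TupleConstruct(%a, %b)
      return (%t))IR",
          graph.get());

      torch::jit::RefineTupleTypes(graph);
      graph->dump(); // the tuple's type now reflects the current input types
      return 0;
    }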
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_exceptions.h ADDED
@@ -0,0 +1,23 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ // Considering prim::RaiseException nodes unreachable, simplify prim::If nodes
+ // when one of the branches contains prim::RaiseException.
+ //
+ // This pass is illegal in the general case, as the modified graph might not throw
+ // an exception that the original graph would throw. The purpose of the pass is
+ // to clean up the graph in a "risky" way by removing pathways leading to
+ // RaiseException nodes. In some sense, this pass could be considered a
+ // "Release" mode, while the original graph was in a "Debug" mode.
+ // The pass should only be used when such a transformation is guaranteed to be
+ // safe by some other mechanism, for instance when we know the exact shapes of
+ // tensors flowing through the graph and tensors with such shapes never cause
+ // exceptions.
+ TORCH_API void EliminateExceptions(std::shared_ptr<Graph>& graph);
+
+ } // namespace jit
+ } // namespace torch
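Since the pass is only safe under external guarantees, a caller would typically wrap it in a small helper and sanity-check the result. A sketch, assuming the graph comes from scripting or tracing elsewhere; the counting helper only walks the top-level block and is illustrative only:

    #include <torch/csrc/jit/ir/ir.h>
    #include <torch/csrc/jit/passes/remove_exceptions.h>

    // Count top-level prim::RaiseException nodes (nested blocks not visited).
    static int countRaises(const std::shared_ptr<torch::jit::Graph>& g) {
      int n = 0;
      for (torch::jit::Node* node : g->nodes()) {
        if (node->kind() == c10::prim::RaiseException) {
          ++n;
        }
      }
      return n;
    }

    void stripExceptions(std::shared_ptr<torch::jit::Graph>& g) {
      int before = countRaises(g);
      torch::jit::EliminateExceptions(g);
      int after = countRaises(g);
      // The caller has promised (e.g. via known tensor shapes) that the removed
      // exception paths were unreachable, so `after <= before` is expected.
      (void)before;
      (void)after;
    }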
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_expands.h ADDED
@@ -0,0 +1,11 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API void RemoveExpands(const std::shared_ptr<Graph>& graph);
+
+ } // namespace jit
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_redundant_profiles.h ADDED
@@ -0,0 +1,11 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API void RemoveRedundantProfiles(std::shared_ptr<Graph>& graph);
+ TORCH_API void RemoveRedundantProfiles(Block* block, AliasDb& db);
+ } // namespace jit
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/replacement_of_old_operators.h ADDED
@@ -0,0 +1,16 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ // Find the valid upgrader graph for the given upgrader name and cache the
+ // result for later lookups. Errors out if there is no valid upgrader graph
+ // provided for that name.
+ std::shared_ptr<Graph> getUpgraderGraph(const std::string& upgrader_name);
+
+ TORCH_API void ReplaceOldOperatorsWithUpgraders(std::shared_ptr<Graph> graph);
+
+ } // namespace jit
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/restore_mutation.h ADDED
@@ -0,0 +1,63 @@
+ #pragma once
+
+ #include <ATen/core/symbol.h>
+ #include <c10/util/Exception.h>
+ #include <torch/csrc/Export.h>
+ #include <torch/csrc/jit/ir/alias_analysis.h>
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ // A map recording whether an activation operator can perform type promotion
+ const std::unordered_map<Symbol, bool> activation_type_promotion_mapping = {
+ {aten::sigmoid, true},
+ {aten::tanh, true},
+ {aten::celu, false},
+ {aten::elu, false},
+ {aten::gelu, false},
+ {aten::glu, false},
+ {aten::hardshrink, false},
+ {aten::hardsigmoid, false},
+ {aten::hardswish, false},
+ {aten::hardtanh, false},
+ {aten::leaky_relu, false},
+ {aten::prelu, false},
+ {aten::relu6, false},
+ {aten::relu, false},
+ {aten::rrelu, false},
+ {aten::selu, false},
+ {aten::silu, false}};
+
+ class FunctionalToInplaceRewriter {
+ public:
+ FunctionalToInplaceRewriter(std::shared_ptr<Graph> graph);
+
+ bool FunctionalToInplace(Block* block);
+
+ private:
+ AliasDb* getOrCreateAliasDb() {
+ if (!aliasDb_) {
+ aliasDb_ = std::make_unique<AliasDb>(graph_);
+ }
+ return aliasDb_.get();
+ }
+
+ bool CanBeInplace(Node* node);
+
+ std::unique_ptr<AliasDb> aliasDb_ = nullptr;
+ std::shared_ptr<Graph> graph_;
+ };
+
+ // A common application scenario is to apply InplaceToFunctionalActivation
+ // before some JIT optimization passes, so that those passes are less
+ // constrained by in-place ops. After those passes are done, we can call
+ // FunctionalToInplaceActivation to recover in-place activation ops,
+ // so that we won't lose the performance benefit coming from memory reduction.
+
+ // Replaces functional aten activation ops with their in-place equivalents
+ TORCH_API bool FunctionalToInplaceActivation(
+ const std::shared_ptr<Graph>& graph);
+
+ } // namespace jit
+ } // namespace torch
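A sketch of the round trip described in the comment above. EliminateCommonSubexpression is just a stand-in for "some JIT optimization passes", and the functional rewrite InplaceToFunctionalActivation is assumed to be declared in remove_mutation.h, as in upstream PyTorch:

    #include <torch/csrc/jit/ir/ir.h>
    #include <torch/csrc/jit/passes/common_subexpression_elimination.h>
    #include <torch/csrc/jit/passes/remove_mutation.h>
    #include <torch/csrc/jit/passes/restore_mutation.h>

    void optimizeAroundInplaceActivations(std::shared_ptr<torch::jit::Graph>& g) {
      // e.g. aten::relu_ -> aten::relu
      torch::jit::InplaceToFunctionalActivation(g);

      // A mutation-free graph is easier for optimizations to reason about.
      torch::jit::EliminateCommonSubexpression(g);

      // aten::relu -> aten::relu_ again, where alias analysis proves it safe.
      torch::jit::FunctionalToInplaceActivation(g);
    }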
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/specialize_autogradzero.h ADDED
@@ -0,0 +1,21 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ // propagate autograd zero information through a gradient graph and
+ // remove grad_of blocks if present.
+ // Note: this is a very limited pass. It only propagates autograd zeros for
+ // operations generated by the symbolic autodiff code and cleans up
+ // AutogradAdds when possible. Outputs of other nodes are conservatively
+ // marked Unknown and not optimized.
+ TORCH_API void specializeAutogradZero(std::shared_ptr<Graph> g);
+
+ struct ProfilingRecord;
+
+ TORCH_API void InsertProfileNodesForSpecializeAutogradZero(ProfilingRecord* pr);
+
+ } // namespace jit
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/symbolic_shape_cache.h ADDED
@@ -0,0 +1,57 @@
+ #pragma once
+
+ #include <torch/csrc/jit/ir/ir.h>
+ #include <torch/csrc/jit/passes/symbolic_shape_analysis.h>
+
+ namespace torch {
+ namespace jit {
+
+ struct TORCH_API CanonicalizedSymbolicShape {
+ // TODO: Consider in the future whether it is reasonable to
+ // merge code with SymbolicShape or VaryingShape while keeping
+ // the two not implicitly convertible (which could cause bugs).
+ CanonicalizedSymbolicShape(
+ const c10::SymbolicShape& orig_shape,
+ std::unordered_map<int64_t, int64_t>& ss_map) {
+ init(orig_shape, ss_map);
+ }
+
+ CanonicalizedSymbolicShape(c10::SymbolicShape& orig_shape) {
+ std::unordered_map<int64_t, int64_t> new_ssmap;
+ init(orig_shape, new_ssmap);
+ }
+
+ size_t hash() const;
+
+ c10::SymbolicShape toSymbolicShape(
+ std::unordered_map<int64_t, int64_t>& inverse_ss_map) const;
+
+ TORCH_API friend bool operator==(
+ const CanonicalizedSymbolicShape& a,
+ const CanonicalizedSymbolicShape& b);
+
+ private:
+ c10::optional<std::vector<int64_t>> values_;
+
+ void init(
+ const c10::SymbolicShape& orig_shape,
+ std::unordered_map<int64_t, int64_t>& ss_map);
+ };
+
+ // SHAPE CACHE API
+ TORCH_API c10::optional<std::vector<at::SymbolicShape>>
+ get_cached_shape_function(
+ const FunctionSchema* schema,
+ const std::vector<SSAInput>& arg_vec);
+
+ TORCH_API void cache_shape_function(
+ const FunctionSchema* schema,
+ const std::vector<SSAInput>& arg_vec,
+ const std::vector<at::SymbolicShape>& ret_vec);
+
+ // For use in test code
+ TORCH_API void clear_shape_cache();
+ TORCH_API size_t get_shape_cache_size();
+
+ } // namespace jit
+ } // namespace torch
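A sketch of how CanonicalizedSymbolicShape is meant to be used: the same symbolic shape, canonicalized against two fresh symbol maps, should hash and compare consistently even though its leading dimension is an arbitrary symbol. This assumes c10::SymbolicShape can be constructed from a vector of optional dimensions (nullopt meaning a symbolic dim), as in upstream ATen:

    #include <unordered_map>
    #include <vector>
    #include <torch/csrc/jit/passes/symbolic_shape_cache.h>

    void canonicalizeExample() {
      // [*, 3]: unknown (symbolic) leading dim, fixed second dim.
      c10::SymbolicShape shape(
          std::vector<c10::optional<int64_t>>{c10::nullopt, 3});

      std::unordered_map<int64_t, int64_t> map_a;
      std::unordered_map<int64_t, int64_t> map_b;
      torch::jit::CanonicalizedSymbolicShape c1(shape, map_a);
      torch::jit::CanonicalizedSymbolicShape c2(shape, map_b);

      // Both canonicalizations rename the free symbol to the same slot, so the
      // two canonical shapes are expected to compare equal and hash alike.
      bool same = (c1 == c2) && (c1.hash() == c2.hash());
      (void)same;
    }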
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/utils/check_alias_annotation.h ADDED
@@ -0,0 +1,22 @@
+ #pragma once
+
+ #include <ATen/core/ivalue.h>
+ #include <torch/csrc/jit/ir/ir.h>
+ #include <memory>
+ #include <string>
+ #include <vector>
+
+ namespace torch {
+ namespace jit {
+
+ // Verify that alias annotations are correct. See impl for definition of
+ // "correct".
+ //
+ // This function expects a graph with a single op with `unqualifiedOpName`, plus
+ // the inputs that you would otherwise have passed to the graph executor.
+ TORCH_API void checkAliasAnnotation(
+ const std::shared_ptr<Graph>& graph,
+ std::vector<IValue> pythonInputs,
+ const std::string& unqualifiedOpName);
+ } // namespace jit
+ } // namespace torch
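A sketch of driving checkAliasAnnotation by hand: parse a graph containing a single aten::add call (plus the scalar constant it needs) and pass inputs matching the graph's tensor arguments. The IR string and the choice of op are just an example:

    #include <ATen/ATen.h>
    #include <torch/csrc/jit/ir/ir.h>
    #include <torch/csrc/jit/ir/irparser.h>
    #include <torch/csrc/jit/passes/utils/check_alias_annotation.h>

    int main() {
      auto graph = std::make_shared<torch::jit::Graph>();
      torch::jit::parseIR(
          R"IR(
    graph(%x : Tensor, %y : Tensor):
      %alpha : int = prim::Constant[value=1]()
      %z : Tensor = aten::add(%x, %y, %alpha)
      return (%z))IR",
          graph.get());

      // Inputs mirror the graph's two tensor arguments.
      std::vector<c10::IValue> inputs{at::rand({2, 2}), at::rand({2, 2})};

      // Raises if aten::add's alias annotations are violated at runtime.
      torch::jit::checkAliasAnnotation(graph, std::move(inputs), "add");
      return 0;
    }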
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/utils/memory_dag.h ADDED
@@ -0,0 +1,175 @@
+ #pragma once
+
+ #include <ATen/core/jit_type.h>
+ #include <c10/util/ArrayRef.h>
+ #include <c10/util/Optional.h>
+ #include <c10/util/flat_hash_map.h>
+ #include <c10/util/sparse_bitset.h>
+ #include <torch/csrc/jit/ir/ir.h>
+ #include <torch/csrc/jit/ir/type_hashing.h>
+ #include <memory>
+ #include <unordered_map>
+ #include <unordered_set>
+ #include <vector>
+
+ #include <torch/csrc/Export.h>
+
+ // Uses a compressed index representation for faster comparisons
+ typedef c10::SparseBitVector<256> MemoryLocations;
+ namespace torch {
+ namespace jit {
+
+ struct Element;
+ struct Value;
+ class MemoryDAG;
+
+ using AliasTypeSet = std::vector<TypePtr>;
+
+ /**
+ * Helper to build up the points-to graph.
+ *
+ * We separate the "building" into a different class because it allows us to
+ * cache internally to MemoryDAG without worrying about how the DAG structure
+ * is mutated.
+ */
+ class TORCH_API MemoryDAGBuilder {
+ public:
+ MemoryDAGBuilder() = default;
+ MemoryDAGBuilder(const MemoryDAGBuilder&) = delete;
+ MemoryDAGBuilder& operator=(const MemoryDAGBuilder&) = delete;
+
+ // Make `from` point at `to`.
+ void makePointerTo(Element* from, Element* to);
+
+ void addToContainedElements(Element* contained, Element* container);
+
+ // Make a fresh Element (i.e. an Element that doesn't point to anything) and
+ // return it.
+ Element* makeFreshValue(const Value* v);
+
+ friend MemoryDAG;
+
+ private:
+ // `MemoryDAGBuilder` builds up `indexToElementMap_`, then uses
+ // the map to construct the `MemoryDAG`
+ std::vector<std::unique_ptr<Element>> indexToElementMap_;
+ };
+
+ // class MemoryDAG
+ //
+ // This class tracks the "A points to B" graph for all values. It is used by
+ // AliasDb to provide a higher-level API.
+ //
+ // We maintain a DAG where:
+ // - Vertices (called "Elements") represent Values and
+ // other aliasing entities (e.g. the stuff inside a list)
+ // - Edges represent a "points-to" relationship.
+ //
+ // Leaves in this DAG are entities that don't point to anything, and thus
+ // correspond to unique "memory locations".
+ //
+ // So, by traversing the "points-to" graph to the leaves, you can determine
+ // which memory locations an element may point to.
+ class TORCH_API MemoryDAG {
+ public:
+ explicit MemoryDAG(std::unique_ptr<MemoryDAGBuilder> builder)
+ : indexToElementMap_(std::move(builder->indexToElementMap_)) {}
+ // Explicitly delete the copy constructor; otherwise the Windows build gets
+ // confused about an exported class. See
+ // https://stackoverflow.com/a/51033485/105137
+ MemoryDAG(const MemoryDAG&) = delete;
+ MemoryDAG& operator=(const MemoryDAG&) = delete;
+
+ // Return the unique memory locations that `Element` might represent.
+ const MemoryLocations& getMemoryLocations(const Element* e) const;
+
+ // Do `a` and `b` potentially share a memory location?
+ bool mayAlias(const Element* a, const Element* b) const;
+
+ // Does `a` hold a reference to any memory that is stored in `b`, or vice versa?
+ bool mayContainAlias(const Element* a, const Element* b) const;
+
+ bool mayContainAlias(const Element* a, const at::ArrayRef<Element*> b) const;
+
+ bool mayContainAlias(
+ const at::ArrayRef<Element*> a,
+ const at::ArrayRef<Element*> b) const;
+
+ // Converts from the compressed index representation
+ const Element* fromIndex(unsigned x) const;
+ Element* fromIndex(unsigned x);
+ void collectAllContainedMemoryLocations(
+ const Element* elem,
+ MemoryLocations& cont) const;
+
+ /**
+ * The following methods are special cases where we need to mutate the
+ * internals of MemoryDAG for efficiency reasons. Don't call them unless you
+ * know what you're doing! In particular, don't add new mutating methods
+ * without ensuring that you are maintaining cache consistency for memory
+ * locations.
+ */
+
+ // Adding wildcards can trigger extremely expensive cache invalidations. This
+ // method adds them in a more efficient cache-aware way.
+ void setWildcards(
+ const std::unordered_set<const Value*>& wildcards,
+ const ska::flat_hash_map<const Value*, Element*>& elementMap,
+ const std::function<Element*(const Value*)>& getWildcardElement);
+ Element* unsafeMakeFreshValue(const Value* v);
+
+ private:
+ const MemoryLocations& getAllContainedMemoryLocations(
+ const Element* elem) const;
+ void collectAllContainedMemoryLocationsImpl(
+ const Element* elem,
+ MemoryLocations& cont) const;
+ std::vector<std::unique_ptr<Element>> indexToElementMap_;
+ };
+
+ // `Element` represents a vertex in the points-to graph. It represents
+ // anything that could have an aliasing relationship--mostly IR
+ // `Value`s, but also wildcards or the type inside a container (e.g. `T`
+ // in `List[T]`)
+ struct Element {
+ Element(const Value* value_, unsigned index_);
+ // wildcard constructor
+ explicit Element(unsigned index_);
+
+ // Index into the owning DAG's bit vector that represents this element.
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
+ unsigned index;
+
+ // All elements that this element *may* point to. It's possible to have
+ // multiple elements that you might point to due to control flow/complex ops
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
+ MemoryLocations pointsTo;
+ // Backreference for points-to.
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
+ MemoryLocations pointedFrom;
+
+ // Elements can contain other elements (e.g. List[Tensor])
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
+ MemoryLocations containedElements;
+
+ // The values that this element corresponds to. May be empty if this element
+ // doesn't represent a first-class value.
+ // This is for debug information only.
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
+ std::unordered_set<const Value*> values;
+
+ private:
+ // Make `from` point at `to`.
+ void makePointerTo(Element* from, Element* to);
+
+ friend class MemoryDAG;
+ // We memoize the results of `getMemoryLocations` to speed up queries.
+ // A nullopt means that this cache is not yet populated. Since `MemoryDAG` is
+ // immutable, this cache should never need to be invalidated.
+ mutable c10::optional<MemoryLocations> cachedMemoryLocations_;
+
+ mutable c10::optional<MemoryLocations> cachedAllContainedMemoryLocations_;
+ };
+
+ } // namespace jit
+ } // namespace torch
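The builder/DAG split can be exercised directly; the sketch below hand-builds a three-element points-to graph (in real use AliasDb owns this machinery). It only uses the API declared above plus Graph::addInput to obtain some Values:

    #include <memory>
    #include <torch/csrc/jit/ir/ir.h>
    #include <torch/csrc/jit/passes/utils/memory_dag.h>

    int main() {
      auto graph = std::make_shared<torch::jit::Graph>();
      const torch::jit::Value* a = graph->addInput();
      const torch::jit::Value* b = graph->addInput();
      const torch::jit::Value* c = graph->addInput();

      auto builder = std::make_unique<torch::jit::MemoryDAGBuilder>();
      torch::jit::Element* ea = builder->makeFreshValue(a);
      torch::jit::Element* eb = builder->makeFreshValue(b);
      torch::jit::Element* ec = builder->makeFreshValue(c);

      // a and b both may point at c, so they can reach the same memory location.
      builder->makePointerTo(ea, ec);
      builder->makePointerTo(eb, ec);

      torch::jit::MemoryDAG dag(std::move(builder));
      bool alias = dag.mayAlias(ea, eb); // expected: true
      (void)alias;
      return 0;
    }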
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/utils/op_registry.h ADDED
@@ -0,0 +1,31 @@
+ #pragma once
+
+ #include <torch/csrc/Export.h>
+ #include <torch/csrc/jit/ir/ir.h>
+ #include <memory>
+
+ namespace torch {
+ namespace jit {
+ // Moved from shape_analysis.cpp
+
+ // Requirements:
+ // dims : preserved from the first argument
+ // scalar type : preserved from the first argument (doesn't have to
+ // match other arguments)
+ // device : always matching and preserved
+ // tensor inputs : *
+ // tensor outputs : 1
+ // NB: those ops (with slight adjustments) are good candidates for restarts.
+ // Knowing the type and device of weights or biases is usually enough to
+ // infer the output type.
+ std::shared_ptr<OperatorSet> nn_ops_first_input_preserving();
+
+ // Requirements:
+ // dims : changed from the first argument
+ // scalar type : preserved from the first argument
+ // device : always matching and preserved
+ // tensor inputs : 1
+ // tensor outputs : 1
+ std::shared_ptr<OperatorSet> ops_one_tensor_in_shape_transform();
+ } // namespace jit
+ } // namespace torch
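A sketch of how a shape/dtype propagation pass might consult these sets; it assumes Node::isMemberOf(const OperatorSet&), which is the kind of membership test upstream shape analysis performs:

    #include <memory>
    #include <torch/csrc/jit/ir/ir.h>
    #include <torch/csrc/jit/passes/utils/op_registry.h>
    #include <torch/csrc/jit/runtime/operator.h>

    // Returns true if `node`'s output is expected to keep the dims/scalar type
    // of its first tensor argument, per nn_ops_first_input_preserving().
    bool preservesFirstInputType(torch::jit::Node* node) {
      static const std::shared_ptr<torch::jit::OperatorSet> ops =
          torch::jit::nn_ops_first_input_preserving();
      return node->isMemberOf(*ops);
    }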