applied-ai-018 committed
Commit 014ad1c · verified · 1 Parent(s): ef521bb

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full change set.
Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/alias_analysis.h +322 -0
  2. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/attributes.h +184 -0
  3. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/constants.h +61 -0
  4. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/graph_node_list.h +201 -0
  5. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/graph_utils.h +25 -0
  6. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/ir.h +1841 -0
  7. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/ir_views.h +164 -0
  8. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/irparser.h +40 -0
  9. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/named_value.h +84 -0
  10. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/node_hashing.h +17 -0
  11. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/scope.h +220 -0
  12. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/subgraph_matcher.h +74 -0
  13. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/type_hashing.h +20 -0
  14. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/debug_info.h +57 -0
  15. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/file_format.h +196 -0
  16. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/flatbuffer_loader.h +136 -0
  17. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/frame.h +53 -0
  18. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/function.h +86 -0
  19. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/import.h +112 -0
  20. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/import_export_common.h +23 -0
  21. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/interpreter.h +30 -0
  22. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/parse_bytecode.h +25 -0
  23. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/prim_ops_registery.h +32 -0
  24. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/promoted_prim_ops.h +63 -0
  25. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/quantization.h +38 -0
  26. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/register_ops_common_utils.h +55 -0
  27. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/type_parser.h +54 -0
  28. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/upgrader_mobile.h +43 -0
  29. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/module_python.h +35 -0
  30. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/pybind_utils.h +1115 -0
  31. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_arg_flatten.h +119 -0
  32. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_ir.h +50 -0
  33. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_ivalue.h +97 -0
  34. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_sugared_value.h +376 -0
  35. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_tree_views.h +9 -0
  36. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/block_codegen.h +150 -0
  37. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/bounds_inference.h +80 -0
  38. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/bounds_overlap.h +128 -0
  39. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/codegen.h +283 -0
  40. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/cpp_codegen.h +102 -0
  41. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/cpp_intrinsics.h +36 -0
  42. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/cuda_random.h +104 -0
  43. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/eval.h +347 -0
  44. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/exceptions.h +91 -0
  45. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/external_functions_core.h +29 -0
  46. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/external_functions_registry.h +61 -0
  47. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/fwd_decls.h +129 -0
  48. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/graph_opt.h +115 -0
  49. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/intrinsic_symbols.h +22 -0
  50. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir.h +934 -0
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/alias_analysis.h ADDED
@@ -0,0 +1,322 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/alias_info.h>
4
+ #include <c10/util/flat_hash_map.h>
5
+ #include <torch/csrc/jit/ir/ir.h>
6
+ #include <torch/csrc/jit/ir/type_hashing.h>
7
+ #include <torch/csrc/jit/passes/create_functional_graphs.h>
8
+ #include <torch/csrc/jit/passes/utils/memory_dag.h>
9
+
10
+ namespace torch {
11
+ namespace jit {
12
+
13
+ /**
14
+ * Alias analysis pass.
15
+ *
16
+ * This pass produces an AliasDb that contains aliasing and mutation
17
+ * information about the graph. Users can use this information to determine
18
+ * whether mutations to the graph are safe, i.e. they don't reorder/change
19
+ * nodes in a way that affects output.
20
+ *
21
+ * Every value with a mutable type (Tensors, Lists, Tuples, etc.) will be
22
+ * associated with one or more "alias sets". If two values share an alias set,
23
+ * that means they may alias, implying that a mutation to one value cannot be
24
+ * reordered past a use of the other. Only reordering two reads of an alias set
25
+ * is considered safe.
26
+ *
27
+ * There is a special alias set called the "wildcard set", which indicates that
28
+ * we're not sure what this value may alias. To be conservative, we consider the
29
+ * wildcard alias set as potentially aliasing any other wildcard value within
30
+ * the same type class. Whenever a value becomes contained by another value,
31
+ * such as when a Tensor is appended to a List[Tensor], the contained element
32
+ * becomes part of the wildcard set.
33
+ *
34
+ * Values that contain other mutable types, such as List[Tensor], are
35
+ * initialized as containing the Wildcard set for all contained mutable types.
36
+ *
37
+ * The AliasDb API references the idea of "mutable" vs "immutable"
38
+ * types. "Mutable" means that the object's value can change, while
39
+ * "immutable" means that the value is fixed. (For example, `List` is
40
+ * mutable, so you can add and delete elements from it. On the other
41
+ * hand, you can't modify a Tuple once you create it, making `Tuple` an
42
+ * immutable container.)
43
+ *
44
+ * `isFrozen` - if the Module is frozen then consider attributes as freshly
45
+ * created objects. Freezing API invokes alias analysis to check if they are
46
+ * mutated internally.
47
+ *
48
+ * `descendFunctionCalls` - recursively analyze function and method calls
49
+ * instead of conservative analysis. Generally analysis should be done after
50
+ * inlining so the implementation for recursive analysis is unoptimized.
51
+ */
52
+ class AliasDb {
53
+ public:
54
+ TORCH_API explicit AliasDb(
55
+ std::shared_ptr<Graph> graphi,
56
+ bool isFrozen = false,
57
+ bool descendFunctionCalls = false);
58
+ TORCH_API ~AliasDb();
59
+
60
+ // There are limitations to what effects the alias analysis can track. Two
61
+ // kinds of nodes may have untracked effects:
62
+ // 1. Nodes that write to a value that may alias the graph inputs (since
63
+ // the inputs can be used outside the graph).
64
+ // 2. Nodes that write to something in the wildcard set.
65
+ //
66
+ // These nodes are considered not safe to eliminate or mutate under any
67
+ // circumstances.
68
+ bool writesToWildcard(Node* n) const;
69
+
70
+ // Does `n` write to an alias of one of the values in `vs`?
71
+ // if `recurseBlocks` is true, consider writes on the nodes in `n`s sub-blocks
72
+ TORCH_API bool writesToAlias(Node* n, const ValueSet& vs) const;
73
+
74
+ // Does `a` and `b` potentially share a memory location or do either
75
+ // hold in memory any element that exists in the other
76
+ TORCH_API bool mayContainAlias(Value* a, Value* b) const;
77
+
78
+ TORCH_API bool mayContainAlias(Value* a, const at::ArrayRef<Value*> b) const;
79
+
80
+ // Do any values in group `a` share a memory location or hold in memory
81
+ // any element that exists in group `b`
82
+ TORCH_API bool mayContainAlias(
83
+ const at::ArrayRef<Value*> a,
84
+ const at::ArrayRef<Value*> b) const;
85
+
86
+ // Do `a` and `b` potentially share a memory location?
87
+ TORCH_API bool mayAlias(const Value* a, const Value* b) const;
88
+ // Do any values in group `a` potentially share a memory location with any
89
+ // value in group `b`? i.e. may they overlap?
90
+ TORCH_API bool mayAlias(const ValueSet& a, const ValueSet& b) const;
91
+
92
+ // Do any nodes write to an alias set input to `n`?
93
+ TORCH_API bool hasInputWriters(const Node* n) const;
94
+
95
+ // Do any nodes write to an alias set output by `n`?
96
+ TORCH_API bool hasOutputWriters(const Node* n) const;
97
+
98
+ // Do any nodes write to an alias set that `n` inputs or outputs?
99
+ TORCH_API bool hasWriters(const Node* n) const;
100
+
101
+ // Do any nodes write to `v`s memory location?
102
+ TORCH_API bool hasWriters(const Value* v) const;
103
+
104
+ // Is the operation in-place? i.e. doesn't write anywhere but locations it
105
+ // reads from.
106
+ TORCH_API bool isMutable(Node* n) const;
107
+
108
+ TORCH_API bool escapesScope(const at::ArrayRef<Value*>& vs) const;
109
+
110
+ // Is it safe to change whether `a` and `b` alias each other ?
111
+ TORCH_API bool safeToChangeAliasingRelationship(
112
+ const at::ArrayRef<Value*>& a,
113
+ const at::ArrayRef<Value*>& b) const;
114
+
115
+ // Move `n` (already in the graph) after `movePoint` in the topological order.
116
+ //
117
+ // Tries to preserve value dependencies, so other nodes might be moved. We
118
+ // make two guarantees about the postcondition of the node list:
119
+ // - `n` is directly after `movePoint`.
120
+ // - only nodes between `n` and `movePoint` have been moved.
121
+ //
122
+ // Returns `false` if it's impossible to move `n` after `MovePoint` without
123
+ // violating dependencies, otherwise executes the move and returns `true`
124
+ TORCH_API bool moveAfterTopologicallyValid(Node* n, Node* movePoint);
125
+ TORCH_API bool moveBeforeTopologicallyValid(Node* n, Node* movePoint);
126
+
127
+ bool couldMoveAfterTopologically(Node* n, Node* movePoint);
128
+ bool couldMoveBeforeTopologically(Node* n, Node* movePoint);
129
+
130
+ // For debugging: print alias db state to stdout
131
+ TORCH_API void dump() const;
132
+ TORCH_API std::string toString() const;
133
+
134
+ // Generates a DOT (www.graphviz.org) graph representation
135
+ //
136
+ // Returns `true` if the output file was successfully generated
137
+ //
138
+ // WARNING: The output dot file path can't include shell specific notations,
139
+ // for example you can't use "~/temp/aliasdb.dot"
140
+ // (instead, use "/home/user/temp/aliasdb.dot")
141
+ //
142
+ TORCH_API bool dumpToGraphvizFile(const char* filename) const;
143
+ TORCH_API std::string toGraphviz() const;
144
+
145
+ // Returns `true` if the given element is mutable or if it is a
146
+ // container type with an internal mutable element (e.g.
147
+ // `Tuple[int, Tensor]` has an internal mutable type `Tensor`, so
148
+ // it would be considered a "mutable type" in AliasDb)
149
+ static bool isMutableType(const Value* v);
150
+ static bool isMutableType(const TypePtr& type);
151
+
152
+ /**
153
+ * Mutation API
154
+ *
155
+ * These methods allow you to update AliasDb in-place if you are performing
156
+ * graph mutation.
157
+ *
158
+ * WARNING: These methods should be considered INTERNAL. They do not perform
159
+ * very many correctness checks, the user is responsible for making sure they
160
+ * are updating AliasDb correctly. `Lint()`ing the AliasDb can help with
161
+ * this.
162
+ */
163
+ // Copy `existing`s aliasing info to `new_value`, and remove `existing`.
164
+ TORCH_API void replaceWithNewValue(Value* existing, Value* new_value);
165
+ // Copy `from`s aliasing info to `to`.
166
+ TORCH_API void copyValue(Value* from, Value* to);
167
+ // Create a new `value` that does not alias anything else.
168
+ TORCH_API void createValue(const Value* value);
169
+
170
+ // Enable more precise treatment of prim::TupleConstruct.
171
+ void enablePreciseTupleContainerAnalysis();
172
+
173
+ friend struct MutationRemover;
174
+
175
+ private:
176
+ // Helper for topologically-safe node moves.
177
+ class WorkingSet;
178
+ enum class MoveSide { BEFORE, AFTER };
179
+ bool tryMove(Node* toMove, Node* movePoint, MoveSide moveSide, bool dryRun);
180
+ void move(Node* toMove, Node* movePoint, MoveSide moveSide);
181
+ bool isBeforeOrAfter(const Node* n, MoveSide moveSide) const;
182
+
183
+ bool isMutableTypeInternal(const Value* v) const;
184
+ bool isMutableTypeInternal(const TypePtr& type) const;
185
+
186
+ /**
187
+ * Write and read internal API
188
+ */
189
+ // Get all the values that `n` writes to.
190
+ // NOTE: this only returns values directly written to, not aliases thereof
191
+ //
192
+ // if `recurseBlocks` is true, gather writes on the nodes in `n`s sub-blocks
193
+ MemoryLocations getWrites(Node* n) const;
194
+ void getWritesImpl(Node* n, MemoryLocations& ret) const;
195
+ // Register the fact that `n` writes to `v`.
196
+ void registerWrite(const Value* v, Node* n, bool writeToContained = false);
197
+ // Get all the values that `n` reads from.
198
+ // if `recurseBlocks` is true, gather reads on the nodes in `n`s sub-blocks
199
+ MemoryLocations getReads(Node* n) const;
200
+ void getReadsImpl(Node* n, MemoryLocations& ret) const;
201
+
202
+ /**
203
+ * Wildcard methods
204
+ */
205
+ // Register `v` as a wildcard value.
206
+ c10::optional<Element*> setWildcard(const Value* v);
207
+
208
+ // Is this a value which will not alias?
209
+ bool nonAliasingValue(const Value* elem) const;
210
+
211
+ /**
212
+ * Special analysis methods
213
+ */
214
+ void analyze(const std::shared_ptr<Graph>& graph);
215
+ void analyze(Block* block);
216
+ void analyze(Node* node);
217
+ void analyzeImpl(Node* node);
218
+ void analyzeIf(Node* node);
219
+ void analyzeLoop(Node* node);
220
+ void analyzeSubgraph(Node* node, std::shared_ptr<Graph> subgraph);
221
+ void analyzeSubgraph(Node* node);
222
+ void analyzeCreator(Node* node);
223
+ void analyzeExtractor(Node* node);
224
+ void analyzeChunk(Node* node);
225
+ void analyzeBroadcastingChunk(Node* node);
226
+ void analyzeFork(Node* node);
227
+ void analyzeWait(Node* node);
228
+ void analyzeAwaitable(Node* node);
229
+ void analyzeAwaitableWait(Node* node);
230
+ void analyzeRpcAsync(Node* node);
231
+ void analyzeBatchNorm(Node* node);
232
+ void analyzeInstanceNorm(Node* node);
233
+ void analyzeGradOf(Node* node);
234
+ void analyzeSetAttr(Node* node);
235
+ void analyzeConservative(Node* node);
236
+ void analyzeContainerConstruct(Node* node);
237
+ bool tryRegisteredAnalysis(Node* node);
238
+
239
+ /**
240
+ * Alias manipulation methods
241
+ */
242
+ void makeAllAlias(const std::vector<Value*>& values);
243
+ void makePointerTo(const Value* value, const Value* to);
244
+ TORCH_API void addToContainedElements(
245
+ const Value* element,
246
+ const Value* container);
247
+ void mapAliases(at::ArrayRef<Value*> to, at::ArrayRef<Value*> from);
248
+ void giveFreshAlias(
249
+ const Value* value,
250
+ bool add_wildcard_to_contained_elems = true);
251
+ Element* getOrCreateElement(const Value* value);
252
+
253
+ const AliasTypeSet* mapTypeToAliasTypeSetPtr(const TypePtr& type) const;
254
+ bool functionalNonEscapingListUse(const Use& use) const;
255
+ bool functionalNonEscapingTupleUse(const Use& use) const;
256
+
257
+ std::shared_ptr<Graph> graph_;
258
+
259
+ // If the Module is frozen then consider attributes as freshly created
260
+ // objects. Freezing API invokes alias analysis to check if they are mutated
261
+ // internally.
262
+ bool isFrozen_;
263
+
264
+ bool descend_function_calls_;
265
+ std::unordered_map<Graph*, std::vector<std::shared_ptr<Graph>>>
266
+ function_call_copies_;
267
+
268
+ // The points-to graph that stores aliasing relationships
269
+ std::unique_ptr<MemoryDAGBuilder> memoryDAGBuilder_;
270
+ std::unique_ptr<MemoryDAG> memoryDAG_;
271
+
272
+ // Mapping of values to MemoryDAG elements
273
+ ska::flat_hash_map<const Value*, Element*> elementMap_;
274
+ // All wildcard Elements (one for each unique mutable type)
275
+ ska::flat_hash_map<TypePtr, Element*, HashType, EqualType> wildcardIndex_;
276
+ Element* getWildcard(const TypePtr& type) const;
277
+ c10::optional<Element*> tryGetOrCreateWildcard(const TypePtr& type);
278
+ void addContainedTypesToFreshElement(
279
+ Element* container_elem,
280
+ const AliasTypeSet& mut_types);
281
+ void pointUnionTypeElementToAllContainedTypes(
282
+ Element* container_elem,
283
+ const AliasTypeSet& mut_types);
284
+
285
+ std::vector<Element*> getElements(at::ArrayRef<Value*> vs) const;
286
+ bool mayAliasWildcard(const Value* v) const;
287
+ bool mayAliasWildcard(const at::ArrayRef<Value*> vs) const;
288
+ bool hasWriters(const at::ArrayRef<Value*>& values) const;
289
+
290
+ // Cached mapping of type ptrs to their mutable types
291
+ mutable ska::flat_hash_map<TypePtr, AliasTypeSet> mapped_mutable_types_;
292
+
293
+ /**
294
+ * State for tracking write info.
295
+ */
296
+ // Write registry where the analysis can record the writes as it sees them.
297
+ // This information is later denormalized into various caches to improve query
298
+ // efficiency.
299
+ struct WriteRegistry;
300
+ std::unique_ptr<WriteRegistry> writeRegistry_;
301
+
302
+ // Map of nodes to the memory locations that they write to
303
+ using TWriteIndex = ska::flat_hash_map<Node*, MemoryLocations>;
304
+ c10::optional<TWriteIndex> writeIndex_;
305
+ // Collection of all memory locations that are written to.
306
+ c10::optional<MemoryLocations> writtenToLocationsIndex_;
307
+ void buildWrittenToLocationsIndex();
308
+
309
+ std::unordered_set<const Value*> wildcards_;
310
+
311
+ std::string getElementName(const Element* e) const;
312
+
313
+ friend void Lint(const AliasDb* db);
314
+ };
315
+
316
+ // Helper check that invariants over AliasDb are maintained.
317
+ // Useful if you are using the AliasDb mutation API and want to check you did
318
+ // the right thing.
319
+ TORCH_API void Lint(const AliasDb* db);
320
+
321
+ } // namespace jit
322
+ } // namespace torch
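
The header above only declares the API; for orientation, here is a minimal, hypothetical usage sketch (not part of this commit). It assumes a torch::jit::Graph built elsewhere and uses Graph::inputs()/outputs() from ir.h alongside the AliasDb methods declared above.

    #include <torch/csrc/jit/ir/alias_analysis.h>
    #include <torch/csrc/jit/ir/ir.h>

    void inspectAliasing(const std::shared_ptr<torch::jit::Graph>& graph) {
      using namespace torch::jit;
      AliasDb aliasDb(graph); // builds the points-to graph for `graph`

      Value* in0 = graph->inputs()[0];
      Value* out0 = graph->outputs()[0];

      // May these two values share a memory location?
      bool aliased = aliasDb.mayAlias(in0, out0);

      // Does any node write to out0's memory location?
      bool written = aliasDb.hasWriters(out0);

      // Try to move the node producing out0 directly after the node producing
      // in0, respecting data and write dependencies; returns false if impossible.
      bool moved = aliasDb.moveAfterTopologicallyValid(out0->node(), in0->node());

      (void)aliased; (void)written; (void)moved;
      aliasDb.dump(); // print alias sets to stdout for debugging
    }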
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/attributes.h ADDED
@@ -0,0 +1,184 @@
1
+ #pragma once
2
+ #include <ATen/core/Tensor.h>
3
+ #include <string>
4
+ #include <vector>
5
+
6
+ #include <ATen/core/jit_type_base.h>
7
+ #include <ATen/core/symbol.h>
8
+
9
+ #include <torch/csrc/Export.h>
10
+
11
+ namespace torch {
12
+ namespace jit {
13
+
14
+ using ::c10::Symbol;
15
+
16
+ constexpr int max_tensor_display_size = 10;
17
+
18
+ enum class AttributeKind {
19
+ f,
20
+ fs,
21
+ c,
22
+ cs,
23
+ i,
24
+ is,
25
+ s,
26
+ ss,
27
+ t,
28
+ ts,
29
+ g,
30
+ gs,
31
+ ty,
32
+ tys,
33
+ ival
34
+ };
35
+ static inline const char* toString(AttributeKind kind) {
36
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
37
+ static const char* names[] = {
38
+ "f",
39
+ "c",
40
+ "cs",
41
+ "fs",
42
+ "i",
43
+ "is",
44
+ "s",
45
+ "ss",
46
+ "t",
47
+ "ts",
48
+ "g",
49
+ "gs",
50
+ "ty",
51
+ "tys",
52
+ "ival"};
53
+ AT_ASSERT(size_t(kind) < sizeof(names) / sizeof(*names));
54
+ return names[int(kind)];
55
+ }
56
+
57
+ struct AttributeValue {
58
+ AttributeValue(Symbol name) : name(name) {}
59
+ using Ptr = std::unique_ptr<AttributeValue>;
60
+ Symbol name;
61
+ virtual AttributeKind kind() const = 0;
62
+ virtual Ptr clone() const = 0;
63
+ virtual ~AttributeValue() = default;
64
+ };
65
+
66
+ template <typename T, AttributeKind Kind>
67
+ struct ScalarAttributeValue : public AttributeValue {
68
+ using ConstructorType = T;
69
+ using ValueType = T;
70
+ ScalarAttributeValue(Symbol name, ConstructorType value_)
71
+ : AttributeValue(name), value_(std::move(value_)) {}
72
+ ValueType& value() {
73
+ return value_;
74
+ }
75
+ Ptr clone() const override {
76
+ return Ptr(new ScalarAttributeValue(name, value_));
77
+ }
78
+ AttributeKind kind() const override {
79
+ return Kind;
80
+ }
81
+
82
+ private:
83
+ ValueType value_;
84
+ };
85
+
86
+ template <typename T, AttributeKind Kind>
87
+ struct VectorAttributeValue : public AttributeValue {
88
+ using ConstructorType = std::vector<T>;
89
+ using ValueType = std::vector<T>;
90
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
91
+ VectorAttributeValue(Symbol name, ConstructorType value_)
92
+ : AttributeValue(name), value_(std::move(value_)) {}
93
+ ValueType& value() {
94
+ return value_;
95
+ }
96
+ AttributeKind kind() const override {
97
+ return Kind;
98
+ }
99
+ std::unique_ptr<AttributeValue> clone() const override {
100
+ auto copy = value_;
101
+ return Ptr(new VectorAttributeValue(name, std::move(copy)));
102
+ }
103
+
104
+ private:
105
+ ValueType value_;
106
+ };
107
+
108
+ using ComplexAttr =
109
+ ScalarAttributeValue<c10::complex<double>, AttributeKind::c>;
110
+ using ComplexValsAttr =
111
+ VectorAttributeValue<c10::complex<double>, AttributeKind::cs>;
112
+ using FloatAttr = ScalarAttributeValue<double, AttributeKind::f>;
113
+ using FloatsAttr = VectorAttributeValue<double, AttributeKind::fs>;
114
+ using IntAttr = ScalarAttributeValue<int64_t, AttributeKind::i>;
115
+ using IntsAttr = VectorAttributeValue<int64_t, AttributeKind::is>;
116
+ using StringAttr = ScalarAttributeValue<std::string, AttributeKind::s>;
117
+ using StringsAttr = VectorAttributeValue<std::string, AttributeKind::ss>;
118
+ using TensorAttr = ScalarAttributeValue<at::Tensor, AttributeKind::t>;
119
+ using TensorsAttr = VectorAttributeValue<at::Tensor, AttributeKind::ts>;
120
+ using TypeAttr = ScalarAttributeValue<c10::TypePtr, AttributeKind::ty>;
121
+ using TypesAttr = VectorAttributeValue<c10::TypePtr, AttributeKind::tys>;
122
+ using IValueAttr = ScalarAttributeValue<at::IValue, AttributeKind::ival>;
123
+
124
+ struct Graph;
125
+
126
+ // We special case Graph attributes like this because we want to ensure that
127
+ // Graph::copy() is called when we clone() these attributes.
128
+ struct TORCH_API GraphAttr : public AttributeValue {
129
+ using ConstructorType = std::shared_ptr<Graph>;
130
+ using ValueType = std::shared_ptr<Graph>;
131
+ GraphAttr(Symbol name, ConstructorType value_)
132
+ : AttributeValue(name), value_(std::move(value_)) {}
133
+ ValueType& value() {
134
+ return value_;
135
+ }
136
+ Ptr clone() const override;
137
+ AttributeKind kind() const override {
138
+ return AttributeKind::g;
139
+ }
140
+
141
+ private:
142
+ std::shared_ptr<Graph> value_;
143
+ };
144
+
145
+ struct TORCH_API GraphsAttr : public AttributeValue {
146
+ using ConstructorType = std::vector<std::shared_ptr<Graph>>;
147
+ using ValueType = std::vector<std::shared_ptr<Graph>>;
148
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
149
+ GraphsAttr(Symbol name, ConstructorType value_)
150
+ : AttributeValue(name), value_(std::move(value_)) {}
151
+ ValueType& value() {
152
+ return value_;
153
+ }
154
+ AttributeKind kind() const override {
155
+ return AttributeKind::gs;
156
+ }
157
+ std::unique_ptr<AttributeValue> clone() const override;
158
+
159
+ private:
160
+ ValueType value_;
161
+ };
162
+
163
+ struct IRAttributeError : public std::exception {
164
+ IRAttributeError(Symbol name, bool defined) {
165
+ std::stringstream ss;
166
+ // NOLINTNEXTLINE(bugprone-branch-clone)
167
+ if (!defined) {
168
+ ss << "required keyword attribute '" << name.toUnqualString()
169
+ << "' is undefined";
170
+ } else {
171
+ ss << "required keyword attribute '" << name.toUnqualString()
172
+ << "' has the wrong type";
173
+ }
174
+ msg = ss.str();
175
+ }
176
+ const char* what() const noexcept override {
177
+ return msg.c_str();
178
+ }
179
+
180
+ private:
181
+ std::string msg;
182
+ };
183
+ } // namespace jit
184
+ } // namespace torch
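
As a brief, hypothetical illustration (not part of this commit), the typed attribute wrappers declared above can be constructed and cloned directly; the attribute names used here are made up.

    #include <torch/csrc/jit/ir/attributes.h>

    void attributeDemo() {
      using namespace torch::jit;
      FloatAttr alpha(Symbol::attr("alpha"), 0.5);      // scalar double payload
      IntsAttr sizes(Symbol::attr("sizes"), {1, 2, 3}); // std::vector<int64_t> payload

      alpha.value() = 0.25;                          // value() gives mutable access
      AttributeValue::Ptr copy = sizes.clone();      // owning deep copy, same name/kind
      const char* kindName = toString(copy->kind()); // "is"
      (void)kindName;
    }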
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/constants.h ADDED
@@ -0,0 +1,61 @@
1
+ #pragma once
2
+ #include <ATen/core/ivalue.h>
3
+ #include <ATen/core/jit_type.h>
4
+ #include <torch/csrc/Export.h>
5
+ #include <torch/csrc/jit/frontend/source_range.h>
6
+ #include <torch/csrc/jit/ir/scope.h>
7
+
8
+ // helpers for handling constants in the IR
9
+ // - create constant nodes from ints, floats, complex, intlist, Tensors, and
10
+ // other types
11
+ // - implement primitive constant ops.
12
+ namespace torch {
13
+ namespace jit {
14
+
15
+ using ::c10::IValue;
16
+
17
+ struct Graph;
18
+ struct Value;
19
+
20
+ // thrown when insertConstant cannot encode the IValue into a graph
21
+ struct TORCH_API constant_not_supported_error : public std::runtime_error {
22
+ using runtime_error::runtime_error;
23
+ };
24
+
25
+ TORCH_API Value* insertConstant(
26
+ Graph& g,
27
+ const IValue& val,
28
+ c10::optional<SourceRange> loc = c10::nullopt,
29
+ c10::optional<ScopePtr> scope = c10::nullopt);
30
+
31
+ // note: prefer g.insertConsant(val, loc) which does exactly the same thing
32
+ // this function is only declared/defined here because its implementation is
33
+ // closely related to the implementation of prim::Constant that is also in
34
+ // constants.cpp.
35
+ //
36
+ // returns a c10::nullopt if the IValue kind cannot be inserted as a constant
37
+ TORCH_API c10::optional<Value*> tryInsertConstant(
38
+ Graph& g,
39
+ const IValue& val,
40
+ c10::optional<SourceRange> loc = c10::nullopt,
41
+ c10::optional<ScopePtr> scope = c10::nullopt);
42
+
43
+ ////////////////////////////////////////////////////////////////////////////////
44
+ // Helper for retrieving constants
45
+ ////////////////////////////////////////////////////////////////////////////////
46
+
47
+ // attempt to convert a (possibly constant) Value* into an interpreter value
48
+ // (IValue). returns c10::nullopt if the Value* was not constant
49
+ TORCH_API c10::optional<IValue> toIValue(const Value* v);
50
+
51
+ // if a value is a constant then try to turn into type T using the
52
+ // same rules as the interpreter
53
+ template <typename T>
54
+ c10::optional<T> constant_as(const Value* v) {
55
+ if (auto ivalue = toIValue(v)) {
56
+ return ivalue->to<T>();
57
+ }
58
+ return c10::nullopt;
59
+ }
60
+ } // namespace jit
61
+ } // namespace torch
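
A short, hypothetical sketch of the round trip these helpers enable (not part of this commit; assumes a Graph g already exists):

    #include <torch/csrc/jit/ir/constants.h>
    #include <torch/csrc/jit/ir/ir.h>

    void constantRoundTrip(torch::jit::Graph& g) {
      using namespace torch::jit;

      // Encode an IValue as a prim::Constant node and get its output Value.
      Value* three = insertConstant(g, IValue(static_cast<int64_t>(3)));

      // Read it back, either as a generic IValue or converted to a concrete type.
      if (c10::optional<IValue> iv = toIValue(three)) {
        int64_t raw = iv->toInt();
        (void)raw;
      }
      c10::optional<int64_t> asInt = constant_as<int64_t>(three);
      (void)asInt;
    }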
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/graph_node_list.h ADDED
@@ -0,0 +1,201 @@
1
+ #pragma once
2
+
3
+ #include <c10/util/Exception.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ // Intrusive doubly linked lists with sane reverse iterators.
9
+ // The header file is named generic_graph_node_list.h because it is ONLY
10
+ // used for Graph's Node lists, and if you want to use it for other
11
+ // things, you will have to do some refactoring.
12
+ //
13
+ // At the moment, the templated type T must support a few operations:
14
+ //
15
+ // - It must have a field: T* next_in_graph[2] = { nullptr, nullptr };
16
+ // which are used for the intrusive linked list pointers.
17
+ //
18
+ // - It must have a method 'destroy()', which removes T from the
19
+ // list and frees a T.
20
+ //
21
+ // In practice, we are only using it with Node and const Node. 'destroy()'
22
+ // needs to be renegotiated if you want to use this somewhere else.
23
+ //
24
+ // Regardless of the iteration direction, iterators always physically point
25
+ // to the element they logically point to, rather than
26
+ // the off-by-one behavior for all standard library reverse iterators like
27
+ // std::list.
28
+
29
+ // The list includes two sentinel nodes, one at the beginning and one at the
30
+ // end with a circular link between them. It is an error to insert nodes after
31
+ // the end sentinel node but before the beginning node:
32
+
33
+ // Visualization showing only the next() links:
34
+ // HEAD -> first -> second -> ... -> last -> TAIL
35
+ // ^------------------------------------------
36
+
37
+ // Visualization showing only the prev() links:
38
+ // HEAD <- first <- second <- ... <- last <- TAIL
39
+ // ------------------------------------------^
40
+
41
+ static constexpr int kNextDirection = 0;
42
+ static constexpr int kPrevDirection = 1;
43
+
44
+ template <typename T>
45
+ struct generic_graph_node_list;
46
+
47
+ template <typename T>
48
+ struct generic_graph_node_list_iterator;
49
+
50
+ struct Node;
51
+ using graph_node_list = generic_graph_node_list<Node>;
52
+ using const_graph_node_list = generic_graph_node_list<const Node>;
53
+ using graph_node_list_iterator = generic_graph_node_list_iterator<Node>;
54
+ using const_graph_node_list_iterator =
55
+ generic_graph_node_list_iterator<const Node>;
56
+
57
+ template <typename T>
58
+ struct generic_graph_node_list_iterator {
59
+ generic_graph_node_list_iterator() : cur(nullptr), d(kNextDirection) {}
60
+ generic_graph_node_list_iterator(T* cur, int d) : cur(cur), d(d) {}
61
+ generic_graph_node_list_iterator(
62
+ const generic_graph_node_list_iterator& rhs) = default;
63
+ generic_graph_node_list_iterator(
64
+ generic_graph_node_list_iterator&& rhs) noexcept = default;
65
+ generic_graph_node_list_iterator& operator=(
66
+ const generic_graph_node_list_iterator& rhs) = default;
67
+ generic_graph_node_list_iterator& operator=(
68
+ generic_graph_node_list_iterator&& rhs) noexcept = default;
69
+ T* operator*() const {
70
+ return cur;
71
+ }
72
+ T* operator->() const {
73
+ return cur;
74
+ }
75
+ generic_graph_node_list_iterator& operator++() {
76
+ AT_ASSERT(cur);
77
+ cur = cur->next_in_graph[d];
78
+ return *this;
79
+ }
80
+ generic_graph_node_list_iterator operator++(int) {
81
+ generic_graph_node_list_iterator old = *this;
82
+ ++(*this);
83
+ return old;
84
+ }
85
+ generic_graph_node_list_iterator& operator--() {
86
+ AT_ASSERT(cur);
87
+ cur = cur->next_in_graph[reverseDir()];
88
+ return *this;
89
+ }
90
+ generic_graph_node_list_iterator operator--(int) {
91
+ generic_graph_node_list_iterator old = *this;
92
+ --(*this);
93
+ return old;
94
+ }
95
+
96
+ // erase cur without invalidating this iterator
97
+ // named differently from destroy so that ->/. bugs do not
98
+ // silently cause the wrong one to be called.
99
+ // iterator will point to the previous entry after call
100
+ void destroyCurrent() {
101
+ T* n = cur;
102
+ cur = cur->next_in_graph[reverseDir()];
103
+ n->destroy();
104
+ }
105
+ generic_graph_node_list_iterator reverse() {
106
+ return generic_graph_node_list_iterator(cur, reverseDir());
107
+ }
108
+
109
+ private:
110
+ int reverseDir() {
111
+ return d == kNextDirection ? kPrevDirection : kNextDirection;
112
+ }
113
+ T* cur;
114
+ int d; // direction 0 is forward 1 is reverse, see next_in_graph
115
+ };
116
+
117
+ template <typename T>
118
+ struct generic_graph_node_list {
119
+ using iterator = generic_graph_node_list_iterator<T>;
120
+ using const_iterator = generic_graph_node_list_iterator<const T>;
121
+ generic_graph_node_list_iterator<T> begin() {
122
+ return generic_graph_node_list_iterator<T>(head->next_in_graph[d], d);
123
+ }
124
+ generic_graph_node_list_iterator<const T> begin() const {
125
+ return generic_graph_node_list_iterator<const T>(head->next_in_graph[d], d);
126
+ }
127
+ generic_graph_node_list_iterator<T> end() {
128
+ return generic_graph_node_list_iterator<T>(head->next_in_graph[!d], d);
129
+ }
130
+ generic_graph_node_list_iterator<const T> end() const {
131
+ return generic_graph_node_list_iterator<const T>(
132
+ head->next_in_graph[!d], d);
133
+ }
134
+ generic_graph_node_list_iterator<T> rbegin() {
135
+ return reverse().begin();
136
+ }
137
+ generic_graph_node_list_iterator<const T> rbegin() const {
138
+ return reverse().begin();
139
+ }
140
+ generic_graph_node_list_iterator<T> rend() {
141
+ return reverse().end();
142
+ }
143
+ generic_graph_node_list_iterator<const T> rend() const {
144
+ return reverse().end();
145
+ }
146
+ generic_graph_node_list reverse() {
147
+ return generic_graph_node_list(head->next_in_graph[!d], !d);
148
+ }
149
+ const generic_graph_node_list reverse() const {
150
+ return generic_graph_node_list(head->next_in_graph[!d], !d);
151
+ }
152
+ T* front() {
153
+ return head->next_in_graph[d];
154
+ }
155
+ const T* front() const {
156
+ return head->next_in_graph[d];
157
+ }
158
+ T* back() {
159
+ return head->next_in_graph[!d];
160
+ }
161
+ const T* back() const {
162
+ return head->next_in_graph[!d];
163
+ }
164
+ generic_graph_node_list(T* head, int d) : head(head), d(d) {}
165
+
166
+ private:
167
+ T* head; // both head and tail are sentinel nodes
168
+ // the first real node is head->next_in_graph[d]
169
+ // the tail sentinel is head->next_in_graph[!d]
170
+ int d;
171
+ };
172
+
173
+ template <typename T>
174
+ static inline bool operator==(
175
+ generic_graph_node_list_iterator<T> a,
176
+ generic_graph_node_list_iterator<T> b) {
177
+ return *a == *b;
178
+ }
179
+
180
+ template <typename T>
181
+ static inline bool operator!=(
182
+ generic_graph_node_list_iterator<T> a,
183
+ generic_graph_node_list_iterator<T> b) {
184
+ return *a != *b;
185
+ }
186
+
187
+ } // namespace jit
188
+ } // namespace torch
189
+
190
+ namespace std {
191
+
192
+ template <typename T>
193
+ struct iterator_traits<torch::jit::generic_graph_node_list_iterator<T>> {
194
+ using difference_type = int64_t;
195
+ using value_type = T*;
196
+ using pointer = T**;
197
+ using reference = T*&;
198
+ using iterator_category = bidirectional_iterator_tag;
199
+ };
200
+
201
+ } // namespace std
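
In practice this list type is what Graph::nodes() (declared in ir.h) hands back, so iterating a graph's nodes forwards or in reverse looks like the hypothetical sketch below (not part of this commit):

    #include <torch/csrc/jit/ir/ir.h>

    void walkNodes(torch::jit::Graph& graph) {
      using torch::jit::Node;

      // Forward traversal of the real nodes between the two sentinels.
      for (Node* n : graph.nodes()) {
        (void)n; // inspect n->kind(), n->inputs(), ...
      }

      // Reverse traversal; unlike std::reverse_iterator, the iterator points
      // directly at the element it logically refers to.
      for (auto it = graph.nodes().rbegin(); it != graph.nodes().rend(); ++it) {
        Node* n = *it;
        (void)n;
      }
    }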
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/graph_utils.h ADDED
@@ -0,0 +1,25 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ #include <vector>
6
+
7
+ namespace torch {
8
+ namespace jit {
9
+
10
+ TORCH_API TypePtr getTensorType(const at::Tensor& t, bool complete);
11
+
12
+ TORCH_API TypePtr inferShapeAndTypeForInput(
13
+ TypePtr input_type,
14
+ Stack::const_iterator& s_iter,
15
+ const Stack::const_iterator& s_iter_end,
16
+ bool complete);
17
+
18
+ TORCH_API void setInputTensorTypes(
19
+ Graph& g,
20
+ const Stack& stack,
21
+ bool complete,
22
+ const std::vector<int>& param_count_list = {});
23
+
24
+ } // namespace jit
25
+ } // namespace torch
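
A brief, hypothetical sketch of these helpers (not part of this commit; at::ones is used only to fabricate an example tensor):

    #include <ATen/ATen.h>
    #include <torch/csrc/jit/ir/graph_utils.h>

    void annotateInputs(torch::jit::Graph& g, const torch::jit::Stack& stack) {
      // Derive a TypePtr for a single tensor; `complete` asks for sizes/strides.
      at::Tensor t = at::ones({2, 3});
      torch::jit::TypePtr ty = torch::jit::getTensorType(t, /*complete=*/false);
      (void)ty;

      // Stamp types inferred from a runtime Stack onto the graph's inputs.
      torch::jit::setInputTensorTypes(g, stack, /*complete=*/true);
    }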
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/ir.h ADDED
@@ -0,0 +1,1841 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/attributes.h>
4
+ #include <torch/csrc/jit/ir/graph_node_list.h>
5
+ #include <torch/csrc/jit/ir/named_value.h>
6
+ #include <torch/csrc/jit/ir/scope.h>
7
+ #include <torch/csrc/jit/runtime/operator.h>
8
+
9
+ #include <torch/csrc/Export.h>
10
+ #include <torch/csrc/utils/python_stub.h>
11
+ #include <torch/csrc/utils/schema_info.h>
12
+
13
+ #include <ATen/Utils.h>
14
+ #include <ATen/core/Tensor.h>
15
+ #include <ATen/core/dynamic_type.h>
16
+ #include <ATen/core/enum_type.h>
17
+ #include <ATen/core/functional.h>
18
+ #include <ATen/core/interned_strings.h>
19
+ #include <ATen/core/ivalue.h>
20
+ #include <ATen/core/jit_type.h>
21
+ #include <c10/util/ArrayRef.h>
22
+ #include <c10/util/Exception.h>
23
+ #include <c10/util/Optional.h>
24
+
25
+ #include <functional>
26
+ #include <iosfwd>
27
+ #include <unordered_set>
28
+ #include <vector>
29
+
30
+ // Forward declare, the real meat is in python_ir.cpp
31
+ template <class T>
32
+ class THPPointer;
33
+ using THPObjectPtr = THPPointer<PyObject>;
34
+ using pyobj_list = std::vector<THPObjectPtr>;
35
+
36
+ namespace torch {
37
+ namespace jit {
38
+ namespace utils {
39
+ TORCH_API std::string getNodesModuleHierarchy(const Node& n);
40
+ } // namespace utils
41
+ class AliasDb;
42
+
43
+ using ::c10::Argument;
44
+ using ::c10::FunctionSchema;
45
+ using ::c10::Symbol;
46
+
47
+ using ::c10::ivalue::Shared;
48
+
49
+ using ::c10::IValue;
50
+ using ::c10::ivalue::Future;
51
+
52
+ using ::c10::ivalue::ConstantString;
53
+
54
+ #define C10_USING(T) using ::c10::T;
55
+ C10_FORALL_TYPES(C10_USING)
56
+ #undef C10_USING
57
+
58
+ #define C10_USING(T) using ::c10::T##Ptr;
59
+ C10_FORALL_TYPES(C10_USING)
60
+ #undef C10_USING
61
+
62
+ using ::c10::Type;
63
+ using ::c10::TypeEnv;
64
+ using ::c10::TypePtr;
65
+
66
+ using ::c10::getTypePtr;
67
+ using ::c10::MatchTypeReturn;
68
+ using ::c10::TypeKind;
69
+
70
+ using ::c10::fmap;
71
+
72
+ namespace prim {
73
+ using namespace ::c10::prim;
74
+ }
75
+ namespace attr {
76
+ using namespace ::c10::attr;
77
+ }
78
+ namespace aten {
79
+ using namespace ::c10::aten;
80
+ }
81
+ namespace cuda {
82
+ #if !defined(USE_ROCM)
83
+ using namespace ::c10::cuda;
84
+ #endif
85
+ } // namespace cuda
86
+
87
+ struct Function;
88
+ struct GraphFunction;
89
+ struct MatchedSchema;
90
+
91
+ // A Graph represents one "function" of computation.
92
+ // It uses a simple ownership model where the graph owns all the nodes inside
93
+ // it. All references inside the graph are raw pointers. Destroying the Graph
94
+ // will invalidate any pointers to nodes in the graph.
95
+ struct Graph;
96
+
97
+ // Node is the base class of the IR graph. It represents one computation
98
+ // and dependencies on a list of Values. The "prim-ops", so to speak.
99
+ struct Node;
100
+
101
+ // A Value represents an input or output to node that is either a
102
+ // Tensor or an opaque Handle object, as determined by type().
103
+ struct Value;
104
+
105
+ TORCH_API std::ostream& operator<<(std::ostream& out, const Graph& g);
106
+ TORCH_API std::ostream& operator<<(std::ostream& out, const Node& n);
107
+
108
+ // A list of nodes, with inputs and outputs
109
+ struct Block;
110
+
111
+ // Each use is represented by this type, see 'Node::uses()'
112
+ // 'user' is the consumer of the value, 'offset' is the index into
113
+ // 'user's input this where the producers will be found.
114
+ struct Use {
115
+ Use(Node* user, size_t offset) : user(user), offset(offset) {}
116
+ Node* user;
117
+ size_t offset;
118
+
119
+ bool operator==(const Use& b) {
120
+ return user == b.user && offset == b.offset;
121
+ }
122
+ };
123
+
124
+ // Note [User node does not uniquely identify use]
125
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
126
+ // A while back, we wrote some code manipulating uses that looked like this:
127
+ //
128
+ // for (auto& use : used_val->uses_) {
129
+ // if (use.user == this_node) {
130
+ // use.offset += 1;
131
+ // break;
132
+ // }
133
+ // }
134
+ //
135
+ // This code is trying to find a particular use (our node's use) to update it.
136
+ // However, it's wrong: there may be *multiple* uses of a value %x in a node,
137
+ // as might be the case in this IR:
138
+ //
139
+ // %y = Add %x %x
140
+ //
141
+ // In this case, there are two uses of %x whose user is the node 'Add %x %x'.
142
+ // So, "use induced by this node" is not a well-formed concept.
143
+ //
144
+ // If you are looking for "use induced by an input", it's best to use
145
+ // findUseForInput() to get it.
146
+
147
+ // the list types are intentionally simple, but we type-def
148
+ // them here so if we need to change them, refactoring will be easier
149
+ using node_list = std::vector<Node*>;
150
+ using value_list = std::vector<Value*>;
151
+ using use_list = std::vector<Use>;
152
+ template <typename T>
153
+ using ArrayRef = at::ArrayRef<T>;
154
+ using NodeKind = Symbol;
155
+ using topo_position_t = int64_t;
156
+ using ValueSet = std::unordered_set<const Value*>;
157
+
158
+ struct OperatorSet;
159
+ template <typename T>
160
+ struct OperatorMap;
161
+
162
+ // This is a wrapper to allow invalidating the Python object
163
+ // safely when the C++ object for a Node/Value/Block is deleted
164
+ // like much of graph, it isn't safe for different threads to
165
+ // access the same graph
166
+ template <typename T>
167
+ struct Wrap {
168
+ explicit Wrap(T* p) : elem(p), clear_cb(nullptr) {}
169
+ void clear() {
170
+ if (clear_cb) {
171
+ clear_cb(elem);
172
+ }
173
+ elem = nullptr;
174
+ }
175
+ T* elem;
176
+ void (*clear_cb)(void*);
177
+ };
178
+
179
+ struct Value {
180
+ AT_DISALLOW_COPY_AND_ASSIGN(Value);
181
+ Value(Node* node_, size_t offset_);
182
+
183
+ private:
184
+ friend struct Node;
185
+ friend struct Graph;
186
+ Node* node_;
187
+ size_t offset_;
188
+ size_t unique_ = 0; // unique id
189
+ use_list uses_;
190
+ std::string unique_name_;
191
+ TypePtr type_;
192
+ // a managing wrapper for Python to allow invalidation
193
+ std::shared_ptr<Wrap<Value>> wrap_;
194
+
195
+ public:
196
+ Value* setType(TypePtr type);
197
+ TORCH_API void inferTypeFrom(const at::Tensor& output);
198
+ TORCH_API void inferTypeFrom(
199
+ const c10::intrusive_ptr<c10::ivalue::Object>& output);
200
+ const TypePtr& type() const {
201
+ AT_ASSERT(type_ != nullptr);
202
+ return type_;
203
+ }
204
+ bool requires_grad() const {
205
+ return type()->requires_grad();
206
+ }
207
+ bool isCompleteTensor() const {
208
+ if (auto pt = type()->cast<TensorType>()) {
209
+ return pt->isComplete();
210
+ }
211
+ return false;
212
+ }
213
+ TORCH_API bool mustBeNone() const;
214
+ TORCH_API bool mustNotBeNone() const;
215
+ size_t unique() const {
216
+ return unique_;
217
+ }
218
+ bool hasDebugName() const {
219
+ return !unique_name_.empty();
220
+ }
221
+ static bool isValidName(const std::string& name);
222
+ TORCH_API Value* setDebugName(const std::string& name);
223
+ std::string debugName() const {
224
+ if (hasDebugName()) {
225
+ return unique_name_;
226
+ }
227
+ return c10::to_string(unique());
228
+ }
229
+ TORCH_API std::string debugNameBase() const;
230
+ Node* node() {
231
+ return node_;
232
+ }
233
+ size_t offset() const {
234
+ return offset_;
235
+ }
236
+ void setOffset(size_t offset) {
237
+ offset_ = offset;
238
+ }
239
+ const Node* node() const {
240
+ return node_;
241
+ }
242
+
243
+ /**
244
+ * @warning NEVER pass raw pointer of smart pointer managed Graph to Python.
245
+ * Check #87343 for details.
246
+ */
247
+ Graph* owningGraph();
248
+ const Graph* owningGraph() const;
249
+ // TODO: make this more const correct
250
+ const use_list& uses() const {
251
+ return uses_;
252
+ }
253
+
254
+ bool hasUses() const {
255
+ return !uses().empty();
256
+ }
257
+
258
+ TORCH_API void replaceFirstUseWith(Value* newValue);
259
+
260
+ // Replaces all uses of this value with 'newValue'.
261
+ //
262
+ // Given: %3 = f(%1, %2)
263
+ // %4 = g(%3)
264
+ // %5 = h(%3, %3)
265
+ // Execute: %3.replaceAllUsesWith(%6)
266
+ // Result: %3 = f(%1, %2)
267
+ // %4 = g(%6)
268
+ // %5 = h(%6, %6)
269
+ TORCH_API void replaceAllUsesWith(Value* newValue);
270
+
271
+ // Replaces all uses of this value with 'newValue' after 'node'.
272
+ // Given: %3 = f(%1, %2)
273
+ // %4 = g(%3)
274
+ // %5 = inplace_(%3)
275
+ // %6 = h(%3, %3)
276
+ // Execute: %3.replaceAllUsesAfterNodeWith(%5.node(), %5)
277
+ // Result: %3 = f(%1, %2)
278
+ // %4 = g(%3)
279
+ // %5 = inplace_(%3)
280
+ // %6 = h(%5, %5)
281
+ // XXX: does not check scoping legality, consider using
282
+ // replaceAllUsesDominatedByNodeWith
283
+ TORCH_API void replaceAllUsesAfterNodeWith(const Node* node, Value* newValue);
284
+
285
+ // Replaces all uses of this value with 'newValue' that are dominated by
286
+ // 'node'. Given:
287
+ // x = op(...).
288
+ // if cond:
289
+ // z = foo(..)
290
+ // bar(x)
291
+ // else:
292
+ // print(x)
293
+ // x.replaceAllUsesDominatedByNodeWith(foo, z) would replace bar(x)
294
+ // but not print(x) because print is not dominated by foo.
295
+ // replaceAllUsesAfterNode does not check domination, so in this example
296
+ // it would produce invalid IR.
297
+ TORCH_API void replaceAllUsesDominatedByNodeWith(
298
+ const Node* node,
299
+ Value* newValue);
300
+
301
+ TORCH_API Value* copyMetadata(Value* from);
302
+
303
+ TORCH_API std::shared_ptr<Wrap<Value>> wrap() {
304
+ if (!wrap_) {
305
+ wrap_ = std::make_shared<Wrap<Value>>(this);
306
+ }
307
+ return wrap_;
308
+ }
309
+
310
+ virtual ~Value() {
311
+ if (wrap_) {
312
+ wrap_->clear();
313
+ }
314
+ }
315
+ };
316
+
317
+ struct TORCH_API Node {
318
+ AT_DISALLOW_COPY_AND_ASSIGN(Node);
319
+ friend struct Graph;
320
+ friend struct Block;
321
+ friend struct Value;
322
+ friend graph_node_list;
323
+ friend const_graph_node_list;
324
+ friend graph_node_list_iterator;
325
+ friend const_graph_node_list_iterator;
326
+
327
+ private:
328
+ const NodeKind kind_;
329
+ std::vector<Value*> inputs_;
330
+ std::vector<Value*> outputs_;
331
+ // subblocks
332
+ std::vector<Block*> blocks_;
333
+ Graph* graph_;
334
+ Block* owning_block_;
335
+ c10::optional<SourceRange> source_range_;
336
+ ScopePtr scope_;
337
+ c10::optional<InlinedCallStackPtr> callstack_;
338
+ // Assumes FunctionSchemas are persistent, so we don't manage their lifetime.
339
+ // This field is effective a cache that's populated on attribute lookups and
340
+ // invalidated every time we perform an operation that could potentially
341
+ // change the schema. note: mutable because schema_ is effectively a cache
342
+ mutable const Operator* op_;
343
+ topo_position_t topo_position_ = 0;
344
+ // a managing wrapper for Python to allow invalidation
345
+ std::shared_ptr<Wrap<Node>> wrap_;
346
+ // Stores the full schema name, if the operator is historic
347
+ // When the operator is deprecated or the name of the operator
348
+ // is changed, we need to rely on this name
349
+ // to retrieve old schemas to successfully apply upgraders
350
+ // for this operator.
351
+ c10::optional<std::string> historic_schema_name_ = c10::nullopt;
352
+
353
+ protected:
354
+ Node(Graph* graph_, NodeKind kind_); // defined after graph
355
+ public:
356
+ // Each Node but Return/Param Nodes are associated with exactly one
357
+ // place in the Node list of the Graph. The Graph itself is a circular
358
+ // doubly-linked list. The Return Node is used as the sentinel for the
359
+ // "beginning"/"end" of the list. This means that you can tell when
360
+ // you've traversed the entire list without means worrying about null
361
+ // pointers. `next_in_graph[0]` is the pointer to the next Node, while
362
+ // `next_in_graph[1]` is the pointer to the previous Node. The
363
+ // linked list is implemented as an array to allow the same iterator
364
+ // class for forward and reversed Node lists. Taken together, this
365
+ // list also represents a topological sort of the Nodes in the Graph.
366
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,cppcoreguidelines-non-private-member-variables-in-classes,modernize-avoid-c-arrays)
367
+ Node* next_in_graph[2] = {nullptr, nullptr};
368
+
369
+ std::shared_ptr<Wrap<Node>> wrap() {
370
+ if (!wrap_) {
371
+ wrap_ = std::make_shared<Wrap<Node>>(this);
372
+ }
373
+ return wrap_;
374
+ }
375
+
376
+ const c10::optional<std::string> getHistoricSchemaName() {
377
+ return historic_schema_name_;
378
+ }
379
+
380
+ void setHistoricSchemaName(const std::string& name) {
381
+ historic_schema_name_ = name;
382
+ }
383
+
384
+ Node*& next() {
385
+ return next_in_graph[kNextDirection];
386
+ }
387
+ Node*& prev() {
388
+ return next_in_graph[kPrevDirection];
389
+ }
390
+ Node* const& next() const {
391
+ return next_in_graph[kNextDirection];
392
+ }
393
+ Node* const& prev() const {
394
+ return next_in_graph[kPrevDirection];
395
+ }
396
+
397
+ NodeKind kind() const {
398
+ return kind_;
399
+ }
400
+ Node* setSourceRange(SourceRange r) {
401
+ source_range_ = std::move(r);
402
+ return this;
403
+ }
404
+ SourceRange sourceRange() const;
405
+
406
+ /**
407
+ * @warning NEVER pass raw pointer of smart pointer managed Graph to Python.
408
+ * Check #87343 for details.
409
+ */
410
+ Graph* owningGraph() {
411
+ return graph_;
412
+ }
413
+ const Graph* owningGraph() const {
414
+ return graph_;
415
+ }
416
+ Block* owningBlock() {
417
+ return owning_block_;
418
+ }
419
+ const Block* owningBlock() const {
420
+ return owning_block_;
421
+ }
422
+ ScopePtr scope() {
423
+ return scope_;
424
+ }
425
+ void setScope(ScopePtr scope) {
426
+ scope_ = std::move(scope);
427
+ }
428
+ std::string scopeName() const {
429
+ if (!scope_) {
430
+ return "";
431
+ }
432
+ return scope_->namesFromRoot();
433
+ }
434
+
435
+ // Copies the source range, scope and callstack from another node.
436
+ Node* copyMetadata(Node* from) {
437
+ this->setSourceRange(from->sourceRange());
438
+ this->setScope(from->scope());
439
+ if (auto cs = from->callstack()) {
440
+ this->setCallStack(*cs);
441
+ }
442
+ return this;
443
+ }
444
+
445
+ c10::optional<InlinedCallStackPtr> callstack() const {
446
+ return callstack_;
447
+ }
448
+ void setCallStack(InlinedCallStackPtr cs) {
449
+ callstack_ = std::move(cs);
450
+ }
451
+
452
+ // NB: This returns an ArrayRef; that means that it will
453
+ // get invalidated if you resize inputs (e.g., using addInput)
454
+ // We can't return a std::vector<Node*>& because there's no
455
+ // way to soundly cast to std::vector<const Node*> (an insane
456
+ // implementation of std::vector could make this representationally
457
+ // different.)
458
+ at::ArrayRef<Value*> inputs() {
459
+ return inputs_;
460
+ }
461
+ at::ArrayRef<const Value*> inputs() const {
462
+ // Vectors are not convertible in const-ness of elements, but
463
+ // raw pointers are.
464
+ return {inputs_.data(), inputs_.size()};
465
+ }
466
+ // NB: This returns an ArrayRef; that means that it will
467
+ // get invalidated if you resize inputs (e.g., using addInput)
468
+ // We can't return a std::vector<Node*>& because there's no
469
+ // way to soundly cast to std::vector<const Node*> (an insane
470
+ // implementation of std::vector could make this representationally
471
+ // different.)
472
+ at::ArrayRef<Value*> outputs() {
473
+ return outputs_;
474
+ }
475
+ at::ArrayRef<const Value*> outputs() const {
476
+ // Vectors are not convertible in const-ness of elements, but
477
+ // raw pointers are.
478
+ return {outputs_.data(), outputs_.size()};
479
+ }
480
+ Value* output(size_t i) const {
481
+ return outputs_.at(i);
482
+ }
483
+ bool hasUses() const {
484
+ for (auto o : outputs()) {
485
+ if (!o->uses().empty()) {
486
+ return true;
487
+ }
488
+ }
489
+ return false;
490
+ }
491
+
492
+ void replaceAllUsesWith(Node* n);
493
+
494
+ // replaces `this` with a new node with the same inputs and outputs
495
+ // but a new node symbol. does not destroy `this`
496
+ Node* replaceWithNewSymbol(Symbol new_symbol);
497
+
498
+ // Checks if this node is dominated by `dominator` which means that
499
+ // `dominator` will always be executed before `this` and `dominator`
500
+ // is in scope of `this`.
501
+ bool isDominatedBy(const Node* dominator) const;
502
+
503
+ // lots of things like chunk have a single input or single output, so we have
504
+ // a helper to make accessing it easier
505
+ Value* input() {
506
+ AT_ASSERT(inputs_.size() == 1);
507
+ return inputs_.at(0);
508
+ }
509
+ Value* output() {
510
+ AT_ASSERT(outputs_.size() == 1);
511
+ return outputs_.at(0);
512
+ }
513
+ const Value* output() const {
514
+ AT_ASSERT(outputs_.size() == 1);
515
+ return outputs_.at(0);
516
+ }
517
+ const Value* input() const {
518
+ AT_ASSERT(inputs_.size() == 1);
519
+ return inputs_.at(0);
520
+ }
521
+ // Access a particular input. This is a checked index.
522
+ Value* input(size_t i) const {
523
+ return inputs_.at(i);
524
+ }
525
+
526
+ bool hasNamedInput(const std::string& unqualName) const;
527
+ Value* namedInput(const std::string& unqualName) const;
528
+ Value* namedInput(Symbol name) const;
529
+
530
+ c10::optional<IValue> get(Symbol name) const;
531
+
532
+ template <typename T>
533
+ c10::optional<T> get(Symbol name) const {
534
+ if (auto v = get(name)) {
535
+ return v->template to<T>();
536
+ }
537
+ return c10::nullopt;
538
+ }
539
+
540
+ // Returns true if the value of input name is statically known
541
+ bool is_constant(Symbol name) const {
542
+ return static_cast<bool>(get(name));
543
+ }
544
+ bool mustBeNone() const;
545
+
546
+ bool isNondeterministic() const;
547
+ bool hasSideEffects() const;
548
+
549
+ // instructions lowered by the interpreter and not run in the optimized graph
550
+ bool notExecutedOp() const {
551
+ return kind_ == prim::Constant || kind_ == prim::profile ||
552
+ kind_ == prim::profile_ivalue;
553
+ }
554
+
555
+ // Graphs
556
+
557
+ // Note [Topological invariant]
558
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
559
+ // We always maintain an up-to-date topological ordering of all nodes via
560
+ // the next()/prev() links. All transformations to graphs must preserve
561
+ // this topological ordering: for example, it is only valid to 'addInput'
562
+ // with an input which is topologically before the current node.
563
+ //
564
+ // Usually, it is obvious whether or not topological order is maintained;
565
+ // for example, if you are adding nodes to the end of the topsort, it's
566
+ // impossible for them to refer to inputs that are not in the topsort.
567
+ // If it is not obvious, please comment accordingly.
568
+
569
+ // Add 'node' as an input to 'this' at the end of existing
570
+ // arguments. Returns the added node for ease of chaining.
571
+ //
572
+ // Given: %3 = f(%1, %2)
573
+ // Execute: %3.addInput(%4)
574
+ // Result: %3 = f(%1, %2, %4)
575
+ Value* addInput(Value* value);
576
+
577
+ // Add 'value' as an input to 'this' at the specified position in the
578
+ // arguments. Returns the added value for ease of chaining.
579
+ Value* insertInput(size_t i, Value* value);
580
+
581
+ // Replace the input of 'this' at position 'i' with
582
+ // 'newValue', returning the old node.
583
+ //
584
+ // Given: %3 = f(%1, %2)
585
+ // Execute: %3.replaceInput(1, %4)
586
+ // Result: %3 = f(%1, %4)
587
+ Value* replaceInput(size_t i, Value* newValue);
588
+
589
+ // Replace all occurrences of 'from' in the inputs of this
590
+ // node with 'to'. Corresponds to llvm's replaceUsesOfWith.
591
+ //
592
+ // Given: %3 = f(%1, %2, %1)
593
+ // Execute: %3.replaceInputWith(%1, %4)
594
+ // Result: %3 = f(%4, %2, %4)
595
+ void replaceInputWith(Value* from, Value* to);
596
+
597
+ Value* addOutput();
598
+
599
+ Value* insertOutput(size_t i);
600
+
601
+ void eraseOutput(size_t i);
602
+
603
+ Block* addBlock();
604
+ void eraseBlock(size_t i);
605
+
606
+ // Each Node can have a list of subblocks. These are used to define structured
607
+ // nested control flow operators such as If and Loop.
608
+ // The meaning of a block is specific to the kind of node it is in, but
609
+ // all blocks share these semantics:
610
+ // * Nested lexical scoping: If a node 'Parent' has a subblock which contains
611
+ // a node 'Child', Child can use any value that was in scope for the Parent
612
+ // node in addition to any values defined before 'Child' in the subblock.
613
+ // * The list of inputs to the block are in scope for the duration of the
614
+ // block
615
+ // * the outputs of the Parent node are not in scope for the subblocks
616
+ // Typically the inputs to a block that represents control flow act as
617
+ // the equivalent of phi-nodes in standard SSA form,
618
+ // defining a new Value to represent any term that has multiple
619
+ // definitions depending on how control flowed. Outputs of the node containing
620
+ // control flow serve a similar purpose, defining new values for variables
621
+ // that would have different definitions depending on which way control
622
+ // flowed.
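+ //
+ // Example (minimal sketch): building a prim::If with two subblocks;
+ // `graph`, `cond`, `a`, and `b` are assumed to already exist in a pass.
+ //
+ //   Node* if_node = graph->insertNode(graph->create(prim::If, {cond}, 0));
+ //   Block* then_block = if_node->addBlock();  // blocks().at(0)
+ //   Block* else_block = if_node->addBlock();  // blocks().at(1)
+ //   then_block->registerOutput(a);            // value produced when cond is true
+ //   else_block->registerOutput(b);            // value produced when cond is false
+ //   Value* merged = if_node->addOutput()->setType(a->type());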
623
+
624
+ at::ArrayRef<Block*> blocks() {
625
+ return blocks_;
626
+ }
627
+ at::ArrayRef<const Block*> blocks() const {
628
+ // Vectors are not convertible in const-ness of elements, but
629
+ // raw pointers are.
630
+ return {blocks_.data(), blocks_.size()};
631
+ }
632
+
633
+ // Is 'this' before 'n' in the topological order?
634
+ bool isBefore(const Node* n) const;
635
+
636
+ // Is 'this' after 'n' in the topological order?
637
+ bool isAfter(const Node* n) const;
638
+
639
+ // Insert unattached 'this' node before 'n' in the topological order.
640
+ // Returns this (for chaining).
641
+ //
642
+ // Given: %3 = f(%1, %2)
643
+ // %4 = g(%3)
644
+ // and unattached: %5 = h(%1)
645
+ // Execute: %5.insertBefore(%4)
646
+ // Result: %3 = f(%1, %2)
647
+ // %5 = h(%1)
648
+ // %4 = g(%3)
649
+ Node* insertBefore(Node* n);
650
+
651
+ // Insert unattached 'this' node after 'n' in the topological order.
652
+ // Returns this (for chaining).
653
+ //
654
+ // Given: %3 = f(%1, %2)
655
+ // %4 = g(%3)
656
+ // and unattached: %5 = h(%1)
657
+ // Execute: %5.insertAfter(%4)
658
+ // Result: %3 = f(%1, %2)
659
+ // %4 = g(%3)
660
+ // %5 = h(%1)
661
+ Node* insertAfter(Node* n);
662
+
663
+ // Move 'this' (already in the graph) after 'n' in the topological order.
664
+ //
665
+ // NOTE: Does not check that value dependencies are preserved, see
666
+ // AliasDb::moveAfterTopologicallyValid
667
+ //
668
+ // Given: %2 = f(%1)
669
+ // %3 = g(%1)
670
+ // Execute: %2.moveAfter(%3)
671
+ // Result: %3 = g(%1)
672
+ // %2 = f(%1)
673
+ //
674
+ void moveAfter(Node* n);
675
+
676
+ // Move a node 'n' (already in the graph) before 'this' in the topological
677
+ // order.
678
+ //
679
+ // NOTE: Does not check that value dependencies are preserved, see
680
+ // AliasDb::moveBeforeTopologicallyValid
681
+ //
682
+ // Given: %2 = f(%1)
683
+ // %3 = g(%1)
684
+ // Execute: %3.moveBefore(%2)
685
+ // Result: %3 = g(%1)
686
+ // %2 = f(%1)
687
+ void moveBefore(Node* n);
688
+
689
+ // Remove the input at 'i' from this node.
690
+ //
691
+ // WARNING: This is O(n) in the number of inputs, so avoid repeatedly calling
692
+ // removeInput.
693
+ //
694
+ // Given: %3 = f(%1, %2)
695
+ // Execute: %3.removeInput(1)
696
+ // Result: %3 = f(%1)
697
+ void removeInput(size_t i);
698
+
699
+ // Remove all inputs from a node.
700
+ //
701
+ // Given: %3 = f(%1, %2)
702
+ // Execute: %3.removeAllInputs()
703
+ // Result: %3 = f()
704
+ void removeAllInputs();
705
+
706
+ // Remove all outputs from a node.
707
+ //
708
+ // Given: %1, %2 = f()
709
+ // Execute: removeAllOutputs()
710
+ // Result: = f()
711
+ void removeAllOutputs();
712
+
713
+ // Rearrange the ordering of inputs or outputs of a node
714
+ // Given: %3 = f(%1, %2)
715
+ // Execute: %3.permuteInputs({1, 0})
716
+ // Result: %3 = f(%2, %1)
717
+ // Each index must appear exactly once
718
+ void permuteInputs(const std::vector<size_t>& new_inputs);
719
+ void permuteOutputs(const std::vector<size_t>& new_inputs);
720
+
721
+ // iterators of the node list starting at this node
722
+ // useful for resuming a search starting at this node
723
+ inline graph_node_list_iterator iterator() {
724
+ return {this, 0};
725
+ }
726
+ inline graph_node_list_iterator reverseIterator() {
727
+ return iterator().reverse();
728
+ }
729
+ inline const_graph_node_list_iterator iterator() const {
730
+ return {this, 0};
731
+ }
732
+ inline const_graph_node_list_iterator reverseIterator() const {
733
+ return iterator().reverse();
734
+ }
735
+
736
+ // Remove 'this' from the instruction list and deallocate it.
737
+ //
738
+ // Invariant: no outputs of 'this' may have any uses.
739
+ //
740
+ // Given: %2 = f(%1)
741
+ // %3 = g(%1)
742
+ // Execute: %2.destroy()
743
+ // Result: %3 = g(%1)
744
+ void destroy();
745
+
746
+ // Dynamically cast this node to the subclass indicated by the
747
+ // template variable, returning nullptr if the cast is invalid.
748
+ //
749
+ // Example usage: if(auto s = n.cast<Select>()) { ... }
750
+ template <typename T>
751
+ T* cast() {
752
+ if (T::Kind == kind()) {
753
+ return static_cast<T*>(this);
754
+ }
755
+ return nullptr;
756
+ }
757
+ template <typename T>
758
+ const T* cast() const {
759
+ if (T::Kind == kind()) {
760
+ return static_cast<const T*>(this);
761
+ }
762
+ return nullptr;
763
+ }
764
+
765
+ template <typename T>
766
+ T* expect() {
767
+ TORCH_CHECK(
768
+ T::Kind == kind(),
769
+ "expected a ",
770
+ T::Kind.toDisplayString(),
771
+ " but found a ",
772
+ kind().toDisplayString());
773
+ return static_cast<T*>(this);
774
+ }
775
+
776
+ bool matches(const FunctionSchema& schema) const;
777
+
778
+ // XXX: this function is meant to be used with string literals only!
779
+ bool matches(
780
+ const char* signature_literal,
781
+ at::ArrayRef<Symbol> const_inputs = {}) const;
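+ //
+ // Example (sketch): the literal must spell out a registered schema exactly;
+ // here `n` is assumed to be a Node* under inspection.
+ //
+ //   if (n->matches("aten::relu(Tensor self) -> Tensor")) {
+ //     Value* self = n->input(0);  // safe: the schema guarantees one input
+ //   }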
782
+
783
+ bool isMemberOf(const OperatorSet& os) const;
784
+ template <typename T>
785
+ bool isMemberOf(const OperatorMap<T>& om) const {
786
+ auto it = om.map.find(kind());
787
+ if (it == om.map.end()) {
788
+ return false;
789
+ }
790
+ for (auto& op : it->second) {
791
+ if (matches(op.first->schema())) {
792
+ return true;
793
+ }
794
+ }
795
+ return false;
796
+ }
797
+
798
+ const FunctionSchema& schema() const;
799
+ const FunctionSchema* maybeSchema() const;
800
+ const Operator& getOperator() const;
801
+ Operation getOperation() const;
802
+
803
+ const Operator* maybeOperator() const;
804
+
805
+ void dump() const;
806
+
807
+ std::ostream& print(
808
+ std::ostream& out,
809
+ size_t level,
810
+ std::vector<const Node*>* groups,
811
+ bool print_source_locations = true,
812
+ bool print_attributes = true,
813
+ bool print_scopes = true,
814
+ bool print_body = true) const;
815
+
816
+ virtual ~Node() {
817
+ if (wrap_) {
818
+ wrap_->clear();
819
+ }
820
+ }
821
+
822
+ // Methods for accessing attributes
823
+ Node* copyAttributes(const Node& rhs) {
824
+ values_.clear();
825
+ for (const AVPtr& i : rhs.values_) {
826
+ values_.push_back(i->clone());
827
+ }
828
+ return this;
829
+ }
830
+ bool hasAttribute(Symbol name) const {
831
+ AT_ASSERT(name.is_attr());
832
+ return findAttr(name, false) != values_.end();
833
+ }
834
+ bool hasAttributeS(const std::string& name) const {
835
+ return hasAttribute(Symbol::attr(name));
836
+ }
837
+ AttributeKind kindOf(Symbol name) const {
838
+ AT_ASSERT(name.is_attr());
839
+ return (*findAttr(name, true))->kind();
840
+ }
841
+ AttributeKind kindOfS(const std::string& name) const {
842
+ return kindOf(Symbol::attr(name));
843
+ }
844
+ Node* removeAttribute(Symbol name) {
845
+ AT_ASSERT(name.is_attr());
846
+ values_.erase(findAttr(name, true));
847
+ return this;
848
+ }
849
+ Node* removeAttributeS(const std::string& name) {
850
+ return removeAttribute(Symbol::attr(name));
851
+ }
852
+ bool hasAttributes() const {
853
+ return !values_.empty();
854
+ }
855
+ size_t numAttributes() const {
856
+ return values_.size();
857
+ }
858
+ // The names are returned in order, since name actually is the index.
859
+ std::vector<Symbol> attributeNames() const {
860
+ std::vector<Symbol> names;
861
+ names.reserve(values_.size());
862
+ for (const AVPtr& a : values_) {
863
+ names.push_back(a->name);
864
+ }
865
+ return names;
866
+ }
867
+ std::vector<const char*> attributeNamesS() const {
868
+ std::vector<const char*> names;
869
+ names.reserve(values_.size());
870
+ for (const AVPtr& a : values_) {
871
+ names.push_back(a->name.toUnqualString());
872
+ }
873
+ return names;
874
+ }
875
+
876
+ #define CREATE_ACCESSOR(Kind, method) \
877
+ Node* method##_(Symbol name, Kind##Attr::ConstructorType v) { \
878
+ return setAttr<Kind##Attr>( \
879
+ name, std::forward<Kind##Attr::ConstructorType>(v)); \
880
+ } \
881
+ const Kind##Attr::ValueType& method(Symbol name) const { \
882
+ return getAttr<Kind##Attr>(name); \
883
+ }
884
+
885
+ CREATE_ACCESSOR(Float, f)
886
+ CREATE_ACCESSOR(Complex, c)
887
+ CREATE_ACCESSOR(Floats, fs)
888
+ CREATE_ACCESSOR(ComplexVals, cs)
889
+ CREATE_ACCESSOR(String, s)
890
+ CREATE_ACCESSOR(Strings, ss)
891
+ CREATE_ACCESSOR(Int, i)
892
+ CREATE_ACCESSOR(Ints, is)
893
+ CREATE_ACCESSOR(Graph, g)
894
+ CREATE_ACCESSOR(Graphs, gs)
895
+ CREATE_ACCESSOR(Type, ty)
896
+ CREATE_ACCESSOR(Types, tys)
897
+ CREATE_ACCESSOR(IValue, ival)
898
+
899
+ #undef CREATE_ACCESSOR
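+ //
+ // Example (sketch): the generated accessors read and write attributes keyed
+ // by Symbol; `n` is assumed to be a Node* that accepts attributes.
+ //
+ //   n->i_(Symbol::attr("dim"), 1);          // set an Int attribute
+ //   int64_t d = n->i(Symbol::attr("dim"));  // read it back
+ //   n->s_(Symbol::attr("name"), "conv1");   // set a String attribute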
900
+
901
+ // Our Graphs are not very const-correct, so we need to allow returning
902
+ // non-const references too
903
+ GraphAttr::ValueType& g(Symbol name) {
904
+ return getAttr<GraphAttr>(name);
905
+ }
906
+
907
+ // does not use CREATE_ACCESSOR because we need additional asserts
908
+ Node* t_(Symbol name, TensorAttr::ConstructorType v) {
909
+ return setAttr<TensorAttr>(
910
+ name, std::forward<TensorAttr::ConstructorType>(v));
911
+ }
912
+ const TensorAttr::ValueType& t(Symbol name) const {
913
+ return getAttr<TensorAttr>(name);
914
+ }
915
+
916
+ Node* ts_(Symbol name, TensorsAttr::ConstructorType v) {
917
+ return setAttr<TensorsAttr>(
918
+ name, std::forward<TensorsAttr::ConstructorType>(v));
919
+ }
920
+ const TensorsAttr::ValueType& ts(Symbol name) const {
921
+ return getAttr<TensorsAttr>(name);
922
+ }
923
+
924
+ Block* findCommonAncestorBlockWith(Node* n);
925
+
926
+ size_t blocksFromGraphBlock();
927
+
928
+ private:
929
+ void printAttrValue(std::ostream& out, const Symbol& name) const;
930
+ void printAttributes(std::ostream& out, bool ignore_subgraph) const;
931
+
932
+ template <typename T>
933
+ Node* setAttr(Symbol name, typename T::ConstructorType v) {
934
+ AT_ASSERT(name.is_attr());
935
+ auto it = findAttr(name, false);
936
+ auto nv = AVPtr(new T(name, std::forward<typename T::ConstructorType>(v)));
937
+ // NOLINTNEXTLINE(bugprone-branch-clone)
938
+ if (it == values_.end()) {
939
+ values_.push_back(std::move(nv));
940
+ } else {
941
+ *it = std::move(nv);
942
+ }
943
+ return this;
944
+ }
945
+ template <typename T>
946
+ typename T::ValueType& getAttr(Symbol name) const {
947
+ AT_ASSERT(name.is_attr());
948
+ auto it = findAttr(name, true);
949
+ auto* child = dynamic_cast<T*>(it->get());
950
+ if (child == nullptr) {
951
+ throw IRAttributeError(name, true);
952
+ }
953
+ return child->value();
954
+ }
955
+ using AVPtr = AttributeValue::Ptr;
956
+ // NB: For determinism, we use a vector rather than a hash map. This does
957
+ // mean that lookups are O(n), so you shouldn't use Attributes to store
958
+ // a big pile of messages.
959
+ std::vector<AVPtr> values_;
960
+ std::vector<AVPtr>::iterator findAttr(Symbol name, bool required) {
961
+ AT_ASSERT(name.is_attr());
962
+ auto it = std::find_if(values_.begin(), values_.end(), [&](const AVPtr& v) {
963
+ return v->name == name;
964
+ });
965
+ if (required && it == values_.end()) {
966
+ throw IRAttributeError(name, false);
967
+ }
968
+ AT_ASSERT(!required || it != values_.end());
969
+ return it;
970
+ }
971
+ std::vector<AVPtr>::const_iterator findAttr(Symbol name, bool required)
972
+ const {
973
+ AT_ASSERT(name.is_attr());
974
+ auto it = std::find_if(values_.begin(), values_.end(), [&](const AVPtr& v) {
975
+ return v->name == name;
976
+ });
977
+ if (required && it == values_.end()) {
978
+ throw IRAttributeError(name, false);
979
+ }
980
+ AT_ASSERT(!required || it != values_.end());
981
+ return it;
982
+ }
983
+
984
+ enum class MoveSide { BEFORE, AFTER };
985
+ bool isBeforeOrAfter(const Node* n, MoveSide moveSide) const;
986
+
987
+ std::pair<Value*, const Argument&> findInput(Symbol name);
988
+ // Lookup iterator in use list of _input i_ that corresponds to its use of
989
+ // _this_
990
+ use_list::iterator findUseForInput(size_t i);
991
+
992
+ // remove the use of input i, this sets input i to nullptr, but
993
+ // is only used internally to Node before setting it to a new value
994
+ // or erasing the entry from the list.
995
+ Value* dropInput(size_t i);
996
+
997
+ bool inBlockList() const {
998
+ if (next() == nullptr) {
999
+ AT_ASSERT(prev() == nullptr);
1000
+ }
1001
+ return next() != nullptr;
1002
+ }
1003
+
1004
+ void removeFromList();
1005
+ void lint() const;
1006
+
1007
+ void assignTopoPosition();
1008
+
1009
+ protected:
1010
+ // subclasses must override
1011
+ // this function is used by createClone to initialize a new version
1012
+ // of a node in another graph. It should allocate a new instance of the same
1013
+ // concrete type as 'this', but in graph 'g' which might be different
1014
+ // than graph_
1015
+ virtual Node* allocNewInstance(Graph* g) {
1016
+ return new Node(g, kind());
1017
+ }
1018
+ // create a copy of all properties of Node s into this.
1019
+ // subclasses should extend if they have additional information to copy.
1020
+ // 'this' will be allocated with s->allocNewInstance(g) so it should have
1021
+ // the same concrete type as 's'
1022
+ virtual void cloneFrom(Node* s);
1023
+ };
1024
+
1025
+ struct Block {
1026
+ friend struct Node;
1027
+ friend struct Graph;
1028
+
1029
+ AT_DISALLOW_COPY_AND_ASSIGN(Block);
1030
+ TORCH_API Block(Graph* graph_, Node* node_);
1031
+
1032
+ at::ArrayRef<Value*> inputs() {
1033
+ return input_->outputs();
1034
+ }
1035
+ at::ArrayRef<const Value*> inputs() const {
1036
+ const auto& inputs = input_->outputs();
1037
+ return {inputs.data(), inputs.size()};
1038
+ }
1039
+ at::ArrayRef<Value*> outputs() {
1040
+ return output_->inputs();
1041
+ }
1042
+ at::ArrayRef<const Value*> outputs() const {
1043
+ return static_cast<const Node*>(output_)->inputs();
1044
+ }
1045
+ graph_node_list nodes() {
1046
+ return {input_, kNextDirection};
1047
+ }
1048
+ const_graph_node_list nodes() const {
1049
+ return {input_, kNextDirection};
1050
+ }
1051
+ Node* return_node() {
1052
+ return output_;
1053
+ }
1054
+ const Node* return_node() const {
1055
+ return output_;
1056
+ }
1057
+ Node* param_node() {
1058
+ return input_;
1059
+ }
1060
+ const Node* param_node() const {
1061
+ return input_;
1062
+ }
1063
+ /**
1064
+ * @warning NEVER pass raw pointer of smart pointer managed Graph to Python.
1065
+ * Check #87343 for details.
1066
+ */
1067
+ Graph* owningGraph() {
1068
+ return graph_;
1069
+ }
1070
+ const Graph* owningGraph() const {
1071
+ return graph_;
1072
+ }
1073
+ Node* owningNode() {
1074
+ return owning_node_;
1075
+ }
1076
+ const Node* owningNode() const {
1077
+ return owning_node_;
1078
+ }
1079
+
1080
+ Value* addInput(const std::string& name = "") {
1081
+ Value* v = input_->addOutput();
1082
+ v->setDebugName(name);
1083
+ return v;
1084
+ }
1085
+ Value* insertInput(size_t i, const std::string& name = "") {
1086
+ Value* v = input_->insertOutput(i);
1087
+ v->setDebugName(name);
1088
+ return v;
1089
+ }
1090
+ void eraseInput(size_t i) {
1091
+ input_->eraseOutput(i);
1092
+ }
1093
+ void removeAllInputs() {
1094
+ input_->removeAllOutputs();
1095
+ }
1096
+ size_t registerOutput(Value* v) {
1097
+ output_->addInput(v);
1098
+ return outputs().size() - 1;
1099
+ }
1100
+ size_t insertOutput(size_t i, Value* n) {
1101
+ output_->insertInput(i, n);
1102
+ return i;
1103
+ }
1104
+ void eraseOutput(size_t i) {
1105
+ output_->removeInput(i);
1106
+ }
1107
+ void removeAllOutputs() {
1108
+ output_->removeAllInputs();
1109
+ }
1110
+
1111
+ void replaceOutput(size_t i, Value* n) {
1112
+ output_->replaceInput(i, n);
1113
+ }
1114
+ void permuteOutputs(const std::vector<size_t>& new_inputs) {
1115
+ output_->permuteInputs(new_inputs);
1116
+ }
1117
+ void permuteInputs(const std::vector<size_t>& new_inputs) {
1118
+ input_->permuteOutputs(new_inputs);
1119
+ }
1120
+
1121
+ Node* appendNode(Node* n) {
1122
+ AT_ASSERT(n->graph_ == graph_ && !n->inBlockList());
1123
+ n->insertBefore(output_);
1124
+ return n;
1125
+ }
1126
+ Node* prependNode(Node* n) {
1127
+ AT_ASSERT(n->graph_ == graph_ && !n->inBlockList());
1128
+ n->insertAfter(input_);
1129
+ return n;
1130
+ }
1131
+
1132
+ // clone all inputs, nodes, and outputs from src and append them
1133
+ // to the inputs, nodes, and outputs of this block
1134
+ // value_map is used whenever a node in src references a free variable
1135
+ // in src to look up its corresponding value
1136
+ TORCH_API void cloneFrom(Block* src, std::function<Value*(Value*)> value_map);
1137
+ TORCH_API void remapTypes(const std::function<TypePtr(TypePtr)>& type_map);
1138
+
1139
+ TORCH_API std::shared_ptr<Wrap<Block>> wrap() {
1140
+ if (!wrap_) {
1141
+ wrap_ = std::make_shared<Wrap<Block>>(this);
1142
+ }
1143
+ return wrap_;
1144
+ }
1145
+
1146
+ virtual ~Block() {
1147
+ if (wrap_) {
1148
+ wrap_->clear();
1149
+ }
1150
+ }
1151
+
1152
+ void clear() {
1153
+ removeAllOutputs();
1154
+ for (auto it = nodes().rbegin(); it != nodes().rend(); it++) {
1155
+ it.destroyCurrent();
1156
+ }
1157
+ removeAllInputs();
1158
+ }
1159
+
1160
+ private:
1161
+ void reIndexTopology();
1162
+
1163
+ // get rid of all nodes
1164
+ // destroys in reverse order so that uses internal to this block
1165
+ // do not have to be removed before you can destroy the block
1166
+ void destroy();
1167
+
1168
+ Graph* const graph_;
1169
+ // holds outputs in a way that can be reflected
1170
+ // as a Use object
1171
+ // also used as the beginning/end of the circular node list to avoid
1172
+ // having corner cases where the list is empty.
1173
+ Node* const output_;
1174
+ Node* const input_;
1175
+ Node* const
1176
+ owning_node_; // either the node that has this block or nullptr for root
1177
+ // a managing wrapper for Python to allow invalidation
1178
+ std::shared_ptr<Wrap<Block>> wrap_;
1179
+ };
1180
+
1181
+ struct Graph : std::enable_shared_from_this<Graph> {
1182
+ AT_DISALLOW_COPY_AND_ASSIGN(Graph);
1183
+ friend struct Node;
1184
+ friend struct Value;
1185
+ friend struct Block;
1186
+
1187
+ private:
1188
+ // only used to keep track of allocated nodes
1189
+ // actual representation of Graph is done with
1190
+ // inputs, outputs, nodes
1191
+
1192
+ std::unordered_set<const Node*> all_nodes;
1193
+ std::unordered_set<const Value*> all_values;
1194
+ std::unordered_set<const Block*> all_blocks;
1195
+ size_t next_unique_;
1196
+
1197
+ std::unordered_map<std::string, Value*> unique_names_;
1198
+ // name_base_suffix tracks largest suffix currently used by all names sharing
1199
+ // same name_base. Key of this map is name_base, value is largest suffix
1200
+ // numeric value.
1201
+ std::unordered_map<std::string, size_t> name_base_suffix_;
1202
+
1203
+ ScopePtr current_scope_;
1204
+
1205
+ Block* const block_;
1206
+ // when insertNode() is called, the node is inserted before this node
1207
+ // by default this is set to append to the top level block
1208
+ Node* insert_before_;
1209
+ int64_t predicted_insert_count_ = 0;
1210
+
1211
+ c10::optional<size_t> op_version_;
1212
+
1213
+ public:
1214
+ Graph(ScopePtr scope_root = c10::make_intrusive<Scope>())
1215
+ : next_unique_(0),
1216
+ current_scope_(std::move(scope_root)),
1217
+ block_(new Block(this, nullptr)),
1218
+ insert_before_(return_node()) {}
1219
+
1220
+ at::ArrayRef<Value*> inputs() {
1221
+ return block_->inputs();
1222
+ }
1223
+ at::ArrayRef<const Value*> inputs() const {
1224
+ const Block& block = *block_;
1225
+ return block.inputs();
1226
+ }
1227
+ at::ArrayRef<Value*> outputs() {
1228
+ return block_->outputs();
1229
+ }
1230
+ at::ArrayRef<const Value*> outputs() const {
1231
+ const Block& block = *block_;
1232
+ return block.outputs();
1233
+ }
1234
+ graph_node_list nodes() {
1235
+ return block_->nodes();
1236
+ }
1237
+ const_graph_node_list nodes() const {
1238
+ const Block& block = *block_;
1239
+ return block.nodes();
1240
+ }
1241
+ Node* param_node() {
1242
+ return block_->param_node();
1243
+ }
1244
+ const Node* param_node() const {
1245
+ return block_->param_node();
1246
+ }
1247
+ Node* return_node() {
1248
+ return block_->return_node();
1249
+ }
1250
+ const Node* return_node() const {
1251
+ return block_->return_node();
1252
+ }
1253
+ const std::unordered_map<std::string, Value*>& debugNames() const {
1254
+ return unique_names_;
1255
+ }
1256
+
1257
+ TORCH_API void push_scope(const std::string& scope_name);
1258
+ TORCH_API void pop_scope();
1259
+
1260
+ ScopePtr current_scope() {
1261
+ return current_scope_;
1262
+ }
1263
+
1264
+ void set_op_version(c10::optional<size_t> version) {
1265
+ op_version_ = version;
1266
+ }
1267
+
1268
+ c10::optional<size_t> get_op_version() {
1269
+ return op_version_;
1270
+ }
1271
+
1272
+ void set_current_scope(ScopePtr scope) {
1273
+ current_scope_ = std::move(scope);
1274
+ }
1275
+
1276
+ Value* addInput(const std::string& name = "") {
1277
+ return block_->addInput(name);
1278
+ }
1279
+ Value* insertInput(size_t i, const std::string& name = "") {
1280
+ return block_->insertInput(i, name);
1281
+ }
1282
+ void eraseInput(size_t i) {
1283
+ block_->eraseInput(i);
1284
+ }
1285
+ size_t registerOutput(Value* n) {
1286
+ return block_->registerOutput(n);
1287
+ }
1288
+ void eraseOutput(size_t i) {
1289
+ block_->eraseOutput(i);
1290
+ }
1291
+
1292
+ TORCH_API Node* create(NodeKind kind, size_t num_outputs = 1);
1293
+ TORCH_API Node* create(
1294
+ NodeKind kind,
1295
+ ArrayRef<Value*> inputs,
1296
+ size_t num_outputs = 1);
1297
+
1298
+ TORCH_API Node* createNone();
1299
+ TORCH_API Node* createAutogradZero();
1300
+ TORCH_API Node* createUninitialized(TypePtr typ);
1301
+ TORCH_API Node* createWithSubgraph(Symbol kind);
1302
+ TORCH_API Node* createDifferentiableSubgraph();
1303
+ TORCH_API Node* createTuple(
1304
+ at::ArrayRef<Value*> values,
1305
+ TupleTypePtr optional_named_tuple = nullptr);
1306
+ TORCH_API Node* createTupleUnpack(Value* v);
1307
+ TORCH_API Node* createTupleIndex(
1308
+ Value* tup,
1309
+ Value* idx,
1310
+ const TypePtr& output_type);
1311
+ TORCH_API Node* createTupleSlice(
1312
+ Value* tup,
1313
+ int64_t beg,
1314
+ int64_t step_size,
1315
+ int64_t num_values);
1316
+ TORCH_API Node* createEnumName(Value* e);
1317
+ TORCH_API Node* createEnumValue(Value* e);
1318
+ TORCH_API Node* createList(
1319
+ const TypePtr& contained_type,
1320
+ at::ArrayRef<Value*> values);
1321
+ TORCH_API Node* createListUnpack(Value* v, size_t size);
1322
+ TORCH_API Node* createDict(
1323
+ const TypePtr& key_type,
1324
+ const TypePtr& value_type,
1325
+ at::ArrayRef<Value*> keys,
1326
+ at::ArrayRef<Value*> values);
1327
+ TORCH_API Node* createNumToTensor(Value* value);
1328
+ TORCH_API Node* createObject(const ClassTypePtr& type);
1329
+ TORCH_API Node* createSetAttr(
1330
+ Value* obj,
1331
+ const std::string& field,
1332
+ Value* newValue);
1333
+ TORCH_API Node* createGetAttr(Value* obj, const std::string& field);
1334
+ Value* insertGetAttr(Value* obj, const std::string& field) {
1335
+ return insertNode(createGetAttr(obj, field))->output();
1336
+ }
1337
+ TORCH_API Node* createStore(const std::string& name, Value* v);
1338
+ TORCH_API Node* createLoad(const std::string& name, const TypePtr& type);
1339
+ TORCH_API Node* createIsInstance(Value* v, at::ArrayRef<TypePtr> types);
1340
+
1341
+ TORCH_API Value* insertUncheckedCast(Value* v, TypePtr type);
1342
+
1343
+ // Insert a ToList operator with argument \p v and output type \p type.
1344
+ // \returns the output of the operation.
1345
+ TORCH_API Value* insertToList(Value* v, TypePtr type);
1346
+
1347
+ TORCH_API Value* insertFunctionCall(
1348
+ Function* callee,
1349
+ const MatchedSchema& matched);
1350
+ TORCH_API Value* insertMethodCall(
1351
+ std::string method_name,
1352
+ const MatchedSchema& matched);
1353
+
1354
+ // Note: defined in python_ir.cpp and can be used only in python extension
1355
+ Node* createPythonOp(
1356
+ THPObjectPtr&& pyobj,
1357
+ const std::string& cconv,
1358
+ pyobj_list&& scalar_args);
1359
+ // clone n, making a new node in _this_ graph.
1360
+ // use value_map to translate inputs of n to inputs of the cloned node
1361
+ // if copy_blocks is false, it will not recursively clone the nested blocks
1362
+ // this node contains.
1363
+ TORCH_API Node* createClone(
1364
+ Node* n,
1365
+ const std::function<Value*(Value*)>& value_map,
1366
+ bool copy_blocks = true);
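+ //
+ // Example (sketch): cloning a node into this graph while remapping its
+ // inputs through a previously built old-value -> new-value map `vmap`.
+ //
+ //   Node* copy = createClone(n, [&](Value* v) { return vmap.at(v); });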
1367
+
1368
+ // Insert constant IValue into the graph.
1369
+ TORCH_API Value* insertConstant(
1370
+ const IValue& val,
1371
+ c10::optional<SourceRange> loc = c10::nullopt,
1372
+ c10::optional<ScopePtr> scope = c10::nullopt);
1373
+
1374
+ // Schema-driven insert:
1375
+ // This inserts a node into the graph with inputs determined from args and
1376
+ // kwargs using Python argument matching rules, and checks that the op matches
1377
+ // a known schema.
1378
+ //
1379
+ // If this call completes successfully, it guarantees the node
1380
+ // is a correctly-formed invocation of opname
1381
+ TORCH_API Value* insert(
1382
+ Symbol opname,
1383
+ at::ArrayRef<NamedValue> args,
1384
+ at::ArrayRef<NamedValue> kwargs = {},
1385
+ const c10::optional<SourceRange>& range = {});
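+ //
+ // Example (sketch): schema-driven insertion on some Graph `g`; `a` and `b`
+ // are assumed Values already in the graph, and the keyword argument is
+ // matched by name against the resolved schema.
+ //
+ //   Value* sum = g.insert(aten::add, {a, b}, {{"alpha", 1}});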
1386
+
1387
+ Node* appendNode(Node* n) {
1388
+ return block_->appendNode(n);
1389
+ }
1390
+
1391
+ Node* prependNode(Node* n) {
1392
+ return block_->prependNode(n);
1393
+ }
1394
+
1395
+ // insert before insert_before_ node
1396
+ // initialized to insert at the end of the top level block
1397
+ // can be changed with setInsertPoint()
1398
+ Node* insertNode(Node* n) {
1399
+ AT_ASSERT(
1400
+ insert_before_->inBlockList() &&
1401
+ "insert point node is no longer in a block list");
1402
+ return n->insertBefore(insert_before_);
1403
+ }
1404
+ // set where nodes are inserted to append to the end of this block
1405
+ void setInsertPoint(Block* b) {
1406
+ AT_ASSERT(b->owningGraph() == this);
1407
+ setInsertPoint(b->return_node());
1408
+ }
1409
+ // set where nodes are inserted to insert _before_ this node
1410
+ // for implementation simplicity we only support inserting before a node for
1411
+ // now
1412
+ void setInsertPoint(Node* n) {
1413
+ AT_ASSERT(n->owningGraph() == this && n->inBlockList());
1414
+ insert_before_ = n;
1415
+ predicted_insert_count_ = 0;
1416
+ }
1417
+ Node* insertPoint() {
1418
+ return insert_before_;
1419
+ }
1420
+
1421
+ // the top level block
1422
+ Block* block() {
1423
+ return block_;
1424
+ }
1425
+ const Block* block() const {
1426
+ return block_;
1427
+ }
1428
+
1429
+ // Checks well-formedness and invariants of graph
1430
+ TORCH_API void lint() const;
1431
+ // for use in debugger
1432
+ TORCH_API void dump() const;
1433
+
1434
+ TORCH_API ~Graph();
1435
+
1436
+ TORCH_API std::string toString(bool print_source_locations = true) const;
1437
+
1438
+ TORCH_API std::ostream& print(
1439
+ std::ostream& out,
1440
+ bool print_source_locations = true) const;
1441
+
1442
+ friend TORCH_API std::ostream& operator<<(std::ostream& out, const Graph& g);
1443
+
1444
+ TORCH_API std::shared_ptr<Graph> copy();
1445
+ TORCH_API std::unique_ptr<Graph> copyUnique();
1446
+ TORCH_API void remapTypes(const std::function<TypePtr(TypePtr)>& type_map);
1447
+
1448
+ private:
1449
+ friend TORCH_API void Lint(const AliasDb* db);
1450
+ TORCH_API void freeNode(Node* n);
1451
+ TORCH_API void freeValue(Value* v);
1452
+ TORCH_API void freeBlock(Block* b);
1453
+ void cloneFrom(Graph& src);
1454
+ };
1455
+
1456
+ /** \brief A utility class for setting temporary insertion points.
1457
+ *
1458
+ * When an object of this class is created, it stores the current insertion
1459
+ * point, sets the new one, and restores the original insertion point when the
1460
+ * object is destroyed.
1461
+ */
1462
+ struct WithInsertPoint {
1463
+ WithInsertPoint(Node* n) : prev_(n->owningGraph()->insertPoint()) {
1464
+ n->owningGraph()->setInsertPoint(n);
1465
+ }
1466
+ WithInsertPoint(Block* b) : WithInsertPoint(b->return_node()) {}
1467
+
1468
+ ~WithInsertPoint() {
1469
+ prev_->owningGraph()->setInsertPoint(prev_);
1470
+ }
1471
+
1472
+ private:
1473
+ Node* prev_;
1474
+ };
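+ //
+ // Example (sketch): temporarily redirect insertions to just before
+ // `some_node`; the previous insertion point is restored when the guard
+ // goes out of scope.
+ //
+ //   {
+ //     WithInsertPoint guard(some_node);
+ //     Value* one = graph->insertConstant(1);  // lands right before some_node
+ //   }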
1475
+
1476
+ /** \brief A utility class for setting temporary scopes.
1477
+ *
1478
+ * When an object of this class is created, it stores the current scope, sets
1479
+ * the new one, and restores the original scope when the object is destroyed.
1480
+ */
1481
+ struct WithCurrentScope {
1482
+ WithCurrentScope(Graph& g, ScopePtr scope)
1483
+ : graph_(&g), prev_scope_(g.current_scope()) {
1484
+ g.set_current_scope(std::move(scope));
1485
+ }
1486
+ ~WithCurrentScope() {
1487
+ graph_->set_current_scope(prev_scope_);
1488
+ }
1489
+
1490
+ private:
1491
+ Graph* graph_;
1492
+ ScopePtr prev_scope_;
1493
+ };
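+ //
+ // Example (sketch): nodes created while the guard is alive should pick up
+ // `scope` as their scope; `graph` and `scope` are assumed to exist.
+ //
+ //   WithCurrentScope scope_guard(*graph, scope);
+ //   Node* n = graph->create(prim::Constant);  // n->scope() reflects `scope`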
1494
+
1495
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
1496
+ inline Value::Value(Node* node_, size_t offset_)
1497
+ : node_(node_),
1498
+ offset_(offset_),
1499
+ unique_(node_->graph_->next_unique_++),
1500
+ type_(TensorType::get()) {
1501
+ node_->graph_->all_values.emplace(this);
1502
+ }
1503
+
1504
+ inline Value* Value::setType(TypePtr type) {
1505
+ AT_ASSERT(type);
1506
+ if (auto dyn = type->castRaw<c10::DynamicType>()) {
1507
+ type = dyn->fallback();
1508
+ }
1509
+ type_ = std::move(type);
1510
+ for (Use& use : uses_) {
1511
+ use.user->op_ = nullptr;
1512
+ }
1513
+ return this;
1514
+ }
1515
+
1516
+ inline Graph* Value::owningGraph() {
1517
+ return node()->owningGraph();
1518
+ }
1519
+
1520
+ inline const Graph* Value::owningGraph() const {
1521
+ return node()->owningGraph();
1522
+ }
1523
+
1524
+ /************* All nodes not required to be defined before Graph **************/
1525
+ struct ProfileOp : public Node {
1526
+ static const Symbol Kind;
1527
+ ProfileOp(Graph* graph, std::function<void(std::vector<IValue>&)> callback)
1528
+ : Node(graph, ::c10::prim::profile), callback_(std::move(callback)) {}
1529
+
1530
+ void cloneFrom(Node* other_) override;
1531
+ Node* allocNewInstance(Graph* g) override;
1532
+
1533
+ const std::function<void(std::vector<IValue>&)>& getCallback() const {
1534
+ return callback_;
1535
+ }
1536
+
1537
+ void setCallback(std::function<void(std::vector<IValue>&)> callback) {
1538
+ callback_ = std::move(callback);
1539
+ }
1540
+
1541
+ bool hasSeenTensor() const {
1542
+ return has_seen_tensor_;
1543
+ }
1544
+
1545
+ void setHasSeenTensor(bool has_seen_tensor) {
1546
+ has_seen_tensor_ = has_seen_tensor;
1547
+ }
1548
+
1549
+ private:
1550
+ std::function<void(std::vector<IValue>&)> callback_;
1551
+ bool has_seen_tensor_ = false;
1552
+ };
1553
+
1554
+ struct TORCH_API ProfileIValueOp : public Node {
1555
+ static const Symbol Kind;
1556
+ ProfileIValueOp(
1557
+ Graph* graph,
1558
+ std::function<void(std::vector<IValue>&)> callback)
1559
+ : Node(graph, ::c10::prim::profile_ivalue),
1560
+ callback_(std::move(callback)) {}
1561
+
1562
+ void cloneFrom(Node* other_) override;
1563
+ Node* allocNewInstance(Graph* g) override;
1564
+
1565
+ const std::function<void(std::vector<IValue>&)>& getCallback() const {
1566
+ return callback_;
1567
+ }
1568
+
1569
+ void setCallback(std::function<void(std::vector<IValue>&)> callback) {
1570
+ callback_ = std::move(callback);
1571
+ }
1572
+
1573
+ private:
1574
+ std::function<void(std::vector<IValue>&)> callback_;
1575
+ };
1576
+
1577
+ // execute a Python function, used for Ops we can't optimize but that we want to
1578
+ // optimize around
1579
+ //
1580
+ // Note: actual implementation (ConcretePythonOp) is defined in python_ir.cpp
1581
+ // which is not included in libtorch.so. We still include some bits and pieces
1582
+ // of PythonOp here to enable writing simple passes generically. In general,
1583
+ // python-aware bits need to be moved to the descendant classes.
1584
+ struct TORCH_API PythonOp : public Node {
1585
+ using Node::Node;
1586
+
1587
+ virtual std::string name() const = 0;
1588
+ virtual void writeScalars(std::ostream& out) const = 0;
1589
+ void cloneFrom(Node* other_) override = 0;
1590
+ Node* allocNewInstance(Graph* g) override = 0;
1591
+ // recover the autograd.Function instance, if this PythonOp's function
1592
+ // was originally SomeFunction.apply
1593
+ // used in ONNX for discovering symbolics
1594
+ virtual c10::optional<THPObjectPtr> autogradFunction() const = 0;
1595
+
1596
+ virtual void lint_python() const = 0;
1597
+ };
1598
+
1599
+ TORCH_API void LintGraph(const std::shared_ptr<Graph>& graph);
1600
+
1601
+ TORCH_API at::ArrayRef<Value*> createTupleUnpack(Value* v);
1602
+
1603
+ /** Insert graph \p CALLEE into graph \p G using \p INPUTS as input values.
1604
+ * The insertion happens at the current insertion point.
1605
+ * Optionally, one can also pass \p VALUE_MAP to get a map between \p CALLEE
1606
+ * values and their cloned copies in \p G.
1607
+ */
1608
+ TORCH_API std::vector<Value*> insertGraph(
1609
+ Graph& g,
1610
+ Graph& callee,
1611
+ ArrayRef<Value*> inputs);
1612
+ TORCH_API std::vector<Value*> insertGraph(
1613
+ Graph& g,
1614
+ Graph& callee,
1615
+ ArrayRef<Value*> inputs,
1616
+ std::unordered_map<Value*, Value*>& value_map);
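+ //
+ // Example (sketch): splice `callee` into `g` at the current insertion point,
+ // feeding it `x` and `y` (assumed Values of `g`).
+ //
+ //   std::vector<Value*> outs = insertGraph(*g, *callee, {x, y});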
1617
+
1618
+ /** Insert function \p CALLEE after node \p TO_REPLACE, remove the node and
1619
+ * replace all its uses with corresponding outputs of the inserted function.
1620
+ * This asserts that the number of outputs of the original node and the
1621
+ * graph are the same.
1622
+ */
1623
+ TORCH_API std::vector<Value*> inlineCallTo(
1624
+ Node* to_replace,
1625
+ GraphFunction* callee,
1626
+ bool use_graph = true);
1627
+
1628
+ TORCH_API std::vector<Value*> inlineCallTo(
1629
+ Node* to_replace,
1630
+ GraphFunction* callee,
1631
+ Graph* callee_graph);
1632
+
1633
+ /** If there is only one value in \p OUTPUTS and its kind is Tuple, insert a
1634
+ * tuple unpack node and return the resulting values.
1635
+ */
1636
+ TORCH_API std::vector<Value*> unpackOutputs(const std::vector<Value*>& outputs);
1637
+
1638
+ TORCH_API std::vector<Node*> findAllNodes(Graph& g, Symbol kind, bool recurse);
1639
+ TORCH_API std::vector<Node*> findAllNodes(Block& b, Symbol kind, bool recurse);
1640
+ TORCH_API std::vector<Node*> findAllNodes(
1641
+ at::ArrayRef<Block*> a,
1642
+ Symbol kind,
1643
+ bool recurse);
1644
+
1645
+ struct TORCH_API OperatorSet {
1646
+ OperatorSet(std::initializer_list<const char*> sig_literals);
1647
+ std::vector<std::shared_ptr<Operator>> getOps() const;
1648
+ void insert(std::initializer_list<const char*> sig_literals);
1649
+
1650
+ private:
1651
+ friend struct Node;
1652
+ std::unordered_map<Symbol, std::vector<std::shared_ptr<Operator>>> ops;
1653
+ };
1654
+
1655
+ template <typename T>
1656
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
1657
+ struct OperatorMap {
1658
+ // Type aliasing
1659
+ using OpMapType = typename std::pair<std::shared_ptr<Operator>, T>;
1660
+ using ValueType = std::vector<OpMapType>;
1661
+ using MapType = std::unordered_map<Symbol, ValueType>;
1662
+
1663
+ OperatorMap() = default;
1664
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
1665
+ explicit OperatorMap(
1666
+ std::initializer_list<std::pair<std::shared_ptr<Operator>, T>> init) {
1667
+ insert(init);
1668
+ }
1669
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
1670
+ explicit OperatorMap(std::initializer_list<std::pair<const char*, T>> init) {
1671
+ insert(init);
1672
+ }
1673
+
1674
+ void insert(const std::shared_ptr<Operator>& op, T val) {
1675
+ // Remove if exists before insert
1676
+ erase(op);
1677
+ map[Symbol::fromQualString(op->schema().name())].emplace_back(
1678
+ std::make_pair(op, val));
1679
+ }
1680
+
1681
+ void insert(const OperatorSet& op_set, T val) {
1682
+ for (auto& op : op_set.getOps()) {
1683
+ insert(op, val);
1684
+ }
1685
+ }
1686
+
1687
+ void insert(
1688
+ std::initializer_list<std::pair<std::shared_ptr<Operator>, T>> v) {
1689
+ for (auto& el : v) {
1690
+ insert(el.first, el.second);
1691
+ }
1692
+ }
1693
+
1694
+ void insert(std::initializer_list<std::pair<const char*, T>> v) {
1695
+ for (auto& el : v) {
1696
+ insert(getOperatorForLiteral(el.first), el.second);
1697
+ }
1698
+ }
1699
+
1700
+ void erase(const std::shared_ptr<Operator>& op) {
1701
+ auto it = map.find(Symbol::fromQualString(op->schema().name()));
1702
+ if (it == map.end()) {
1703
+ return;
1704
+ }
1705
+ for (auto vit = it->second.begin(); vit != it->second.end(); ++vit) {
1706
+ if (vit->first->schema() == op->schema()) {
1707
+ it->second.erase(vit);
1708
+ break;
1709
+ }
1710
+ }
1711
+ if (it->second.size() == 0) {
1712
+ map.erase(Symbol::fromQualString(op->schema().name()));
1713
+ }
1714
+ }
1715
+
1716
+ bool contains(const Operator& op) const {
1717
+ const auto it = map.find(Symbol::fromQualString(op.schema().name()));
1718
+ if (it == map.end()) {
1719
+ return false;
1720
+ }
1721
+ for (auto vit = it->second.begin(); vit != it->second.end(); ++vit) {
1722
+ if (vit->first->schema() == op.schema()) {
1723
+ return true;
1724
+ }
1725
+ }
1726
+ return false;
1727
+ }
1728
+
1729
+ bool contains(const Node* n) const {
1730
+ return n->maybeOperator() && contains(n->getOperator());
1731
+ }
1732
+
1733
+ c10::optional<T> find(const Operator& op) {
1734
+ const auto it = map.find(Symbol::fromQualString(op.schema().name()));
1735
+ if (it == map.end()) {
1736
+ return c10::nullopt;
1737
+ }
1738
+ for (auto vit = it->second.begin(); vit != it->second.end(); ++vit) {
1739
+ if (vit->first->schema() == op.schema()) {
1740
+ return vit->second;
1741
+ }
1742
+ }
1743
+ return c10::nullopt;
1744
+ }
1745
+
1746
+ // TODO: return iterator
1747
+ std::vector<OpMapType> getAllKeysAndValues() const {
1748
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
1749
+ std::vector<OpMapType> keys_values;
1750
+ for (auto& symbol_mapping : map) {
1751
+ auto& vec = symbol_mapping.second;
1752
+ for (auto& pair : vec) {
1753
+ keys_values.push_back(pair);
1754
+ }
1755
+ }
1756
+ return keys_values;
1757
+ }
1758
+
1759
+ private:
1760
+ friend struct Node;
1761
+ MapType map;
1762
+ };
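+ //
+ // Example (sketch): tag a schema with a value and query nodes against it;
+ // the literal must name a registered operator.
+ //
+ //   OperatorMap<bool> fusable({{"aten::tanh(Tensor self) -> Tensor", true}});
+ //   if (n->isMemberOf(fusable)) {
+ //     // n matches one of the mapped schemas
+ //   }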
1763
+
1764
+ template <typename T>
1765
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
1766
+ struct FunctionSchemaMap {
1767
+ // Type aliasing
1768
+ using FuncSchemaMapType = typename std::pair<FunctionSchema, T>;
1769
+ using ValueType = std::vector<FuncSchemaMapType>;
1770
+ using MapType = std::unordered_map<Symbol, ValueType>;
1771
+
1772
+ FunctionSchemaMap() = default;
1773
+ void insert(const FunctionSchema& schema, T val) {
1774
+ // Remove if exists before insert
1775
+ erase(schema);
1776
+ map[Symbol::fromQualString(schema.name())].emplace_back(
1777
+ std::make_pair(schema, val));
1778
+ }
1779
+
1780
+ void erase(const FunctionSchema& schema) {
1781
+ auto it = map.find(Symbol::fromQualString(schema.name()));
1782
+ if (it == map.end()) {
1783
+ return;
1784
+ }
1785
+ for (auto vit = it->second.begin(); vit != it->second.end(); ++vit) {
1786
+ if (vit->first == schema) {
1787
+ it->second.erase(vit);
1788
+ break;
1789
+ }
1790
+ }
1791
+ if (it->second.size() == 0) {
1792
+ map.erase(Symbol::fromQualString(schema.name()));
1793
+ }
1794
+ }
1795
+
1796
+ bool contains(const FunctionSchema& schema) const {
1797
+ const auto it = map.find(Symbol::fromQualString(schema.name()));
1798
+ if (it == map.end()) {
1799
+ return false;
1800
+ }
1801
+ for (auto vit = it->second.begin(); vit != it->second.end(); ++vit) {
1802
+ if (vit->first == schema) {
1803
+ return true;
1804
+ }
1805
+ }
1806
+ return false;
1807
+ }
1808
+
1809
+ c10::optional<T> find(const FunctionSchema& schema) const {
1810
+ const auto it = map.find(Symbol::fromQualString(schema.name()));
1811
+ if (it == map.end()) {
1812
+ return c10::nullopt;
1813
+ }
1814
+ for (auto vit = it->second.begin(); vit != it->second.end(); ++vit) {
1815
+ if (vit->first == schema) {
1816
+ return vit->second;
1817
+ }
1818
+ }
1819
+ return c10::nullopt;
1820
+ }
1821
+
1822
+ // TODO: return iterator
1823
+ std::vector<FuncSchemaMapType> getAllKeysAndValues() const {
1824
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
1825
+ std::vector<FuncSchemaMapType> keys_values;
1826
+ for (auto& symbol_mapping : map) {
1827
+ auto& vec = symbol_mapping.second;
1828
+ for (auto& pair : vec) {
1829
+ keys_values.push_back(pair);
1830
+ }
1831
+ }
1832
+ return keys_values;
1833
+ }
1834
+
1835
+ private:
1836
+ friend struct Node;
1837
+ MapType map;
1838
+ };
1839
+
1840
+ } // namespace jit
1841
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/ir_views.h ADDED
@@ -0,0 +1,164 @@
1
+ #pragma once
2
+
3
+ #include <c10/util/irange.h>
4
+ #include <torch/csrc/jit/ir/ir.h>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+
9
+ struct IfView {
10
+ explicit IfView(Node* node) : node_(node) {
11
+ AT_ASSERT(node->kind() == ::c10::prim::If);
12
+ }
13
+ Value* cond() const {
14
+ return node_->input(0);
15
+ }
16
+ Block* thenBlock() const {
17
+ return node_->blocks().at(0);
18
+ }
19
+ Block* elseBlock() const {
20
+ return node_->blocks().at(1);
21
+ }
22
+ ArrayRef<Value*> thenOutputs() const {
23
+ return thenBlock()->outputs();
24
+ }
25
+ ArrayRef<Value*> elseOutputs() const {
26
+ return elseBlock()->outputs();
27
+ }
28
+ ArrayRef<Value*> outputs() const {
29
+ return node_->outputs();
30
+ }
31
+ Node* node() const {
32
+ return node_;
33
+ }
34
+ operator Node*() const {
35
+ return node_;
36
+ }
37
+
38
+ void permuteOutputs(const std::vector<size_t>& new_output_order) {
39
+ node_->permuteOutputs(new_output_order);
40
+ thenBlock()->permuteOutputs(new_output_order);
41
+ elseBlock()->permuteOutputs(new_output_order);
42
+ }
43
+
44
+ private:
45
+ Node* node_;
46
+ };
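+ //
+ // Example (sketch): folding an If whose condition is a known constant;
+ // `if_node` is assumed to be a prim::If node.
+ //
+ //   IfView ifv(if_node);
+ //   if (auto cond = toIValue(ifv.cond())) {
+ //     Block* taken = cond->toBool() ? ifv.thenBlock() : ifv.elseBlock();
+ //     // ... inline `taken` and remove the If ...
+ //   }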
47
+
48
+ struct LoopView {
49
+ explicit LoopView(Node* node) : node_(node) {
50
+ AT_ASSERT(
51
+ node->kind() == ::c10::prim::Loop || node->kind() == ::c10::onnx::Loop);
52
+ }
53
+ Block* bodyBlock() const {
54
+ return node_->blocks().at(0);
55
+ }
56
+ Value* cond() const {
57
+ return node_->input(0);
58
+ }
59
+ Value* maxTripCount() const {
60
+ return node_->input(0);
61
+ }
62
+ Value* inputCond() const {
63
+ return node_->input(1);
64
+ }
65
+ Value* nextCond() const {
66
+ return bodyBlock()->outputs().at(0);
67
+ }
68
+ Value* currentTripCount() const {
69
+ return bodyBlock()->inputs().at(0);
70
+ }
71
+ ArrayRef<Value*> carriedInputs() const {
72
+ // skip trip count and cond
73
+ return node_->inputs().slice(2);
74
+ }
75
+ ArrayRef<Value*> carriedInputsWithCond() const {
76
+ // skip trip count and cond
77
+ return node_->inputs().slice(1);
78
+ }
79
+ ArrayRef<Value*> carriedOutputs() const {
80
+ return node_->outputs();
81
+ }
82
+ ArrayRef<Value*> bodyCarriedInputs() const {
83
+ // skip trip count and cond
84
+ return bodyBlock()->inputs().slice(1);
85
+ }
86
+ ArrayRef<Value*> bodyCarriedOutputs() const {
87
+ return bodyBlock()->outputs().slice(1);
88
+ }
89
+ Node* node() const {
90
+ return node_;
91
+ }
92
+ operator Node*() const {
93
+ return node_;
94
+ }
95
+
96
+ void permuteLoopCarried(const std::vector<size_t>& new_output_order) {
97
+ node_->permuteOutputs(new_output_order);
98
+ // skip trip count and cond
99
+ node_->permuteInputs(adjustIndices(2, new_output_order));
100
+ auto adjusted_block_order = adjustIndices(1, new_output_order);
101
+ bodyBlock()->permuteOutputs(adjusted_block_order);
102
+ bodyBlock()->permuteInputs(adjusted_block_order);
103
+ }
104
+
105
+ void replaceMaxTripCount(Value* new_max_trip_count) {
106
+ node_->replaceInput(0, new_max_trip_count);
107
+ }
108
+ void replaceInputCondition(Value* new_input_condition) {
109
+ node_->replaceInput(1, new_input_condition);
110
+ }
111
+
112
+ // our way of encoding loops makes them difficult to turn back into python
113
+ // syntax. we have to check properties of the condition and trip count inputs
114
+ // to figure out which one it initially was. ModifiedLoops are not directly
115
+ // mappable to either For or While
116
+ enum LoopType { While, For, ModifiedLoop };
117
+
118
+ LoopType loopType() {
119
+ auto trip_count = toIValue(maxTripCount());
120
+ auto cond_input = toIValue(inputCond());
121
+ auto cond_next = toIValue(nextCond());
122
+
123
+ bool condition_is_always_true =
124
+ cond_input && cond_input->toBool() && cond_next && cond_next->toBool();
125
+ bool trip_count_is_specified = !trip_count || // trip is not a constant
126
+ trip_count->toInt() !=
127
+ std::numeric_limits<int64_t>::max() || // it is a constant but not
128
+ // the default one
129
+ !currentTripCount()
130
+ ->uses()
131
+ .empty(); // it is actually being used in the body.
132
+
133
+ if (condition_is_always_true) {
134
+ // if the trip count was not specified this was a user-written while True:
135
+ return trip_count_is_specified ? For : While;
136
+ } else {
137
+ if (trip_count_is_specified) {
138
+ return ModifiedLoop;
139
+ }
140
+ return While;
141
+ }
142
+ }
143
+
144
+ private:
145
+ Node* node_;
146
+
147
+ // adjust index_ordering by prepending indices 0 through adjust - 1, and
148
+ // incrementing all existing indices by adjust
149
+ static std::vector<size_t> adjustIndices(
150
+ size_t adjust,
151
+ const std::vector<size_t>& index_ordering) {
152
+ std::vector<size_t> adjusted;
153
+ adjusted.reserve(adjust + index_ordering.size());
154
+ for (const auto i : c10::irange(adjust)) {
155
+ adjusted.push_back(i);
156
+ }
157
+ for (auto index : index_ordering) {
158
+ adjusted.push_back(index + adjust);
159
+ }
160
+ return adjusted;
161
+ }
162
+ };
163
+ } // namespace jit
164
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/irparser.h ADDED
@@ -0,0 +1,40 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/Export.h>
4
+ #include <string>
5
+ #include <unordered_map>
6
+
7
+ #include <c10/util/Optional.h>
8
+ #include <torch/csrc/Export.h>
9
+
10
+ namespace torch {
11
+ namespace jit {
12
+
13
+ struct Graph;
14
+ struct Value;
15
+
16
+ // \brief Parse IR from \p STR, constructing the corresponding IR in \p GRAPH.
18
+ // If parse_tensor_constants is true, empty tensors with random or
19
+ // uninitialized contents will be constructed for Tensor constants;
20
+ // otherwise parsing such constants will throw.
20
+ TORCH_API void parseIR(
21
+ const std::string& str,
22
+ torch::jit::Graph* graph,
23
+ bool parse_tensor_constants = false);
24
+
25
+ /** \brief Parse IR from \p STR, constructing the corresponding IR in \p GRAPH.
26
+ *
27
+ * \p VMAP is filled with String-to-Value pairs that allow indexing Values in
28
+ * the newly created graph by their name in the original IR string.
29
+ * If parse_tensor_constants is true, empty tensors with random or
30
+ * uninitialized contents will be constructed for Tensor constants;
31
+ * otherwise parsing such constants will throw.
32
+ */
33
+ TORCH_API void parseIR(
34
+ const std::string& str,
35
+ torch::jit::Graph* graph,
36
+ std::unordered_map<std::string, Value*>& vmap,
37
+ bool parse_tensor_constants = false);
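+ //
+ // Example (sketch): parse a small graph and look up a value by its IR name.
+ //
+ //   auto g = std::make_shared<Graph>();
+ //   std::unordered_map<std::string, Value*> vmap;
+ //   parseIR(R"IR(
+ //     graph(%a : Tensor, %b : Tensor):
+ //       %c : Tensor = aten::mul(%a, %b)
+ //       return (%c)
+ //   )IR", g.get(), vmap);
+ //   Value* c = vmap.at("c");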
38
+
39
+ } // namespace jit
40
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/named_value.h ADDED
@@ -0,0 +1,84 @@
1
+ #pragma once
2
+ #include <ATen/core/ivalue.h>
3
+ #include <torch/csrc/jit/frontend/source_range.h>
4
+ #include <torch/csrc/jit/ir/constants.h>
5
+ #include <torch/csrc/utils/variadic.h>
6
+
7
+ namespace torch {
8
+ namespace jit {
9
+
10
+ struct Value;
11
+
12
+ /**
13
+ * A value with optional extra name and location information. Used during
14
+ * schema matching to provide extra error information and resolve kwargs.
15
+ */
16
+ struct NamedValue {
17
+ NamedValue(const SourceRange& loc, const std::string& name, Value* value)
18
+ : loc_(loc), name_(name), value_(value) {}
19
+ NamedValue(const SourceRange& loc, Value* value) : loc_(loc), value_(value) {}
20
+
21
+ /* implicit */ NamedValue(Value* value) : value_(value) {}
22
+ NamedValue(const std::string& name, Value* value)
23
+ : name_(name), value_(value) {}
24
+
25
+ /* implicit */ NamedValue(IValue value)
26
+ : value_(nullptr), ivalue_(std::move(value)) {}
27
+
28
+ NamedValue(const std::string& name, IValue value)
29
+ : name_(name), ivalue_(std::move(value)) {}
30
+
31
+ template <
32
+ typename T,
33
+ typename = enable_if_t<
34
+ (!std::is_same<decay_t<T>, NamedValue>::value &&
35
+ !std::is_same<decay_t<T>, Value*>::value &&
36
+ !std::is_same<decay_t<T>, IValue>::value)>>
37
+ // NOLINTNEXTLINE(bugprone-forwarding-reference-overload)
38
+ NamedValue(T&& t) : NamedValue(IValue(std::forward<T>(t))) {}
39
+
40
+ template <
41
+ typename T,
42
+ typename = enable_if_t<
43
+ (!std::is_same<decay_t<T>, Value*>::value &&
44
+ !std::is_same<decay_t<T>, IValue>::value)>>
45
+ NamedValue(const std::string& name, T&& t)
46
+ : NamedValue(name, IValue(std::forward<T>(t))) {}
47
+
48
+ SourceRange locOr(const SourceRange& backup_location) const {
49
+ if (!loc_)
50
+ return backup_location;
51
+ return loc();
52
+ }
53
+
54
+ // note: this will insert a constant node into the graph at the current
55
+ // insert point if this NamedValue is actually a constant
56
+ Value* value(Graph& g) const {
57
+ if (!value_)
58
+ return insertConstant(
59
+ g, ivalue_); // use insertConstant to remove need to include ir.h here
60
+ return value_;
61
+ }
62
+
63
+ const std::string& name() const {
64
+ AT_ASSERT(name_);
65
+ return *name_;
66
+ }
67
+
68
+ const SourceRange& loc() const {
69
+ AT_ASSERT(loc_);
70
+ return *loc_;
71
+ }
72
+
73
+ at::TypePtr type() const;
74
+
75
+ private:
76
+ c10::optional<SourceRange> loc_;
77
+ c10::optional<std::string> name_;
78
+ Value* value_{nullptr};
79
+ // only valid if value_ == nullptr;
80
+ IValue ivalue_;
81
+ };
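+ //
+ // Example (sketch): passing a keyword argument to Graph::insert (declared in
+ // ir.h) by wrapping an IValue together with its parameter name; `graph`,
+ // `a`, and `b` are assumed to exist.
+ //
+ //   NamedValue alpha("alpha", 2);
+ //   graph->insert(aten::add, {a, b}, {alpha});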
82
+
83
+ } // namespace jit
84
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/node_hashing.h ADDED
@@ -0,0 +1,17 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ struct TORCH_API HashNode {
9
+ size_t operator()(const Node* k) const;
10
+ };
11
+
12
+ struct TORCH_API EqualNode {
13
+ bool operator()(const Node* lhs, const Node* rhs) const;
14
+ };
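+ //
+ // Example (sketch): a structural-equality node set, the same pattern used by
+ // common-subexpression elimination.
+ //
+ //   std::unordered_set<Node*, HashNode, EqualNode> seen;
+ //   for (Node* n : graph->nodes()) {
+ //     if (!seen.insert(n).second) {
+ //       // a structurally identical node was seen before
+ //     }
+ //   }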
15
+
16
+ } // namespace jit
17
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/scope.h ADDED
@@ -0,0 +1,220 @@
1
+ #pragma once
2
+ #include <ATen/core/jit_type.h>
3
+ #include <ATen/core/symbol.h>
4
+ #include <c10/util/Optional.h>
5
+ #include <c10/util/intrusive_ptr.h>
6
+ #include <torch/csrc/Export.h>
7
+ #include <torch/csrc/jit/frontend/source_range.h>
8
+ #include <unordered_map>
9
+
10
+ namespace torch {
11
+ namespace jit {
12
+ struct ModuleInstanceInfo;
13
+ constexpr size_t kModuleInstanceInfo = 2;
14
+
15
+ namespace utils {
16
+ std::string get_module_info(const ModuleInstanceInfo& module_instance_info);
17
+ } // namespace utils
18
+
19
+ // Scope is a node of a trie that represents the tree of nested scopes.
20
+ // Individual scopes are pushed and popped from Graph, which holds a
21
+ // pointer to the current scope. Each Node in Graph holds a pointer
22
+ // to the scope that was current when the node was created.
23
+ // The trie never needs to shrink, it only grows until it is disposed
24
+ // of when Graph is deallocated. Hence, pointers to scopes held by nodes
25
+ // will always be valid as long as Graph is alive.
26
+ struct Scope;
27
+ using ScopePtr = c10::intrusive_ptr<Scope>;
28
+ using c10::Symbol;
29
+
30
+ struct TORCH_API Scope : public c10::intrusive_ptr_target {
31
+ private:
32
+ ScopePtr parent_;
33
+ Symbol name_;
34
+ ScopePtr intrusive_from_this();
35
+
36
+ public:
37
+ Scope();
38
+
39
+ Scope(ScopePtr parent, Symbol name);
40
+
41
+ ScopePtr push(Symbol name);
42
+
43
+ ScopePtr parent();
44
+
45
+ bool isRoot() const;
46
+
47
+ bool isBlank() const;
48
+
49
+ ScopePtr getRoot();
50
+
51
+ size_t getDepth();
52
+
53
+ Symbol name() const;
54
+
55
+ std::string namesFromRoot(const std::string& separator = "/") const;
56
+ };
57
+
58
+ struct Function;
59
+ struct InlinedCallStack;
60
+
61
+ /**
62
+ * ModuleInstanceInfo is a structure to include the module type and instance
63
+ * name. It also provides public methods to get the pointer to the module type and
64
+ * instance name.
65
+ *
66
+ * This structure is mainly used as a private member in InlinedCallStack, such
67
+ * that one can follow the callstack to find the relevant module hierarchy.
68
+ */
69
+ struct ModuleInstanceInfo {
70
+ private:
71
+ c10::ClassTypePtr module_type_{nullptr};
72
+ std::string instance_name_;
73
+
74
+ public:
75
+ ModuleInstanceInfo() = default;
76
+ ModuleInstanceInfo(c10::ClassTypePtr module_type, std::string instance_name);
77
+ c10::ClassTypePtr class_type() {
78
+ return module_type_;
79
+ }
80
+ c10::ClassTypePtr class_type() const {
81
+ return module_type_;
82
+ }
83
+ std::string instance_name() const {
84
+ return instance_name_;
85
+ }
86
+
87
+ bool operator==(const ModuleInstanceInfo& rhs) const {
88
+ return (class_type() == rhs.class_type()) &&
89
+ (instance_name() == rhs.instance_name());
90
+ }
91
+ };
92
+
93
+ /**
94
+ * InlinedCallStack is an element in a list representing callstack of functions
95
+ * that have been inlined.
96
+ *
97
+ * Each such element holds info about the current callsite (Function and
98
+ * SourceRange) and a pointer to the next element in the list. The last element
99
+ * in the list represents the innermost function that was inlined.
100
+ *
101
+ * For instance, if a node has a callstack
102
+ * [foo, source_range1] -> [bar, source_range2]
103
+ * it means that this node was originally from function 'bar' that was called
104
+ * at 'source_range2' in function 'foo' that was called in the current function
105
+ * at 'source_range1'.
106
+ *
107
+ * If a node did not come from any inlined function, its callstack will be
108
+ * empty.
109
+ *
110
+ * The callstack lists only grow, we never remove elements from them, which
111
+ * allows us to reuse same elements in different lists. For instance, if we
112
+ * inline function 'bar' to 'foo' and then inline 'foo' to two functions 'ham'
113
+ * and 'baz', the callstacks would look like:
114
+ *
115
+ * [baz, source_range3] --
116
+ * \
117
+ * --> [foo, source_range1] -> [bar, source_range2]
118
+ * /
119
+ * [ham, source_range4] --
120
+ */
121
+ using InlinedCallStackPtr = c10::intrusive_ptr<InlinedCallStack>;
122
+ using InlinedCallStackEntry =
123
+ std::tuple<Function*, SourceRange, c10::optional<ModuleInstanceInfo>>;
124
+
125
+ struct TORCH_API InlinedCallStack : public c10::intrusive_ptr_target {
126
+ private:
127
+ c10::optional<InlinedCallStackPtr> callee_;
128
+ Function* fn_;
129
+ // Reason for fn_name_ even though we have fn_
130
+ // Serialized callstack is used in circumstances where InlinedCallStack
131
+ // cannot be constructed during runtime, e.g. mobile runtime or
132
+ // delegated backends.
133
+ // Since in those cases we do not have a Function*, we store the function name.
134
+ // fn_name does not give you access to the same information that Function*
135
+ // does; however, in the mobile/delegated backend runtime we use InlinedCallStack
136
+ // for exception stack and for that purpose fn_name_ suffices.
137
+ const std::string fn_name_;
138
+ SourceRange source_range_;
139
+ InlinedCallStackPtr intrusive_from_this();
140
+ c10::optional<ModuleInstanceInfo> module_instance_info_;
141
+
142
+ public:
143
+ // Constructor for a leaf callstack node.
144
+ InlinedCallStack(Function* fn, SourceRange source_range);
145
+
146
+ // Constructor for a leaf callstack node.
147
+ InlinedCallStack(
148
+ Function* fn,
149
+ SourceRange source_range,
150
+ c10::optional<ModuleInstanceInfo> module_instance_info);
151
+
152
+ // Constructor for a leaf callstack node.
153
+ InlinedCallStack(
154
+ Function* fn,
155
+ SourceRange source_range,
156
+ c10::optional<ModuleInstanceInfo> module_instance_info,
157
+ std::string& function_name);
158
+
159
+ // Constructor for an inner callstack node.
160
+ InlinedCallStack(
161
+ InlinedCallStackPtr callee,
162
+ Function* fn,
163
+ SourceRange source_range);
164
+
165
+ InlinedCallStack(
166
+ InlinedCallStackPtr callee,
167
+ Function* fn,
168
+ SourceRange source_range,
169
+ c10::optional<ModuleInstanceInfo> module_instance_info);
170
+
171
+ InlinedCallStack(
172
+ InlinedCallStackPtr callee,
173
+ Function* fn,
174
+ SourceRange source_range,
175
+ c10::optional<ModuleInstanceInfo> module_instance_info,
176
+ std::string& function_name);
177
+
178
+ // Return next element in the callstack list.
179
+ c10::optional<InlinedCallStackPtr> callee() const;
180
+
181
+ // Return module instance associated with the current element.
182
+ c10::optional<ModuleInstanceInfo> module_instance() const;
183
+
184
+ // Returns the source range of the node
185
+ SourceRange source_range() const;
186
+
187
+ Function* function() const;
188
+
189
+ const std::string& function_name() const;
190
+
191
+ // Return callstack as a vector of [Function, SourceRange] pairs.
192
+ std::vector<InlinedCallStackEntry> vec();
193
+
194
+ void setCallee(c10::optional<InlinedCallStackPtr>);
195
+
196
+ bool operator==(const InlinedCallStack& rhs) const {
197
+ // No need to compare fn_, since source_range equivalence check
198
+ // should suffice.
199
+ return (module_instance().has_value() ==
200
+ rhs.module_instance().has_value()) &&
201
+ (module_instance().has_value() &&
202
+ module_instance().value() == rhs.module_instance().value()) &&
203
+ callee() == rhs.callee() && source_range() == rhs.source_range();
204
+ }
205
+
206
+ bool operator!=(const InlinedCallStack& rhs) const {
207
+ return !(*this == rhs);
208
+ }
209
+ };
210
+
211
+ // {source range, node name, InlinedCallStack}
212
+ // We store the node name because the same debug info will be used for
213
+ // profiling as well, so we need to know op names as well.
214
+ using DebugInfoTuple =
215
+ std::tuple<SourceRange, std::string, InlinedCallStackPtr>;
216
+ constexpr size_t kDebugInfoTupleSourceRangeIndex{0};
217
+ constexpr size_t kDebugInfoTupleNodeNameIndex{1};
218
+ constexpr size_t kDebugInfoTupleInlinedCSIndex{2};
219
+ } // namespace jit
220
+ } // namespace torch
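Editorial note: a short sketch of consuming the DebugInfoTuple layout above via its index constants. Everything here uses only the declarations in this header; the function name `inspect` is illustrative.

#include <tuple>
#include <torch/csrc/jit/ir/scope.h>

using namespace torch::jit;

void inspect(const DebugInfoTuple& d) {
  const SourceRange& range = std::get<kDebugInfoTupleSourceRangeIndex>(d);
  const std::string& op_name = std::get<kDebugInfoTupleNodeNameIndex>(d);
  InlinedCallStackPtr cs = std::get<kDebugInfoTupleInlinedCSIndex>(d);
  // Number of inlined frames attached to this node (empty callstack => 0).
  size_t depth = cs ? cs->vec().size() : 0;
  (void)range; (void)op_name; (void)depth;
}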
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/subgraph_matcher.h ADDED
@@ -0,0 +1,74 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ #include <unordered_map>
6
+ #include <vector>
7
+
8
+ namespace torch {
9
+ namespace jit {
10
+
11
+ /**
12
+ * \brief A structure describing a match of a pattern in a graph.
13
+ *
14
+ * The structure contains an anchor node, from which the match was found, and
15
+ * match-maps for nodes and values. A match-map specifies the correspondence
16
+ * between nodes in the pattern graph (match-map keys) and nodes in the actual
17
+ * graph (match-map values). We keep such maps for both nodes and values.
18
+ */
19
+ struct Match {
20
+ Node* anchor;
21
+ std::unordered_map<const Node*, Node*> nodes_map;
22
+ std::unordered_map<const Value*, Value*> values_map;
23
+ };
24
+
25
+ /**
26
+ * \brief Find all matches of a \p PATTERN in a \p GRAPH.
27
+ *
28
+ * The function returns a vector of match-descriptors (see description of
29
+ * `struct Match`).
30
+ *
31
+ * Matching rules:
32
+ * - Pattern graph must contain a single block.
33
+ * - Matched subgraphs do not span across different blocks.
34
+ * - No uses outside the match are allowed, except for Param and Return nodes.
35
+ * Basically, we're matching hammocks, not arbitrary subgraphs.
36
+ * - The pattern graph must return only one value (i.e. it must have a single
37
+ * node leading to return).
38
+ * - Nodes that are not used in computation of the return value in the pattern
39
+ * graph are ignored during matching (IOW, we're essentially performing DCE on
40
+ * the pattern).
41
+ * - Pattern graph nodes cannot alias. TODO: the check not implemented yet.
42
+ * - Aliasing nodes in the graph cannot constitute a match (i.e. through all
43
+ * found matches, no nodes in the subgraph alias with each other). TODO: check
44
+ * not implemented yet.
45
+ * - The matcher will not mutate either the pattern graph or the matched graph.
46
+ * The matched graph is taken as non-const so that Match may contain non-const
47
+ * pointers. This enables clients of this API to use Match to drive mutations.
48
+ *
49
+ * Note [Multi-output Patterns]
50
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
51
+ * Subgraph matcher provides limited support for multi-output patterns. With a
52
+ * single output pattern, a single scan through the graph is sufficient to
53
+ * find all the matches: given a starting node (an "anchor"), we can
54
+ * deterministically check whether a pattern matches a subgraph corresponding to
55
+ * this anchor node. For a general case of multi-output patterns, we would have
56
+ * N anchors, which would result in M^N comparisons (M is the size of the
57
+ * graph). Clearly this is computationally prohibitive.
58
+ *
59
+ * To overcome this, we impose some constraints on the multi-output patterns
60
+ * that we accept. We require that checking whether the pattern matches a
61
+ * subgraph would still be fully determined by a single node in the graph. To
62
+ * achieve this, we designate the first output in the pattern as the "main"
63
+ * output and assume that we can traverse up from this node to match the
64
+ * entire pattern.
65
+ *
66
+ * Corollary 1: the order of outputs in the pattern matters!
67
+ * Corollary 2: patterns cannot contain any nodes not participating in the main
68
+ * output computation.
69
+ */
70
+ std::vector<Match> TORCH_API
71
+ findPatternMatches(const Graph& pattern, Graph& graph);
72
+
73
+ } // namespace jit
74
+ } // namespace torch
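Editorial note: a hedged example of driving findPatternMatches(). The pattern graph is built with parseIR() from irparser.h (another header in this same upload), and the aten::mul pattern, like the function name, is purely illustrative.

#include <memory>
#include <vector>
#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/ir/irparser.h>
#include <torch/csrc/jit/ir/subgraph_matcher.h>

using namespace torch::jit;

// Count how many times a single-output a*b subgraph appears in `graph`.
size_t count_mul_matches(Graph& graph) {
  auto pattern = std::make_shared<Graph>();
  parseIR(R"IR(
    graph(%a, %b):
      %c = aten::mul(%a, %b)
      return (%c))IR",
      pattern.get());
  std::vector<Match> matches = findPatternMatches(*pattern, graph);
  return matches.size();
}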
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/type_hashing.h ADDED
@@ -0,0 +1,20 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/jit_type.h>
4
+ #include <torch/csrc/jit/ir/ir.h>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+
9
+ struct HashType {
10
+ size_t operator()(const TypePtr& type) const;
11
+ size_t operator()(const c10::ConstTypePtr& type) const;
12
+ };
13
+
14
+ struct EqualType {
15
+ bool operator()(const TypePtr& a, const TypePtr& b) const;
16
+ bool operator()(const c10::ConstTypePtr& a, const c10::ConstTypePtr& b) const;
17
+ };
18
+
19
+ } // namespace jit
20
+ } // namespace torch
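Editorial note: these functors make unordered containers compare types structurally rather than by pointer identity. A minimal sketch; the alias name is illustrative.

#include <unordered_map>
#include <torch/csrc/jit/ir/type_hashing.h>

using namespace torch::jit;

// Map keyed by TypePtr where two structurally equal types share one slot.
using TypeIdCache = std::unordered_map<c10::TypePtr, int, HashType, EqualType>;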
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/debug_info.h ADDED
@@ -0,0 +1,57 @@
1
+ #pragma once
2
+ #include <c10/util/flat_hash_map.h>
3
+ #include <caffe2/serialize/inline_container.h>
4
+ #include <torch/csrc/jit/api/compilation_unit.h>
5
+ #include <torch/csrc/jit/ir/scope.h>
6
+ #include <torch/csrc/jit/serialization/source_range_serialization.h>
7
+
8
+ namespace torch {
9
+ namespace jit {
10
+ /*
11
+ * MobileDebugTable:
12
+ * Deserializes debug_pkl and callstack_map records from PT model's zip archive
13
+ * and stores them in a map of debug handles to DebugInfoTuple. Debug handles are
14
+ * unique per model and runtime, whether in the lite interpreter or a delegate; an
15
+ * exception of BackendRuntimeException should be raised using debug handles.
16
+ * The getSourceDebugString method is responsible for translating debug
17
+ * handles to the corresponding debug information.
18
+ * This debug information includes the stack trace of model-level source code and
19
+ * module hierarchy where the exception occurred.
20
+ */
21
+ class MobileDebugTable {
22
+ public:
23
+ MobileDebugTable() = default;
24
+ MobileDebugTable(
25
+ std::unique_ptr<caffe2::serialize::PyTorchStreamReader>& reader,
26
+ const std::shared_ptr<CompilationUnit>& cu);
27
+
28
+ template <typename It>
29
+ MobileDebugTable(It begin, It end) : callstack_ptr_map_(begin, end) {}
30
+
31
+ std::string getSourceDebugString(
32
+ const int64_t debug_handle,
33
+ const std::string& top_module_type_name = "ModuleTypeUnknown") const;
34
+ std::string getSourceDebugString(
35
+ const std::vector<int64_t>& debug_handles,
36
+ const std::string& top_module_type_name = "ModuleTypeUnknown") const;
37
+ std::string getModuleHierarchyInfo(
38
+ const int64_t debug_handle,
39
+ const std::string& top_module_type_name = "ModuleTypeUnknown") const;
40
+ std::string getModuleHierarchyInfo(
41
+ const std::vector<int64_t>& debug_handles,
42
+ const std::string& top_module_type_name = "ModuleTypeUnknown") const;
43
+
44
+ const ska::flat_hash_map<int64_t, DebugInfoTuple>& getCallStackPtrMap()
45
+ const {
46
+ return callstack_ptr_map_;
47
+ }
48
+
49
+ private:
50
+ std::pair<std::string, std::string> getSourceDebugModuleHierarchyInfo(
51
+ const std::vector<int64_t>& debug_handles,
52
+ const std::string& top_module_type_name = "ModuleTypeUnknown") const;
53
+ ska::flat_hash_map<int64_t, DebugInfoTuple> callstack_ptr_map_;
54
+ };
55
+
56
+ } // namespace jit
57
+ } // namespace torch
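Editorial note: a hedged sketch of the lookup path this table provides. Given the debug handles carried by a runtime exception, it translates them back into a source-level trace and a module hierarchy string; "TopLevelModule" is just an illustrative display name.

#include <string>
#include <vector>
#include <torch/csrc/jit/mobile/debug_info.h>

using namespace torch::jit;

std::string describe_failure(
    const MobileDebugTable& table,
    const std::vector<int64_t>& debug_handles) {
  return table.getSourceDebugString(debug_handles, "TopLevelModule") + "\n" +
      table.getModuleHierarchyInfo(debug_handles, "TopLevelModule");
}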
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/file_format.h ADDED
@@ -0,0 +1,196 @@
1
+ #pragma once
2
+
3
+ #include <array>
4
+ #include <cerrno>
5
+ #include <cstddef>
6
+ #include <cstring>
7
+ #include <fstream>
8
+ #include <istream>
9
+ #include <memory>
10
+
11
+ #include <c10/core/CPUAllocator.h>
12
+ #include <c10/core/impl/alloc_cpu.h>
13
+ #include <caffe2/serialize/read_adapter_interface.h>
14
+
15
+ #if defined(HAVE_MMAP)
16
+ #include <fcntl.h>
17
+ #include <sys/mman.h>
18
+ #include <sys/stat.h>
19
+ #include <sys/types.h>
20
+ #include <unistd.h>
21
+ #endif
22
+
23
+ /**
24
+ * @file
25
+ *
26
+ * Helpers for identifying file formats when reading serialized data.
27
+ *
28
+ * Note that these functions are declared inline because they will typically
29
+ * only be called from one or two locations per binary.
30
+ */
31
+
32
+ namespace torch {
33
+ namespace jit {
34
+
35
+ /**
36
+ * The format of a file or data stream.
37
+ */
38
+ enum class FileFormat {
39
+ UnknownFileFormat = 0,
40
+ FlatbufferFileFormat,
41
+ ZipFileFormat,
42
+ };
43
+
44
+ /// The size of the buffer to pass to #getFileFormat(), in bytes.
45
+ constexpr size_t kFileFormatHeaderSize = 8;
46
+ constexpr size_t kMaxAlignment = 16;
47
+
48
+ /**
49
+ * Returns the likely file format based on the magic header bytes in @p header,
50
+ * which should contain the first bytes of a file or data stream.
51
+ */
52
+ // NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration)
53
+ static inline FileFormat getFileFormat(const char* data) {
54
+ // The size of magic strings to look for in the buffer.
55
+ static constexpr size_t kMagicSize = 4;
56
+
57
+ // Bytes 4..7 of a Flatbuffer-encoded file produced by
58
+ // `flatbuffer_serializer.h`. (The first four bytes contain an offset to the
59
+ // actual Flatbuffer data.)
60
+ static constexpr std::array<char, kMagicSize> kFlatbufferMagicString = {
61
+ 'P', 'T', 'M', 'F'};
62
+ static constexpr size_t kFlatbufferMagicOffset = 4;
63
+
64
+ // The first four bytes of a ZIP file.
65
+ static constexpr std::array<char, kMagicSize> kZipMagicString = {
66
+ 'P', 'K', '\x03', '\x04'};
67
+
68
+ // Note that we check for Flatbuffer magic first. Since the first four bytes
69
+ // of flatbuffer data contain an offset to the root struct, it's theoretically
70
+ // possible to construct a file whose offset looks like the ZIP magic. On the
71
+ // other hand, bytes 4-7 of ZIP files are constrained to a small set of values
72
+ // that do not typically cross into the printable ASCII range, so a ZIP file
73
+ // should never have a header that looks like a Flatbuffer file.
74
+ if (std::memcmp(
75
+ data + kFlatbufferMagicOffset,
76
+ kFlatbufferMagicString.data(),
77
+ kMagicSize) == 0) {
78
+ // Magic header for a binary file containing a Flatbuffer-serialized mobile
79
+ // Module.
80
+ return FileFormat::FlatbufferFileFormat;
81
+ } else if (std::memcmp(data, kZipMagicString.data(), kMagicSize) == 0) {
82
+ // Magic header for a zip file, which we use to store pickled sub-files.
83
+ return FileFormat::ZipFileFormat;
84
+ }
85
+ return FileFormat::UnknownFileFormat;
86
+ }
87
+
88
+ /**
89
+ * Returns the likely file format based on the magic header bytes of @p data.
90
+ * If the stream position changes while inspecting the data, this function will
91
+ * restore the stream position to its original offset before returning.
92
+ */
93
+ // NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration)
94
+ static inline FileFormat getFileFormat(std::istream& data) {
95
+ FileFormat format = FileFormat::UnknownFileFormat;
96
+ std::streampos orig_pos = data.tellg();
97
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
98
+ std::array<char, kFileFormatHeaderSize> header;
99
+ data.read(header.data(), header.size());
100
+ if (data.good()) {
101
+ format = getFileFormat(header.data());
102
+ }
103
+ data.seekg(orig_pos, data.beg);
104
+ return format;
105
+ }
106
+
107
+ /**
108
+ * Returns the likely file format based on the magic header bytes of the file
109
+ * named @p filename.
110
+ */
111
+ // NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration)
112
+ static inline FileFormat getFileFormat(const std::string& filename) {
113
+ std::ifstream data(filename, std::ifstream::binary);
114
+ return getFileFormat(data);
115
+ }
116
+
117
+ // NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration)
118
+ static void file_not_found_error() {
119
+ std::stringstream message;
120
+ message << "Error while opening file: ";
121
+ if (errno == ENOENT) {
122
+ message << "no such file or directory" << std::endl;
123
+ } else {
124
+ message << "error no is: " << errno << std::endl;
125
+ }
126
+ TORCH_CHECK(false, message.str());
127
+ }
128
+
129
+ // NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration)
130
+ static inline std::tuple<std::shared_ptr<char>, size_t> get_file_content(
131
+ const char* filename) {
132
+ #if defined(HAVE_MMAP)
133
+ int fd = open(filename, O_RDONLY);
134
+ if (fd < 0) {
135
+ // failed to open file, chances are it's no such file or directory.
136
+ file_not_found_error();
137
+ }
138
+ struct stat statbuf {};
139
+ fstat(fd, &statbuf);
140
+ size_t size = statbuf.st_size;
141
+ void* ptr = mmap(nullptr, statbuf.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
142
+ close(fd);
143
+ auto deleter = [statbuf](char* ptr) { munmap(ptr, statbuf.st_size); };
144
+ std::shared_ptr<char> data(reinterpret_cast<char*>(ptr), deleter);
145
+ #else
146
+ FILE* f = fopen(filename, "rb");
147
+ if (f == nullptr) {
148
+ file_not_found_error();
149
+ }
150
+ fseek(f, 0, SEEK_END);
151
+ size_t size = ftell(f);
152
+ fseek(f, 0, SEEK_SET);
153
+ // make sure buffer size is multiple of alignment
154
+ size_t buffer_size = (size / kMaxAlignment + 1) * kMaxAlignment;
155
+ std::shared_ptr<char> data(
156
+ static_cast<char*>(c10::alloc_cpu(buffer_size)), c10::free_cpu);
157
+ fread(data.get(), size, 1, f);
158
+ fclose(f);
159
+ #endif
160
+ return std::make_tuple(data, size);
161
+ }
162
+
163
+ // NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration)
164
+ static inline std::tuple<std::shared_ptr<char>, size_t> get_stream_content(
165
+ std::istream& in) {
166
+ // get size of the stream and reset to orig
167
+ std::streampos orig_pos = in.tellg();
168
+ in.seekg(orig_pos, std::ios::end);
169
+ const long size = in.tellg();
170
+ in.seekg(orig_pos, in.beg);
171
+
172
+ // read stream
173
+ // NOLINT make sure buffer size is multiple of alignment
174
+ size_t buffer_size = (size / kMaxAlignment + 1) * kMaxAlignment;
175
+ std::shared_ptr<char> data(
176
+ static_cast<char*>(c10::alloc_cpu(buffer_size)), c10::free_cpu);
177
+ in.read(data.get(), size);
178
+
179
+ // reset stream to original position
180
+ in.seekg(orig_pos, in.beg);
181
+ return std::make_tuple(data, size);
182
+ }
183
+
184
+ // NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration)
185
+ static inline std::tuple<std::shared_ptr<char>, size_t> get_rai_content(
186
+ caffe2::serialize::ReadAdapterInterface* rai) {
187
+ size_t buffer_size = (rai->size() / kMaxAlignment + 1) * kMaxAlignment;
188
+ std::shared_ptr<char> data(
189
+ static_cast<char*>(c10::alloc_cpu(buffer_size)), c10::free_cpu);
190
+ rai->read(
191
+ 0, data.get(), rai->size(), "Loading ReadAdapterInterface to bytes");
192
+ return std::make_tuple(data, buffer_size);
193
+ }
194
+
195
+ } // namespace jit
196
+ } // namespace torch
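Editorial note: a small sketch of the sniffing helpers above being used to branch between the two on-disk formats; only the magic header bytes are inspected, the file is not parsed.

#include <string>
#include <torch/csrc/jit/mobile/file_format.h>

using namespace torch::jit;

bool is_flatbuffer_model(const std::string& path) {
  return getFileFormat(path) == FileFormat::FlatbufferFileFormat;
}

bool is_zip_model(const std::string& path) {
  return getFileFormat(path) == FileFormat::ZipFileFormat;
}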
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/flatbuffer_loader.h ADDED
@@ -0,0 +1,136 @@
1
+ #pragma once
2
+
3
+ #include <istream>
4
+ #include <memory>
5
+ #include <string>
6
+ #include <unordered_map>
7
+ #include <vector>
8
+
9
+ #include <ATen/core/ivalue.h>
10
+ #include <c10/core/Device.h>
11
+ #include <c10/macros/Macros.h>
12
+ #include <c10/util/Optional.h>
13
+ #include <torch/csrc/jit/mobile/module.h>
14
+
15
+ /**
16
+ * Defines the public API for loading flatbuffer-serialized mobile modules.
17
+ * Note that this header must not include or depend on flatbuffer-defined
18
+ * types, to avoid leaking those details to PyTorch clients.
19
+ */
20
+
21
+ namespace torch {
22
+ namespace jit {
23
+
24
+ /// All non-copied data pointers provided to `parse_and_initialize_*` functions
25
+ /// must be aligned to this boundary. Since the Module will point directly into
26
+ /// the data, this alignment is necessary to ensure that certain types/structs
27
+ /// are properly aligned.
28
+ constexpr size_t kFlatbufferDataAlignmentBytes = 16;
29
+
30
+ /// Maps file names to file contents.
31
+ using ExtraFilesMap = std::unordered_map<std::string, std::string>;
32
+
33
+ // On high level, to produce a Module from a file on disk, we need to go
34
+ // through the follow steps:
35
+ // 1. Read: Read the file from disk -> memory
36
+ // 2. Deserialize: Parse the bytes to produce some in memory manipulable
37
+ // structure
38
+ // 3. Module initialization: Produce mobile::Module out of the structure
39
+ // produced in 2.
40
+ // Under this context, the structure described in 2. is the flatbuffer-defined
41
+ // type mobile::serialization::Module. However, this step/type is not visible in
42
+ // the public API.
43
+
44
+ // Parse a mobile::Module from raw bytes.
45
+ //
46
+ // This function does steps 2+3 described above.
47
+ //
48
+ // Does not take ownership of `data`; if you want it to take ownership, see the
49
+ // shared_ptr overload of this function.
50
+ //
51
+ // If should_copy_tensor_memory is true, then the returned module will NOT have
52
+ // references to `data`, so `data` can be freed immediately.
53
+ //
54
+ // If should_copy_tensor_memory is false, then the returned module will have tensors
55
+ // that point into `data`; the caller will need to make sure that `data`
56
+ // outlives the returned Module. Also, `data` must be aligned to
57
+ // kFlatbufferDataAlignmentBytes.
58
+ TORCH_API mobile::Module parse_and_initialize_mobile_module(
59
+ void* data,
60
+ size_t size, // of `data`, in bytes.
61
+ c10::optional<at::Device> device = c10::nullopt,
62
+ ExtraFilesMap* extra_files = nullptr,
63
+ bool should_copy_tensor_memory = false);
64
+
65
+ // Parse a mobile::Module from raw bytes.
66
+ //
67
+ // This function does steps 2+3 described above.
68
+ //
69
+ // The returned Module holds a reference to `data`, which must be aligned to
70
+ // kFlatbufferDataAlignmentBytes.
71
+ //
72
+ // If you do not want the Module to hold a reference to `data`, see the raw
73
+ // pointer overload of this function.
74
+ TORCH_API mobile::Module parse_and_initialize_mobile_module(
75
+ std::shared_ptr<char> data,
76
+ size_t size, // of `data`, in bytes.
77
+ c10::optional<at::Device> device = c10::nullopt,
78
+ ExtraFilesMap* extra_files = nullptr);
79
+
80
+ // Parse a mobile::Module from raw bytes, also returning JIT-related metadata.
81
+ //
82
+ // This is the same as parse_and_initialize_mobile_module() except that it also
83
+ // extracts JIT source files and constants. Can be used to construct a
84
+ // jit::Module.
85
+ TORCH_API mobile::Module parse_and_initialize_mobile_module_for_jit(
86
+ void* data,
87
+ size_t size, // of `data`, in bytes.
88
+ ExtraFilesMap& jit_sources,
89
+ std::vector<IValue>& jit_constants,
90
+ c10::optional<at::Device> device = c10::nullopt,
91
+ ExtraFilesMap* extra_files = nullptr);
92
+
93
+ // Load a mobile::Module from a filepath.
94
+ //
95
+ // This function does steps 1+2+3 described above.
96
+ //
97
+ // We need to have this as a convenience because the Python API will need to wrap
98
+ // this. C++ clients should use one of the versions of
99
+ // parse_and_initialize_mobile_module() so they can manage the raw data more
100
+ // directly.
101
+ TORCH_API mobile::Module load_mobile_module_from_file(
102
+ const std::string& filename,
103
+ c10::optional<at::Device> device = c10::nullopt,
104
+ ExtraFilesMap* extra_files = nullptr);
105
+
106
+ TORCH_API uint64_t get_bytecode_version(std::istream& in);
107
+ TORCH_API uint64_t get_bytecode_version(const std::string& filename);
108
+ TORCH_API uint64_t get_bytecode_version_from_bytes(char* flatbuffer_content);
109
+
110
+ TORCH_API mobile::ModuleInfo get_module_info_from_flatbuffer(
111
+ char* flatbuffer_content);
112
+
113
+ // The methods below are less efficient because they need to read the stream in
114
+ // its entirety into a buffer.
115
+ TORCH_API mobile::Module load_mobile_module_from_stream_with_copy(
116
+ std::istream& in,
117
+ c10::optional<at::Device> device = c10::nullopt,
118
+ ExtraFilesMap* extra_files = nullptr);
119
+
120
+ TORCH_API mobile::Module parse_flatbuffer_no_object(
121
+ std::shared_ptr<char> data,
122
+ size_t size,
123
+ c10::optional<at::Device> device);
124
+
125
+ TORCH_API mobile::Module parse_and_initialize_mobile_module(
126
+ void* data,
127
+ size_t,
128
+ c10::optional<at::Device>,
129
+ ExtraFilesMap* extra_files,
130
+ bool should_copy_tensor_memory);
131
+
132
+ // no op, TODO(qihan) delete
133
+ TORCH_API bool register_flatbuffer_loader();
134
+
135
+ } // namespace jit
136
+ } // namespace torch
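Editorial note: a hedged end-to-end sketch combining get_file_content() from file_format.h (earlier in this upload) with the shared_ptr overload above. It assumes C++17 structured bindings and that the buffer returned by get_file_content() satisfies the kFlatbufferDataAlignmentBytes requirement.

#include <string>
#include <utility>
#include <torch/csrc/jit/mobile/file_format.h>
#include <torch/csrc/jit/mobile/flatbuffer_loader.h>

using namespace torch::jit;

mobile::Module load_flatbuffer_model(const std::string& path) {
  // Step 1: read the file into memory; steps 2+3: parse and initialize.
  auto [data, size] = get_file_content(path.c_str());
  // The shared_ptr overload keeps the buffer alive for as long as the
  // returned Module references tensor data inside it.
  return parse_and_initialize_mobile_module(std::move(data), size);
}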
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/frame.h ADDED
@@ -0,0 +1,53 @@
1
+ #pragma once
2
+
3
+ #include <cstddef>
4
+
5
+ #include <c10/util/Optional.h>
6
+ #include <torch/csrc/jit/mobile/code.h>
7
+
8
+ namespace torch {
9
+ namespace jit {
10
+ namespace mobile {
11
+
12
+ class Frame {
13
+ public:
14
+ explicit Frame(const Code& code) : code_(code) {}
15
+ const Code& getCode() const {
16
+ return code_;
17
+ }
18
+
19
+ void step() {
20
+ pc_++;
21
+ }
22
+
23
+ void jump(size_t n) {
24
+ pc_ += n;
25
+ }
26
+
27
+ size_t getPC() const {
28
+ return pc_;
29
+ }
30
+
31
+ const Instruction& getInstruction() const {
32
+ return code_.instructions_.at(pc_);
33
+ }
34
+
35
+ c10::optional<int64_t> getDebugHandle() const {
36
+ return getDebugHandle(pc_);
37
+ }
38
+
39
+ c10::optional<int64_t> getDebugHandle(size_t pc) const {
40
+ if (pc >= code_.debug_handles_.size()) {
41
+ return {};
42
+ }
43
+ return code_.debug_handles_[pc];
44
+ }
45
+
46
+ private:
47
+ const Code& code_;
48
+ size_t pc_{0};
49
+ };
50
+
51
+ } // namespace mobile
52
+ } // namespace jit
53
+ } // namespace torch
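Editorial note: a minimal sketch of how an interpreter-style loop walks a Code object with Frame. It relies only on members this header itself touches (instructions_ and debug_handles_ on mobile::Code); the function name is illustrative.

#include <cstdint>
#include <vector>
#include <torch/csrc/jit/mobile/frame.h>

using namespace torch::jit::mobile;

// Collect the debug handle (when present) of every instruction in `code`.
std::vector<int64_t> collect_debug_handles(const Code& code) {
  std::vector<int64_t> handles;
  Frame frame(code);
  while (frame.getPC() < code.instructions_.size()) {
    if (auto h = frame.getDebugHandle()) {
      handles.push_back(*h);
    }
    frame.step();
  }
  return handles;
}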
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/function.h ADDED
@@ -0,0 +1,86 @@
1
+ #pragma once
2
+
3
+ #include <vector>
4
+
5
+ #include <ATen/core/function.h>
6
+ #include <ATen/core/function_schema.h>
7
+ #include <ATen/core/ivalue.h>
8
+ #include <torch/csrc/jit/mobile/code.h>
9
+
10
+ namespace torch {
11
+ namespace jit {
12
+ enum OpCode : uint8_t;
13
+ struct Instruction;
14
+ struct OperatorString;
15
+
16
+ namespace mobile {
17
+
18
+ class TORCH_API Function : public torch::jit::Function {
19
+ public:
20
+ explicit Function(c10::QualifiedName name);
21
+ Function(
22
+ c10::QualifiedName name,
23
+ Code code,
24
+ at::optional<c10::FunctionSchema> schema);
25
+ void run(Stack& stack) override;
26
+ at::IValue operator()(Stack& stack);
27
+ void ensure_defined() override {}
28
+ size_t num_inputs() const override;
29
+ const c10::QualifiedName& qualname() const override;
30
+ bool call(Stack&, c10::function_ref<void(const mobile::Code&)>) override;
31
+
32
+ // NOTE: the APIs below are dangerous: if you call append_instruction with
33
+ // dbg_handle and then call it without, the debug handles will become
34
+ // misaligned. Therefore only use ONE variant at a time.
35
+ void append_instruction(OpCode op, int X, int N, int64_t dbg_handle);
36
+ void append_instruction(OpCode op, int X, int N);
37
+ void append_operator(
38
+ const std::string& name,
39
+ const std::string& overload_name,
40
+ const c10::optional<int>& num_specified_args);
41
+ void append_constant(const c10::IValue& constant);
42
+ void append_type(const c10::TypePtr& type);
43
+ void append_function(mobile::Function& func);
44
+
45
+ void set_register_size(size_t size);
46
+
47
+ int64_t get_debug_handle(size_t pc) const;
48
+ const Code& get_code() const;
49
+ Code& get_code();
50
+
51
+ torch::jit::Function& setSchema(c10::FunctionSchema schema) override;
52
+ bool hasSchema() const;
53
+ const c10::FunctionSchema& getSchema() const override;
54
+
55
+ // Returns the debug handle corresponding to where the execution
56
+ // is halted due to exception.
57
+ // If no corresponding debug handle is found then -1 is returned.
58
+ const std::vector<int64_t>& getExceptionDebugHandles() const;
59
+ static Function& registerFunc(
60
+ const std::string& qualified_name,
61
+ const std::vector<Instruction>& instructions,
62
+ const std::vector<c10::IValue>& constants,
63
+ const std::vector<c10::TypePtr>& types,
64
+ const size_t register_size);
65
+
66
+ // If not initialized, initialize by loading operators.
67
+ // Returns true if all ops are loaded; returns false if some op is not found
68
+ // in the current runtime. In that case, the ops that were not found are recorded
69
+ // in unsupported_op_names
70
+ bool initialize_operators(bool should_check_operators);
71
+
72
+ private:
73
+ c10::QualifiedName name_;
74
+ Code code_;
75
+ at::optional<c10::FunctionSchema> schema_; // (byte-code version 4+)
76
+ };
77
+
78
+ c10::optional<std::function<void(Stack&)>> makeOperatorFunction(
79
+ c10::OperatorName opname,
80
+ c10::optional<int> num_specified_args);
81
+
82
+ TORCH_API std::string operator_str(const c10::OperatorName& opname);
83
+
84
+ } // namespace mobile
85
+ } // namespace jit
86
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/import.h ADDED
@@ -0,0 +1,112 @@
1
+ #pragma once
2
+ #include <torch/csrc/jit/mobile/module.h>
3
+ #include <torch/csrc/jit/mobile/parse_operators.h>
4
+
5
+ #include <istream>
6
+ #include <memory>
7
+
8
+ #include <caffe2/serialize/file_adapter.h>
9
+
10
+ namespace torch {
11
+ namespace jit {
12
+ using caffe2::serialize::FileAdapter;
13
+ using caffe2::serialize::IStreamAdapter;
14
+ using caffe2::serialize::ReadAdapterInterface;
15
+ using ExtraFilesMap = std::unordered_map<std::string, std::string>;
16
+
17
+ constexpr const char* kArchiveNameBytecode = "bytecode";
18
+ constexpr const char* kArchiveNameConstants = "constants";
19
+ constexpr const char* kArchiveNameVersion = "version";
20
+
21
+ // The family of methods below load a serialized Mobile Module
22
+ // into a mobile::Module object.
23
+ TORCH_API mobile::Module _load_for_mobile(
24
+ std::istream& in,
25
+ c10::optional<at::Device> device,
26
+ ExtraFilesMap& extra_file,
27
+ uint64_t module_load_options = kDefaultMobileLoadOptions);
28
+
29
+ TORCH_API mobile::Module _load_for_mobile(
30
+ const std::string& filename,
31
+ c10::optional<at::Device> device,
32
+ ExtraFilesMap& extra_files);
33
+
34
+ TORCH_API mobile::Module _load_for_mobile(
35
+ std::unique_ptr<ReadAdapterInterface> rai,
36
+ c10::optional<c10::Device> device,
37
+ ExtraFilesMap& extra_files,
38
+ uint64_t module_load_options = kDefaultMobileLoadOptions);
39
+
40
+ TORCH_API mobile::Module _load_for_mobile(
41
+ const std::string& filename,
42
+ c10::optional<at::Device> device,
43
+ ExtraFilesMap& extra_files,
44
+ uint64_t module_load_options);
45
+
46
+ TORCH_API mobile::Module _load_for_mobile(
47
+ std::istream& in,
48
+ c10::optional<at::Device> device = c10::nullopt);
49
+
50
+ TORCH_API mobile::Module _load_for_mobile(
51
+ const std::string& filename,
52
+ c10::optional<at::Device> device = c10::nullopt);
53
+
54
+ TORCH_API mobile::Module _load_for_mobile(
55
+ std::unique_ptr<ReadAdapterInterface> rai,
56
+ c10::optional<c10::Device> device = c10::nullopt);
57
+
58
+ /**
59
+ * Load only the contents of the "extra/" files whose names are
60
+ * passed in the map (extra_files). Populate the corresponding values
61
+ * with the contents of those files. Do not attempt to load the entire
62
+ * model, and stop once the extra files have been extracted.
63
+ *
64
+ * This API is needed to be able to load GPU models on linux CPU
65
+ * machines and extract only the extra files so that we can inspect
66
+ * the metadata that was added to the .ptl archive when it was
67
+ * generated.
68
+ *
69
+ */
70
+ void _load_extra_only_for_mobile(
71
+ const std::string& filename,
72
+ c10::optional<at::Device> device,
73
+ ExtraFilesMap& extra_files);
74
+
75
+ // Currently used by both mobile/import.cpp and model_compatibility.cpp.
76
+ // Should be removed after model_compatibility.cpp start using simplified
77
+ // version type_resolver and obj_loader.
78
+ at::TypePtr resolveTypeNameMobile(
79
+ const c10::QualifiedName& qn,
80
+ std::shared_ptr<CompilationUnit> compilation_unit);
81
+ c10::StrongTypePtr typeResolverMobile(
82
+ const c10::QualifiedName& qn,
83
+ const std::shared_ptr<CompilationUnit>& compilation_unit);
84
+ c10::intrusive_ptr<c10::ivalue::Object> objLoaderMobile(
85
+ const at::StrongTypePtr& type,
86
+ const at::IValue& input,
87
+ mobile::CompilationUnit& mobile_compilation_unit);
88
+
89
+ // Given a reader, which has access to a model file,
90
+ // return true if there exists tensors in `bytecode` archive
91
+ bool isTensorInBytecodeArchive(
92
+ caffe2::serialize::PyTorchStreamReader& stream_reader);
93
+
94
+ namespace mobile {
95
+
96
+ /**
97
+ * Given a torch::jit::mobile::Module, return a set of operator names
98
+ * (with overload name) that are used by any method in this mobile
99
+ * Module. This method runs through the bytecode for all methods
100
+ * in the specified model (module), and extracts all the root
101
+ * operator names. Root operators are operators that are called
102
+ * directly by the model (as opposed to non-root operators, which
103
+ * may be called transitively by the root operators).
104
+ *
105
+ */
106
+ TORCH_API std::set<std::string> _export_operator_list(
107
+ torch::jit::mobile::Module& module);
108
+
109
+ } // namespace mobile
110
+
111
+ } // namespace jit
112
+ } // namespace torch
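Editorial note: a hedged sketch of the loader overload that also pulls extra archive entries. The "metadata.json" key is purely illustrative; it is only populated if such an extra file actually exists in the .ptl archive.

#include <string>
#include <torch/csrc/jit/mobile/import.h>

using namespace torch::jit;

mobile::Module load_with_extras(const std::string& path) {
  // Pre-populate the keys you want; the loader fills in the values.
  ExtraFilesMap extra_files{{"metadata.json", ""}};
  return _load_for_mobile(path, c10::nullopt, extra_files);
}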
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/import_export_common.h ADDED
@@ -0,0 +1,23 @@
1
+ #pragma once
2
+
3
+ /**
4
+ * @file
5
+ * Declarations shared between import_data.cpp and export_data.cpp
6
+ */
7
+
8
+ namespace torch {
9
+ namespace jit {
10
+ namespace mobile {
11
+
12
+ namespace internal {
13
+ /**
14
+ * The name of the mobile::Module attribute which contains saved parameters, as
15
+ * a Dict of names to Tensors. Only used for Flatbuffer serialization.
16
+ */
17
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
18
+ constexpr char kSavedParametersAttributeName[] = "data";
19
+ } // namespace internal
20
+
21
+ } // namespace mobile
22
+ } // namespace jit
23
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/interpreter.h ADDED
@@ -0,0 +1,30 @@
1
+ #pragma once
2
+
3
+ #include <vector>
4
+
5
+ #include <torch/csrc/jit/mobile/code.h>
6
+ #include <torch/csrc/jit/mobile/frame.h>
7
+
8
+ namespace torch {
9
+ namespace jit {
10
+ namespace mobile {
11
+
12
+ struct InterpreterState {
13
+ TORCH_API explicit InterpreterState(const Code& code);
14
+ TORCH_API bool run(Stack& stack);
15
+
16
+ private:
17
+ void enterFrame(const Code&);
18
+ void leaveFrame();
19
+ void saveExceptionDebugHandles();
20
+ void callFunction(torch::jit::Function& f, Stack& stack);
21
+
22
+ c10::IValue& reg(size_t reg);
23
+ std::vector<c10::IValue> registers_;
24
+ std::vector<Frame> frames_;
25
+ };
26
+
27
+ const std::vector<DebugHandle>& getInterpretersExceptionDebugHandles();
28
+ } // namespace mobile
29
+ } // namespace jit
30
+ } // namespace torch
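Editorial note: a minimal sketch of driving InterpreterState directly. The caller pushes inputs onto the stack, run() executes the bytecode, and outputs are left on the same stack (Stack is the usual std::vector of c10::IValue); the wrapper name is illustrative.

#include <vector>
#include <torch/csrc/jit/mobile/interpreter.h>

bool run_mobile_code(
    const torch::jit::mobile::Code& code,
    std::vector<c10::IValue>& stack) {
  torch::jit::mobile::InterpreterState state(code);
  return state.run(stack);
}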
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/parse_bytecode.h ADDED
@@ -0,0 +1,25 @@
1
+ #pragma once
2
+ #include <torch/csrc/jit/mobile/function.h>
3
+
4
+ namespace torch {
5
+ namespace jit {
6
+ namespace mobile {
7
+ using c10::IValue;
8
+ TORCH_API void parseInstructions(
9
+ const std::string& function_name,
10
+ c10::ivalue::TupleElements&& ins_list,
11
+ c10::ivalue::TupleElements& debug_handles_m_tuple,
12
+ mobile::Function* function);
13
+ TORCH_API void parseConstants(
14
+ const c10::ivalue::TupleElements& consts_list,
15
+ mobile::Function* function);
16
+ TORCH_API void parseTypes(
17
+ const c10::ivalue::TupleElements& types_list,
18
+ mobile::Function* function);
19
+ TORCH_API void parseRegisterSize(size_t rsize, mobile::Function* function);
20
+ TORCH_API void applyUpgrader(
21
+ mobile::Function* function,
22
+ uint64_t operator_version);
23
+ } // namespace mobile
24
+ } // namespace jit
25
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/prim_ops_registery.h ADDED
1
+ #pragma once
2
+
3
+ #include <ATen/core/ivalue.h>
4
+ #include <functional>
5
+ #include <vector>
6
+
7
+ namespace torch {
8
+ namespace jit {
9
+ namespace mobile {
10
+
11
+ using Stack = std::vector<c10::IValue>;
12
+
13
+ void registerPrimOpsFunction(
14
+ const std::string& name,
15
+ const std::function<void(Stack&)>& fn);
16
+
17
+ bool hasPrimOpsFn(const std::string& name);
18
+
19
+ std::function<void(Stack&)>& getPrimOpsFn(const std::string& name);
20
+
21
+ class prim_op_fn_register {
22
+ public:
23
+ prim_op_fn_register(
24
+ const std::string& name,
25
+ const std::function<void(Stack&)>& fn) {
26
+ registerPrimOpsFunction(name, fn);
27
+ }
28
+ };
29
+
30
+ } // namespace mobile
31
+ } // namespace jit
32
+ } // namespace torch
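Editorial note: a hedged sketch of the static-registration pattern this header enables; the operator name "mynamespace::noop" is hypothetical.

#include <torch/csrc/jit/mobile/prim_ops_registery.h>

namespace {
// Runs at static-initialization time and makes the function discoverable
// through hasPrimOpsFn()/getPrimOpsFn() by name.
torch::jit::mobile::prim_op_fn_register register_noop(
    "mynamespace::noop",
    [](torch::jit::mobile::Stack& stack) { /* leave the stack untouched */ });
} // namespace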
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/promoted_prim_ops.h ADDED
@@ -0,0 +1,63 @@
1
+ #pragma once
2
+ #include <torch/csrc/jit/mobile/prim_ops_registery.h>
3
+ #include <torch/csrc/jit/mobile/register_ops_common_utils.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ void tupleIndex(Stack& stack);
9
+
10
+ void raiseException(Stack& stack);
11
+
12
+ void is(Stack& stack);
13
+
14
+ void unInitialized(Stack& stack);
15
+
16
+ void isNot(Stack& stack);
17
+
18
+ void aten_format(Stack& stack);
19
+
20
+ void size(Stack& stack);
21
+
22
+ void sym_size(Stack& stack);
23
+
24
+ void sym_size_int(Stack& stack);
25
+
26
+ void sym_stride_int(Stack& stack);
27
+
28
+ void sym_numel(Stack& stack);
29
+
30
+ void sym_storage_offset(Stack& stack);
31
+
32
+ void sym_stride(Stack& stack);
33
+
34
+ void device(Stack& stack);
35
+
36
+ void device_with_index(Stack& stack);
37
+
38
+ void dtype(Stack& stack);
39
+
40
+ void layout(Stack& stack);
41
+
42
+ void toPrimDType(Stack& stack);
43
+
44
+ void dim(Stack& stack);
45
+
46
+ void _not(Stack& stack);
47
+
48
+ void boolTensor(Stack& stack);
49
+
50
+ void toList(Stack& stack);
51
+
52
+ void numToTensorScalar(Stack& stack);
53
+
54
+ void isCuda(Stack& stack);
55
+
56
+ void numToTensorBool(Stack& stack);
57
+
58
+ void dictIndex(Stack& stack);
59
+
60
+ void raiseExceptionWithMessage(Stack& stack);
61
+
62
+ } // namespace jit
63
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/quantization.h ADDED
@@ -0,0 +1,38 @@
1
+ #pragma once
2
+
3
+ #include <c10/macros/Export.h>
4
+ #include <string>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+ namespace mobile {
9
+ class Module;
10
+ namespace quantization {
11
+ /*
12
+ * Device side PTQ API.
13
+ * Once the model has been prepared for quantization on server side, such model
14
+ * is sent to device. On device side the model is further trained. At the end of
15
+ * the training, before the model is readied for inference, we need to quantize
16
+ * the model.
17
+ * Usage of this API is as follows.
18
+ * PTQQuanizationHelper ptq_helper;
19
+ * ptq_helper.quantize_dynamic(m, "forward");
20
+ * Args:
21
+ * m: Captured by reference, an instance of mobile::Module. This module will be
22
+ * mutated in place to replace its <method_name> method with quantized
23
+ * equivalent. method_name: Name of the method to be quantized. AOT preparation
24
+ * for quantization must also have been done for this method. Returns: In place
25
+ * mutated `m` whose size should be smaller due to weight quantization and whose
26
+ * <method_name> method should use quantized ops
27
+ */
28
+ class TORCH_API PTQQuanizationHelper {
29
+ public:
30
+ PTQQuanizationHelper() = default;
31
+ void quantize_dynamic(
32
+ torch::jit::mobile::Module& m,
33
+ const std::string& method_name);
34
+ };
35
+ } // namespace quantization
36
+ } // namespace mobile
37
+ } // namespace jit
38
+ } // namespace torch
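Editorial note: the doc comment above already spells out the intended call sequence; this is a compilable sketch of the same flow, using "forward" as in the comment.

#include <torch/csrc/jit/mobile/module.h>
#include <torch/csrc/jit/mobile/quantization.h>

void quantize_on_device(torch::jit::mobile::Module& m) {
  torch::jit::mobile::quantization::PTQQuanizationHelper ptq_helper;
  // Mutates `m` in place, swapping the "forward" method for its quantized
  // equivalent (AOT preparation for quantization must have been done).
  ptq_helper.quantize_dynamic(m, "forward");
}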
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/register_ops_common_utils.h ADDED
@@ -0,0 +1,55 @@
1
+ #pragma once
2
+
3
+ #include <ATen/Context.h>
4
+ #include <ATen/NativeFunctions.h>
5
+ #include <ATen/core/ivalue.h>
6
+ #include <ATen/core/stack.h>
7
+ #include <torch/csrc/jit/runtime/jit_exception.h>
8
+ #include <torch/csrc/jit/runtime/vararg_functions.h>
9
+
10
+ namespace torch {
11
+ namespace jit {
12
+
13
+ inline void noop(Stack& n) {}
14
+
15
+ int64_t normalizeIndex(int64_t idx, int64_t list_size);
16
+
17
+ // reference function THPVariable_to in python_variable_methods.cpp
18
+ static C10_UNUSED at::Tensor to_dispatch(
19
+ at::Tensor self,
20
+ c10::optional<at::Device> device,
21
+ c10::optional<at::ScalarType> scalarType,
22
+ bool non_blocking,
23
+ bool copy) {
24
+ if (device && device->is_cuda()) {
25
+ at::globalContext().lazyInitCUDA();
26
+ }
27
+ if (!device && !scalarType && !copy) {
28
+ return self;
29
+ } else if (!device) {
30
+ return self.to(*scalarType, non_blocking, copy);
31
+ } else if (!scalarType) {
32
+ return self.to(*device, non_blocking, copy);
33
+ } else {
34
+ return self.to(*device, *scalarType, non_blocking, copy);
35
+ }
36
+ }
37
+
38
+ // Convert the tensor pointed to by \p data to a nested list. \p dim is the
39
+ // number of dimensions in the tensor and \p cur_dim is the dimension being
40
+ // processed by the current invocation. \p ty is the expected output IR type of
41
+ // the operation. \p scalar_ty is the scalar type of \p data. \p sizes and \p strides are
42
+ // the sizes and strides of the tensor operand and \p element_size is the size
43
+ // in bytes of one tensor element.
44
+ IValue tensorToListRecursive(
45
+ char* data,
46
+ int64_t cur_dim,
47
+ int64_t num_tensor_dims,
48
+ at::TypePtr ty,
49
+ at::ScalarType scalar_ty,
50
+ at::IntArrayRef sizes,
51
+ at::IntArrayRef strides,
52
+ size_t element_size);
53
+
54
+ } // namespace jit
55
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/type_parser.h ADDED
@@ -0,0 +1,54 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/dynamic_type.h>
4
+ #include <ATen/core/jit_type.h>
5
+ #include <unordered_set>
6
+
7
+ namespace c10 {
8
+
9
+ class TORCH_API TypeParser {
10
+ public:
11
+ explicit TypeParser(std::string pythonStr);
12
+ explicit TypeParser(std::vector<std::string>& pythonStrs);
13
+
14
+ TypePtr parse();
15
+ std::vector<TypePtr> parseList();
16
+ static const std::unordered_set<std::string>& getNonSimpleType();
17
+ static const std::unordered_set<std::string>& getCustomType();
18
+ std::unordered_set<std::string> getContainedTypes();
19
+
20
+ private:
21
+ TypePtr parseNamedTuple(const std::string& qualified_name);
22
+ TypePtr parseCustomType();
23
+ TypePtr parseTorchbindClassType();
24
+ TypePtr parseNonSimple(const std::string& token);
25
+
26
+ void expect(const char* s);
27
+ void expectChar(char c);
28
+ template <typename T>
29
+ TypePtr parseSingleElementType();
30
+
31
+ void lex();
32
+
33
+ std::string next();
34
+ c10::string_view nextView();
35
+ void advance();
36
+ C10_NODISCARD c10::string_view cur() const;
37
+
38
+ std::string pythonStr_;
39
+ size_t start_;
40
+ c10::string_view next_token_;
41
+
42
+ // Used for parsing string list
43
+ std::vector<std::string> pythonStrs_;
44
+ std::unordered_map<std::string, c10::TypePtr> str_type_ptr_map_;
45
+
46
+ // Store all contained types when parsing a string
47
+ std::unordered_set<std::string> contained_types_;
48
+ };
49
+
50
+ TORCH_API TypePtr parseType(const std::string& pythonStr);
51
+
52
+ TORCH_API std::vector<TypePtr> parseType(std::vector<std::string>& pythonStr);
53
+
54
+ } // namespace c10
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/upgrader_mobile.h ADDED
@@ -0,0 +1,43 @@
1
+ #pragma once
2
+
3
+ // #include <ATen/core/ivalue.h>
4
+ #include <ATen/core/ivalue_inl.h>
5
+
6
+ #include <torch/csrc/jit/mobile/code.h>
7
+ #include <torch/csrc/jit/mobile/function.h>
8
+ #include <torch/csrc/jit/serialization/import_export_functions.h>
9
+ #include <memory>
10
+ #include <string>
11
+ #include <unordered_map>
12
+ #include <vector>
13
+
14
+ namespace torch {
15
+ namespace jit {
16
+ struct Instruction;
17
+ struct Upgrader {
18
+ int min_version;
19
+ int max_version;
20
+ std::string upgrader_name;
21
+ int index;
22
+ };
23
+
24
+ // From operator_versions.yaml
25
+ TORCH_API const std::unordered_map<std::string, std::vector<Upgrader>>
26
+ getOperatorVersionMapForMobile();
27
+
28
+ struct OperatorString {
29
+ const std::string name;
30
+ const std::string overload_name;
31
+ const c10::optional<int> num_specified_args;
32
+ };
33
+
34
+ struct ByteCodeFunctionWithOperator {
35
+ mobile::Function& function;
36
+ std::vector<OperatorString> operators;
37
+ };
38
+
39
+ TORCH_API const std::vector<ByteCodeFunctionWithOperator>&
40
+ getUpgraderBytecodeList();
41
+
42
+ } // namespace jit
43
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/module_python.h ADDED
@@ -0,0 +1,35 @@
1
+ #pragma once
2
+ #include <pybind11/pybind11.h>
3
+ #include <pybind11/stl.h>
4
+ #include <torch/csrc/jit/api/module.h>
5
+ #include <torch/csrc/utils/pybind.h>
6
+
7
+ namespace py = pybind11;
8
+
9
+ namespace torch::jit {
10
+
11
+ inline c10::optional<Module> as_module(py::handle obj) {
12
+ static py::handle ScriptModule =
13
+ py::module::import("torch.jit").attr("ScriptModule");
14
+ if (py::isinstance(obj, ScriptModule)) {
15
+ return py::cast<Module>(obj.attr("_c"));
16
+ }
17
+ return c10::nullopt;
18
+ }
19
+
20
+ inline c10::optional<Object> as_object(py::handle obj) {
21
+ static py::handle ScriptObject =
22
+ py::module::import("torch").attr("ScriptObject");
23
+ if (py::isinstance(obj, ScriptObject)) {
24
+ return py::cast<Object>(obj);
25
+ }
26
+
27
+ static py::handle RecursiveScriptClass =
28
+ py::module::import("torch.jit").attr("RecursiveScriptClass");
29
+ if (py::isinstance(obj, RecursiveScriptClass)) {
30
+ return py::cast<Object>(obj.attr("_c"));
31
+ }
32
+ return c10::nullopt;
33
+ }
34
+
35
+ } // namespace torch::jit
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/pybind_utils.h ADDED
@@ -0,0 +1,1115 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/ivalue.h>
4
+ #include <ATen/core/jit_type.h>
5
+ #include <ATen/core/qualified_name.h>
6
+ #include <ATen/core/stack.h>
7
+ #include <pybind11/complex.h>
8
+ #include <pybind11/pybind11.h>
9
+ #include <pybind11/pytypes.h>
10
+ #include <torch/csrc/Device.h>
11
+ #include <torch/csrc/Dtype.h>
12
+ #include <torch/csrc/Export.h>
13
+ #include <torch/csrc/Layout.h>
14
+ #include <torch/csrc/QScheme.h>
15
+ #include <torch/csrc/Stream.h>
16
+ #include <torch/csrc/jit/api/module.h>
17
+ #include <torch/csrc/jit/frontend/schema_matching.h>
18
+ #include <torch/csrc/jit/frontend/tracer.h>
19
+ #include <torch/csrc/jit/python/module_python.h>
20
+ #include <torch/csrc/jit/python/python_custom_class.h>
21
+ #include <torch/csrc/jit/python/python_tracer.h>
22
+ #include <torch/csrc/jit/resource_guard.h>
23
+ #include <torch/csrc/jit/runtime/operator.h>
24
+ #include <torch/csrc/utils/pybind.h>
25
+ #include <torch/csrc/utils/python_arg_parser.h>
26
+ #include <torch/csrc/utils/six.h>
27
+ #ifdef USE_DISTRIBUTED
28
+ #include <torch/csrc/distributed/rpc/py_rref.h>
29
+ #include <torch/csrc/distributed/rpc/rref_impl.h>
30
+ #endif
31
+
32
+ #include <ATen/core/function_schema.h>
33
+ #include <c10/core/Stream.h>
34
+ #ifdef USE_C10D_NCCL
35
+ #include <c10/cuda/CUDACachingAllocator.h>
36
+ #include <c10/cuda/CUDAStream.h>
37
+ #endif
38
+ #include <c10/util/Exception.h>
39
+ #include <c10/util/Optional.h>
40
+ #include <c10/util/irange.h>
41
+
42
+ #include <algorithm>
43
+ #include <cstddef>
44
+ #include <string>
45
+ #include <utility>
46
+ #include <vector>
47
+
48
+ // The visibility attribute is to avoid a warning about storing a field in the
49
+ // struct that has a different visibility (from pybind) than the struct.
50
+ #ifdef _WIN32
51
+ #define VISIBILITY_HIDDEN
52
+ #else
53
+ #define VISIBILITY_HIDDEN __attribute__((visibility("hidden")))
54
+ #endif
55
+
56
+ namespace torch::jit {
57
+
58
+ using ResolutionCallback = std::function<py::object(std::string)>;
59
+
60
+ void clear_registered_instances(void* ptr);
61
+
62
+ TORCH_PYTHON_API IValue toIValue(
63
+ py::handle obj,
64
+ const TypePtr& type,
65
+ c10::optional<int32_t> N = c10::nullopt);
66
+
67
+ TORCH_PYTHON_API py::object toPyObject(IValue ivalue);
68
+
69
+ // Hack to overload the behavior of toIValue to accept Python
70
+ // numbers in places where a Tensor is expected
71
+ // See also torch::should_allow_numbers_as_tensors
72
+ class ToIValueAllowNumbersAsTensors {
73
+ bool old_;
74
+
75
+ public:
76
+ ToIValueAllowNumbersAsTensors(bool enable);
77
+ ~ToIValueAllowNumbersAsTensors();
78
+ };
79
+
80
+ // Wrap Python function to guard deref
81
+ // NB: Need VISIBILITY_HIDDEN for silencing compiler error,
82
+ // 'torch::jit::PythonFunctionGuard' declared with greater visibility than the
83
+ // type of its field 'torch::jit::PythonFunctionGuard::func_'
84
+ struct VISIBILITY_HIDDEN PythonFunctionGuard {
85
+ explicit PythonFunctionGuard(py::function func) : func_(std::move(func)) {}
86
+
87
+ ~PythonFunctionGuard() {
88
+ pybind11::gil_scoped_acquire ag;
89
+ func_.dec_ref();
90
+ // explicitly setting PyObject* to nullptr to prevent py::object's dtor to
91
+ // decref on the PyObject again.
92
+ // See Note [Destructing py::object] in python_ivalue.h
93
+ func_.ptr() = nullptr;
94
+ }
95
+
96
+ py::function func_;
97
+ };
98
+
99
+ // The PythonFutureWrapper for ivalue::Future
100
+ //
101
+ // NB: VISIBILITY_HIDDEN is for silencing compiling error,
102
+ // "error: 'torch::jit::PythonFutureWrapper' declared with greater visibility
103
+ // than the type of its field 'torch::jit::PythonFutureWrapper::unwrap_func'
104
+ // [-Werror=attributes]"
105
+ //
106
+ // NB: inherit from enable_shared_from_this because then(py::function) needs to
107
+ // get a shared_ptr from this pointer.
108
+ struct VISIBILITY_HIDDEN PythonFutureWrapper
109
+ : std::enable_shared_from_this<PythonFutureWrapper> {
110
+ using UnwrapFunc = std::function<void(py::object)>;
111
+
112
+ explicit PythonFutureWrapper(
113
+ c10::intrusive_ptr<c10::ivalue::Future> fut,
114
+ c10::optional<UnwrapFunc> unwrap_func = c10::nullopt)
115
+ : fut(std::move(fut)), unwrap_func(std::move(unwrap_func)) {}
116
+
117
+ explicit PythonFutureWrapper(const PythonFutureWrapper&) = delete;
118
+ PythonFutureWrapper& operator=(const PythonFutureWrapper&) = delete;
119
+
120
+ bool done() {
121
+ return fut->completed();
122
+ }
123
+
124
+ py::object value() {
125
+ // acquiring GIL as toPyObject creates new py::object
126
+ // without grabbing the GIL.
127
+ py::gil_scoped_acquire acquire;
128
+ py::object py_obj = toPyObject(fut->value());
129
+ // unwrap_func is a general compositional function that takes in a
130
+ // py::object and executes some python function. It is currently mostly used
131
+ // to throw python exceptions.
132
+ if (unwrap_func) {
133
+ (*unwrap_func)(py_obj);
134
+ }
135
+ return py_obj;
136
+ }
137
+
138
+ py::object wait() {
139
+ fut->wait();
140
+ if (jit::tracer::isTracing()) {
141
+ auto graph = jit::tracer::getTracingState()->graph;
142
+
143
+ Value* fut_val = jit::tracer::getValueTrace(fut);
144
+ auto output = graph->insert(aten::wait, {fut_val});
145
+ jit::tracer::setValueTrace(fut->value(), output);
146
+ }
147
+ return value();
148
+ }
149
+
150
+ // The py::function cb arg must take a std::shared_ptr<PythonFutureWrapper>
151
+ // (i.e., torch._C.Future) as the only argument. If the type mismatches, an
152
+ // error will be thrown when waiting for the value of this returned Future.
153
+ std::shared_ptr<PythonFutureWrapper> then(py::function cb) {
154
+ // We need an additional layer of wrapping here to guard the
156
+ // destruction of the py::function object, because the
157
+ // Future owns a reference to the py::function in its callback
158
+ // vector, but Future does not acquire the GIL on destruction.
158
+ auto pf = std::make_shared<PythonFunctionGuard>(std::move(cb));
159
+
160
+ return std::make_shared<jit::PythonFutureWrapper>(fut->then(
161
+ // Capture a copy of the ivalue::Future instead of the `this` pointer
162
+ // because the PythonFutureWrapper object could have been deleted
163
+ // when the callbacks are fired. For example, RPC only captures the
164
+ // ivalue::Future instead of PythonFutureWrapper in JitFuture's
165
+ // callback functions. Hence, if user code does not hold a reference to
166
+ // this PythonFutureWrapper object, there is no guarantee that the
167
+ // PythonFutureWrapper is still valid when running the callback.
168
+ [pyFut(this->getPtr()),
169
+ pf(std::move(pf))](c10::ivalue::Future& /* unused */) -> IValue {
170
+ try {
171
+ pybind11::gil_scoped_acquire ag;
172
+ return toIValue(pf->func_(pyFut), PyObjectType::get());
173
+ } catch (py::error_already_set& e) {
174
+ auto err = std::runtime_error(c10::str(
175
+ "Got the following error when running the callback: ",
176
+ e.what()));
177
+ {
178
+ pybind11::gil_scoped_acquire ag;
179
+ // Release ownership on py::objects and also restore Python
180
+ // Error Indicator.
181
+ e.restore();
182
+ // Clear the Python Error Indicator as we have recorded the
183
+ // exception in the response message.
184
+ PyErr_Clear();
185
+ }
186
+
187
+ throw err;
188
+ }
189
+ },
190
+ PyObjectType::get()));
191
+ }
192
+
193
+ void add_done_callback(py::function cb) {
194
+ auto pf = std::make_shared<PythonFunctionGuard>(std::move(cb));
195
+ // NOLINTNEXTLINE(modernize-avoid-bind)
196
+ fut->addCallback(std::bind(
197
+ [pyFut(this->getPtr())](std::shared_ptr<PythonFunctionGuard> pf) {
198
+ try {
199
+ pybind11::gil_scoped_acquire ag;
200
+ pf->func_(pyFut);
201
+ } catch (py::error_already_set& e) {
202
+ {
203
+ pybind11::gil_scoped_acquire ag;
204
+ // Release ownership on py::objects and also restore Python
205
+ // Error Indicator.
206
+ e.restore();
207
+ // Clear the Python Error Indicator as we have recorded the
208
+ // exception in the response message.
209
+ PyErr_Clear();
210
+ }
211
+ // Log and ignore exceptions raised through the callback
212
+ LOG(ERROR) << "Got the following error when running the callback: "
213
+ << e.what();
214
+
215
+ } catch (const std::exception& e) {
216
+ // Log and ignore exceptions raised through the callback
217
+ LOG(ERROR) << "Got the following error when running the callback: "
218
+ << e.what();
219
+ }
220
+ },
221
+ std::move(pf)));
222
+ }
223
+
224
+ void markCompleted(const py::object& pyValue) {
225
+ DCHECK(PyGILState_Check());
226
+ IValue value = toIValue(pyValue, PyObjectType::get());
227
+
228
+ py::gil_scoped_release release;
229
+ fut->markCompleted(std::move(value));
230
+ }
231
+
232
+ c10::intrusive_ptr<c10::ivalue::Future> fut;
233
+ // unwrap_func works like a callback for the value returned by
234
+ // PythonFutureWrapper::wait().
235
+ c10::optional<UnwrapFunc> unwrap_func;
236
+
237
+ private:
238
+ std::shared_ptr<PythonFutureWrapper> getPtr() {
239
+ return shared_from_this();
240
+ }
241
+ };
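For orientation, here is a minimal usage sketch of the wrapper above. It assumes the torch Python extension is initialized, the caller already holds the GIL, and that toIValue/toPyObject behave as declared earlier in this header; the function name and the value 42 are illustrative only, not part of this header.

void example_python_future() {
  using namespace torch::jit;
  // Future that will carry an arbitrary Python object.
  auto fut = c10::make_intrusive<c10::ivalue::Future>(c10::PyObjectType::get());
  auto wrapped = std::make_shared<PythonFutureWrapper>(fut);

  TORCH_CHECK(!wrapped->done());  // nothing has been set yet

  // markCompleted converts the Python value to an IValue of PyObject type
  // and completes the underlying ivalue::Future with the GIL released.
  wrapped->markCompleted(py::int_(42));

  // wait()/value() convert the stored IValue back into a py::object.
  py::object result = wrapped->wait();
  py::print("future value:", result);  // 42
}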
242
+
243
+ // The PythonAwaitWrapper for ivalue::Await
244
+ //
245
+ // Expresses delayed function execution with lazy semantics,
246
+ // i.e. in eager mode Await[W] can be used as W.
247
+ // When an attribute of type W is requested, Await[W] will return the
248
+ // attribute of W, transparently calling wait() beforehand.
249
+ // There are no lazy semantics in script mode; an explicit wait(Await[W]) -> W
250
+ // must be called to convert to type W.
251
+ //
252
+ // The Await object takes shared ownership of specified function and the
253
+ // arguments. After first call for wait() it owns the result. Deliberately no
254
+ // type inference for eager mode.
255
+ struct VISIBILITY_HIDDEN PythonAwaitWrapper
256
+ : std::enable_shared_from_this<PythonAwaitWrapper> {
257
+ explicit PythonAwaitWrapper(c10::intrusive_ptr<c10::ivalue::Await> aw)
258
+ : aw_(std::move(aw)) {}
259
+ explicit PythonAwaitWrapper(py::handle input) {
260
+ args_ = py::tuple(1u);
261
+ args_[0] = input;
262
+ auto type = PyObjectType::get();
263
+ aw_ = c10::make_intrusive<c10::ivalue::Await>(type);
264
+ aw_->markCompleted(toIValue(input, type));
265
+ }
266
+
267
+ explicit PythonAwaitWrapper(py::function pf, py::tuple args) {
268
+ pyfg_ = std::make_shared<torch::jit::PythonFunctionGuard>(std::move(pf));
269
+ args_ = std::move(args);
270
+ std::function<IValue()> f = [fg(pyfg_), &args(args_)]() {
271
+ pybind11::gil_scoped_acquire ag;
272
+ return toIValue(fg->func_(*args), PyObjectType::get());
273
+ };
274
+ aw_ = c10::make_intrusive<c10::ivalue::Await>(
275
+ PyObjectType::get(), std::move(f));
276
+ }
277
+
278
+ explicit PythonAwaitWrapper(const PythonAwaitWrapper&) = delete;
279
+ PythonAwaitWrapper& operator=(const PythonAwaitWrapper&) = delete;
280
+
281
+ py::object wait() {
282
+ py::gil_scoped_acquire acquire;
283
+ return toPyObject(aw_->wait());
284
+ }
285
+
286
+ // Nowait semantics refer to the trivial case where the Await is constructed
287
+ // directly from the result.
288
+ bool is_nowait() {
289
+ return pyfg_ == nullptr;
290
+ }
291
+
292
+ const py::function fn() {
293
+ TORCH_CHECK(
294
+ pyfg_, "Await constructed as awaitable_nowait does not have fn");
295
+ return pyfg_->func_;
296
+ }
297
+
298
+ const py::tuple args() {
299
+ return args_;
300
+ }
301
+
302
+ TypePtr type() {
303
+ return aw_->type();
304
+ }
305
+
306
+ c10::intrusive_ptr<c10::ivalue::Await> aw_;
307
+ std::shared_ptr<torch::jit::PythonFunctionGuard> pyfg_;
308
+ py::tuple args_;
309
+
310
+ private:
311
+ std::shared_ptr<PythonAwaitWrapper> getPtr() {
312
+ return shared_from_this();
313
+ }
314
+ };
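A rough sketch of the two construction modes described above (nowait vs. deferred), again assuming the GIL is held and the torch bindings are loaded; the lambda, the values, and the use of py::eval are assumptions for illustration, not something this header prescribes.

void example_python_await() {
  using namespace torch::jit;
  // Nowait form: constructed directly from a ready Python value.
  auto ready = std::make_shared<PythonAwaitWrapper>(py::int_(7));
  TORCH_CHECK(ready->is_nowait());
  py::print(ready->wait());  // 7

  // Deferred form: the wrapped function only runs on the first wait().
  auto fn = py::eval("lambda x: x * 2").cast<py::function>();
  auto deferred =
      std::make_shared<PythonAwaitWrapper>(fn, py::make_tuple(21));
  TORCH_CHECK(!deferred->is_nowait());
  py::print(deferred->wait());  // 42
}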
315
+
316
+ // error reporting: when reporting user-caused errors, these functions should
317
+ // not use AT_ERROR macros, since these macros add stack trace information
318
+ // that is confusing to display to the end user because it always reports
319
+ // locations in libtorch code rather than user code.
320
+
321
+ inline std::shared_ptr<CompilationUnit> get_python_cu() {
322
+ return py::module::import("torch.jit._state")
323
+ .attr("_python_cu")
324
+ .cast<std::shared_ptr<CompilationUnit>>();
325
+ }
326
+
327
+ struct TypedIValue : public std::pair<IValue, TypePtr> {
328
+ using pair::pair;
329
+
330
+ IValue& ivalue() {
331
+ return this->first;
332
+ }
333
+ TypePtr& type() {
334
+ return this->second;
335
+ }
336
+ };
337
+
338
+ inline TypedIValue toDictKeyIValue(py::handle key) {
339
+ if (py::isinstance<py::str>(key)) {
340
+ return TypedIValue(
341
+ ConstantString::create(py::cast<std::string>(key)), StringType::get());
342
+ } else if (py::isinstance<py::int_>(key)) {
343
+ return TypedIValue(py::cast<int64_t>(key), IntType::get());
344
+ } else if (py::isinstance<py::float_>(key)) {
345
+ return TypedIValue(py::cast<double>(key), FloatType::get());
346
+ } else {
347
+ AT_ERROR("Dictionary inputs may only have string, int, or float keys");
348
+ }
349
+ }
350
+
351
+ inline c10::optional<TypePtr> unifyOrInitializeType(
352
+ const TypePtr& accum,
353
+ const TypePtr& unify) {
354
+ if (!accum) {
355
+ return unify;
356
+ }
357
+ return unifyTypes(accum, unify);
358
+ }
359
+
360
+ using InferredType = c10::InferredType;
361
+
362
+ InferredType tryToInferContainerType(py::handle input);
363
+
364
+ // Try to infer the type of a Python object
365
+ // The type cannot be inferred if:
366
+ // input is an empty container (list, dict)
367
+ // input is a list with element types that cannot be unified
368
+ // input is a dict with key or value types that cannot be unified
369
+ inline InferredType tryToInferType(py::handle input) {
370
+ // Try tensor types
371
+ if (THPVariable_Check(input.ptr())) {
372
+ return InferredType(TensorType::get());
373
+ }
374
+
375
+ if (input.is_none()) {
376
+ return InferredType(NoneType::get());
377
+ }
378
+
379
+ if (py::isinstance<StrongFunctionPtr>(input)) {
380
+ auto fn = py::cast<StrongFunctionPtr>(input).function_;
381
+ return InferredType(FunctionType::create(fn));
382
+ }
383
+
384
+ // Try basic types first
385
+ if (py::isinstance<py::bool_>(input)) {
386
+ return InferredType(BoolType::get());
387
+ // NOLINTNEXTLINE(bugprone-branch-clone)
388
+ } else if (py::isinstance<py::int_>(input)) {
389
+ return InferredType(IntType::get());
390
+ } else if (py::isinstance<py::float_>(input)) {
391
+ return InferredType(FloatType::get());
392
+ } else if (PyComplex_CheckExact(input.ptr())) {
393
+ return InferredType(ComplexType::get());
394
+ } else if (py::isinstance<py::str>(input)) {
395
+ return InferredType(StringType::get());
396
+ } else if (THPLayout_Check(input.ptr())) {
397
+ return InferredType(IntType::get());
398
+ } else if (THPDevice_Check(input.ptr())) {
399
+ return InferredType(DeviceObjType::get());
400
+ } else if (THPGenerator_Check(input.ptr())) {
401
+ return InferredType(GeneratorType::get());
402
+ } else if (THPStream_Check(input.ptr())) {
403
+ return InferredType(StreamObjType::get());
404
+ } else if (THPDtype_Check(input.ptr())) {
405
+ return InferredType(IntType::get());
406
+ } else if (THPQScheme_Check(input.ptr())) {
407
+ return InferredType(IntType::get());
408
+ } else if (THPLayout_Check(input.ptr())) {
409
+ return InferredType(IntType::get());
410
+ }
411
+
412
+ auto enum_type = py::module::import("enum").attr("Enum");
413
+ py::bool_ isEnumValue = py::isinstance(input, enum_type);
414
+ if (py::cast<bool>(isEnumValue)) {
415
+ auto enum_class = input.attr("__class__");
416
+ auto enum_type = py::cast<TypePtr>(
417
+ py::module::import("torch.jit.annotations")
418
+ .attr("try_ann_to_type")(enum_class, SourceRange()));
419
+ return InferredType(std::move(enum_type));
420
+ }
421
+
422
+ py::bool_ isClass =
423
+ py::module::import("inspect").attr("isclass")(input.get_type());
424
+ if (py::cast<bool>(isClass)) {
425
+ // Assume that the class is compiled already or will compile. Invalidate
426
+ // this later if needed.
427
+ bool class_compiled = true;
428
+
429
+ // Check if the type is already compiled.
430
+ py::object existing_ty = py::module::import("torch.jit._state")
431
+ .attr("_get_script_class")(input.get_type());
432
+
433
+ if (existing_ty.is_none()) {
434
+ // If not, try to compile it.
435
+ py::bool_ can_compile = py::module::import("torch._jit_internal")
436
+ .attr("can_compile_class")(input.get_type());
437
+
438
+ if (py::cast<bool>(can_compile)) {
439
+ // Try to compile the class. This is wrapped in a try-catch because
440
+ // compilation of class types can raise an Exception and in that case,
441
+ // we want to defer to other attempts at type inference below rather
442
+ // than fail compilation altogether.
443
+ try {
444
+ py::module::import("torch.jit._script")
445
+ .attr("_recursive_compile_class")(
446
+ input.get_type(), SourceRange());
447
+ } catch (...) {
448
+ // Invalidate the assumption that the class compiled so that we don't
449
+ // look up and return its JIT type as the type for the input.
450
+ class_compiled = false;
451
+ }
452
+ }
453
+ }
454
+
455
+ // If the class compiled successfully, look up the existing JIT type by
456
+ // qualified name and return it.
457
+ if (class_compiled) {
458
+ auto script_class = py::module::import("torch.jit._state")
459
+ .attr("_get_script_class")(input.get_type());
460
+
461
+ if (!script_class.is_none()) {
462
+ auto class_type = py::cast<ClassTypePtr>(script_class);
463
+
464
+ if (class_type && !class_type->is_module()) {
465
+ return InferredType(std::move(class_type));
466
+ }
467
+ }
468
+ }
469
+ }
470
+
471
+ if (py::isinstance<Object>(input)) {
472
+ auto object = py::cast<Object>(input);
473
+ return InferredType(object.type());
474
+ #ifdef USE_RPC
475
+ } else if (py::isinstance<torch::distributed::rpc::PyRRef>(input)) {
476
+ auto rref_ivalue = input.cast<torch::distributed::rpc::PyRRef>().toIValue();
477
+ return InferredType(rref_ivalue.type());
478
+ #endif
479
+ }
480
+
481
+ auto await_type = py::module::import("torch._awaits").attr("_Await");
482
+ py::bool_ is_await = py::isinstance(input, await_type);
483
+ if (py::cast<bool>(is_await)) {
484
+ auto awptr = input.cast<std::shared_ptr<PythonAwaitWrapper>>();
485
+ return InferredType(AwaitType::create(awptr->aw_->elementType()));
486
+ }
487
+
488
+ if (as_module(py::cast<py::object>(input))) {
489
+ return InferredType("Cannot infer type of ScriptModule");
490
+ }
491
+
492
+ auto module_type = py::module::import("torch.nn").attr("Module");
493
+ py::bool_ is_module = py::isinstance(input, module_type);
494
+ if (py::cast<bool>(is_module)) {
495
+ return InferredType("Cannot infer concrete type of torch.nn.Module");
496
+ }
497
+
498
+ // Try container types
499
+ return tryToInferContainerType(input);
500
+ }
501
+
502
+ inline InferredType tryToInferContainerType(py::handle input) {
503
+ if (six::isTuple(input)) {
504
+ py::tuple tuple = py::cast<py::tuple>(input);
505
+ std::vector<TypePtr> element_types;
506
+ element_types.reserve(tuple.size());
507
+
508
+ for (py::handle elem : tuple) {
509
+ auto type_match = tryToInferType(elem);
510
+ if (type_match.success()) {
511
+ element_types.push_back(type_match.type());
512
+ } else {
513
+ // Forward error message along
514
+ return type_match.reason();
515
+ }
516
+ }
517
+ return InferredType(TupleType::create(std::move(element_types)));
518
+ } else if (PyDict_Check(input.ptr())) {
519
+ // Check to make sure we can generate useful input/output types
520
+ auto dict = py::cast<py::dict>(input);
521
+ size_t len = py::len(dict);
522
+ if (!len) {
523
+ return InferredType("Dictionary inputs must have entries");
524
+ }
525
+
526
+ TypePtr key_type = nullptr;
527
+ TypePtr value_type = nullptr;
528
+
529
+ for (auto entry : dict) {
530
+ // Try to infer the key type and unify it with the existing one
531
+ auto entry_key_type_match = tryToInferType(entry.first);
532
+ if (!entry_key_type_match.success()) {
533
+ return entry_key_type_match.reason();
534
+ }
535
+ auto unified_key =
536
+ unifyOrInitializeType(key_type, entry_key_type_match.type());
537
+ if (!unified_key) {
538
+ return InferredType(c10::str(
539
+ "Dictionary inputs to traced functions must have consistent type. Found ",
540
+ key_type->repr_str(),
541
+ " and ",
542
+ (entry_key_type_match.type())->repr_str()));
543
+ }
544
+
545
+ // Try to infer the value type and unify it with the existing one
546
+ auto entry_value_type_match = tryToInferType(entry.second);
547
+ if (!entry_value_type_match.success()) {
548
+ return entry_value_type_match.reason();
549
+ }
550
+ auto unified_value =
551
+ unifyOrInitializeType(value_type, entry_value_type_match.type());
552
+ if (!unified_value) {
553
+ return InferredType(c10::str(
554
+ "Dictionary inputs to traced functions must have consistent type. Found ",
555
+ value_type->repr_str(),
556
+ " and ",
557
+ (entry_value_type_match.type())->repr_str()));
558
+ }
559
+
560
+ key_type = *unified_key;
561
+ value_type = *unified_value;
562
+ }
563
+ return InferredType(
564
+ DictType::create(std::move(key_type), std::move(value_type)));
565
+ } else if (PyList_Check(input.ptr())) {
566
+ auto list = py::cast<py::list>(input);
567
+ size_t len = py::len(list);
568
+ if (!len) {
569
+ return InferredType("List trace inputs must have elements");
570
+ }
571
+
572
+ TypePtr element_type = nullptr;
573
+ for (auto elem : list) {
574
+ auto element_type_match = tryToInferType(elem);
575
+ if (!element_type_match.success()) {
576
+ return InferredType(c10::str(
577
+ "Could not infer type of list element: ",
578
+ element_type_match.reason()));
579
+ }
580
+ auto unified_type =
581
+ unifyOrInitializeType(element_type, element_type_match.type());
582
+ if (!unified_type) {
583
+ return InferredType(c10::str(
584
+ "List inputs to traced functions must have consistent element type. Found ",
585
+ element_type->repr_str(),
586
+ " and ",
587
+ (element_type_match.type())->repr_str()));
588
+ }
589
+ element_type = *unified_type;
590
+ }
591
+ return InferredType(ListType::create(element_type));
592
+ } else {
593
+ // TODO: this message is not correct anymore, since this InferredType is
594
+ // used in a number of circumstances unrelated to tracing. We can re-use
595
+ // this instead of the attribute_failure stuff in concreteType
596
+ return InferredType(c10::str(
597
+ "Only tensors and (possibly nested) tuples of tensors, lists, or dicts ",
598
+ "are supported ",
599
+ "as inputs or outputs of traced functions",
600
+ ", but instead got value of type ",
601
+ py::str(input.get_type().attr("__name__")),
602
+ "."));
603
+ }
604
+ }
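To illustrate the inference rules spelled out above, a small sketch (GIL held; the dictionary contents and the function name are illustrative assumptions):

void example_type_inference() {
  using namespace torch::jit;
  py::dict d;
  d["a"] = 1;
  d["b"] = 2;
  auto ok = tryToInferType(d);
  TORCH_CHECK(ok.success());
  py::print(ok.type()->repr_str());  // Dict[str, int]

  // Empty containers cannot be inferred; reason() explains why.
  auto bad = tryToInferType(py::dict());
  TORCH_CHECK(!bad.success());
  py::print(bad.reason());  // "Dictionary inputs must have entries"
}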
605
+
606
+ inline bool isTraceableType(const TypePtr& type) {
607
+ if (type->isSubtypeOf(*TensorType::get())) {
608
+ return true;
609
+ }
610
+
611
+ if (auto list_type = type->cast<ListType>()) {
612
+ return isTraceableType(list_type->getElementType());
613
+ }
614
+
615
+ if (auto tuple_type = type->cast<TupleType>()) {
616
+ return std::all_of(
617
+ tuple_type->elements().begin(),
618
+ tuple_type->elements().end(),
619
+ [](const TypePtr& element_type) {
620
+ return isTraceableType(element_type);
621
+ });
622
+ }
623
+
624
+ if (auto dict_type = type->cast<DictType>()) {
625
+ return isTraceableType(dict_type->getValueType());
626
+ }
627
+
628
+ return false;
629
+ }
630
+
631
+ inline IValue toTypeInferredIValue(py::handle input) {
632
+ auto match = tryToInferType(input);
633
+ if (!match.success()) {
634
+ auto object = py::cast<py::object>(input);
635
+ if (auto mod = as_module(object)) {
636
+ // if obj is already a ScriptModule, just return its ivalue
637
+ auto ptr = mod.value()._ivalue();
638
+ // explicit copy semantics for strong ownership of the resource.
639
+ return c10::intrusive_ptr<c10::ivalue::Object>::reclaim_copy(
640
+ ptr.release());
641
+ }
642
+
643
+ // Check if the obj is a ScriptObject.
644
+ if (auto script_obj = as_object(object)) {
645
+ auto ptr = script_obj.value()._ivalue();
646
+ return c10::intrusive_ptr<c10::ivalue::Object>::reclaim_copy(
647
+ ptr.release());
648
+ }
649
+ AT_ERROR(
650
+ "Tracer cannot infer type of ", py::str(input), "\n:", match.reason());
651
+ }
652
+ return toIValue(input, match.type());
653
+ }
654
+
655
+ inline Stack toTraceableStack(const py::tuple& inputs) {
656
+ auto info = toTypeInferredIValue(inputs);
657
+ TORCH_CHECK(
658
+ isTraceableType(info.type()),
659
+ "Type '",
660
+ info.type()->repr_str(),
661
+ "' cannot be traced. Only Tensors and (possibly nested) Lists, Dicts, and"
662
+ " Tuples of Tensors can be traced");
663
+ return info.toTupleRef().elements().vec();
664
+ }
665
+
666
+ // Serialize the python dictionary into a traceable stack.
667
+ inline Stack toTraceableStack(const py::dict& inputs) {
668
+ Stack res;
669
+ for (auto it = inputs.begin(); it != inputs.end(); it++) {
670
+ if (THPVariable_Check(it->second.ptr())) {
671
+ res.push_back(toIValue(it->second, tryToInferType(it->second).type()));
672
+ }
673
+ }
674
+ return res;
675
+ }
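A hedged sketch of feeding Python inputs into a trace via the helpers above. It assumes the at::Tensor type caster pulled in through torch/csrc/utils/pybind.h is available and the GIL is held; the tensors are placeholders.

void example_traceable_stack() {
  using namespace torch::jit;
  py::tuple inputs =
      py::make_tuple(py::cast(at::ones({2, 2})), py::cast(at::zeros({2})));
  // Infers Tuple[Tensor, Tensor], checks traceability, and unpacks the tuple.
  Stack stack = toTraceableStack(inputs);
  TORCH_CHECK(stack.size() == 2);
  TORCH_CHECK(stack[0].isTensor() && stack[1].isTensor());
}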
676
+
677
+ inline IValue createGenericList(py::handle obj, const TypePtr& elem_type) {
678
+ auto elems = c10::impl::GenericList(elem_type);
679
+ for (auto elem : obj) {
680
+ elems.push_back(toIValue(elem, elem_type));
681
+ }
682
+ return IValue(elems);
683
+ }
684
+
685
+ inline IValue createGenericDict(
686
+ const py::dict& obj,
687
+ const TypePtr& key_type,
688
+ const TypePtr& value_type) {
689
+ c10::impl::GenericDict elems(key_type, value_type);
690
+ elems.reserve(py::len(obj));
691
+ for (auto& entry : obj) {
692
+ elems.insert(
693
+ toIValue(entry.first, key_type), toIValue(entry.second, value_type));
694
+ }
695
+ return IValue(elems);
696
+ }
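A short sketch of the typed-container helpers above (illustrative values only; GIL held):

void example_generic_containers() {
  using namespace torch::jit;
  py::list numbers;
  numbers.append(1);
  numbers.append(2);
  // Each element is converted with toIValue against the declared element type.
  IValue int_list = createGenericList(numbers, c10::IntType::get());

  py::dict scores;
  scores["a"] = 0.5;
  IValue score_dict = createGenericDict(
      scores, c10::StringType::get(), c10::FloatType::get());

  py::print(toPyObject(int_list), toPyObject(score_dict));
}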
697
+
698
+ template <class T>
699
+ inline void guardAgainstNamedTensor(const T& var) {
700
+ TORCH_CHECK(
701
+ !var.has_names(),
702
+ "NYI: Named tensors are currently unsupported in TorchScript. As a "
703
+ "workaround please drop names via `tensor = tensor.rename(None)`.");
704
+ }
705
+
706
+ // Extract custom class registered with torchbind
707
+ template <typename T>
708
+ c10::intrusive_ptr<T> toCustomClass(py::handle obj) {
709
+ static_assert(
710
+ std::is_base_of<CustomClassHolder, T>::value, "T is not a CustomClass");
711
+ const auto& type = c10::getCustomClassType<c10::intrusive_ptr<T>>();
712
+ c10::IValue ivalue = toIValue(obj, type);
713
+ return std::move(ivalue).toCustomClass<T>();
714
+ }
715
+
716
+ // Small wrapper around getting the type name string from Python to make
717
+ // types easier to interpret, e.g. give the structural type for a NamedTuple
718
+ inline std::string friendlyTypeName(py::handle obj) {
719
+ if (py::isinstance<py::tuple>(obj) && py::hasattr(obj, "_fields")) {
720
+ auto field_names =
721
+ py::cast<std::vector<std::string>>(py::getattr(obj, "_fields"));
722
+ std::stringstream ss;
723
+ ss << py::str(obj.get_type().attr("__name__"));
724
+ ss << " (aka NamedTuple(";
725
+ bool first = true;
726
+ for (auto& field_name : field_names) {
727
+ if (!first) {
728
+ ss << ", ";
729
+ }
730
+ ss << field_name;
731
+ first = false;
732
+ }
733
+ ss << "))";
734
+ return ss.str();
735
+ } else {
736
+ return py::str(obj.get_type().attr("__name__"));
737
+ }
738
+ }
739
+
740
+ // Thrown when trying to create a schema for a list of python
741
+ // arguments that cannot be converted.
742
+ // Can be caught by the caller to attempt to use other schema
743
+ // when there is an overloaded operator.
744
+ struct schema_match_error : public std::runtime_error {
745
+ using std::runtime_error::runtime_error;
746
+ };
747
+
748
+ inline IValue argumentToIValue(
749
+ const FunctionSchema& schema,
750
+ size_t argumentPosition,
751
+ py::handle object) {
752
+ const auto& argument = schema.arguments().at(argumentPosition);
753
+ try {
754
+ return toIValue(object, argument.real_type(), argument.N());
755
+ } catch (const py::cast_error& error) {
756
+ throw schema_match_error(c10::str(
757
+ schema.formatTypeMismatchMsg(
758
+ argument,
759
+ friendlyTypeName(object),
760
+ argumentPosition,
761
+ py::repr(object)),
762
+ "\nCast error details: ",
763
+ error.what()));
764
+ } catch (const py::error_already_set& error) {
765
+ throw schema_match_error(c10::str(
766
+ schema.formatTypeMismatchMsg(
767
+ argument,
768
+ friendlyTypeName(object),
769
+ argumentPosition,
770
+ py::repr(object)),
771
+ "\n Python error details: ",
772
+ error.what()));
773
+ }
774
+ }
775
+
776
+ inline IValue returnToIValue(const TypePtr& type, py::handle object) {
777
+ try {
778
+ return toIValue(object, type);
779
+ } catch (const py::cast_error& error) {
780
+ throw std::runtime_error(c10::str(
781
+ " expected value of type ",
782
+ type->str(),
783
+ " for return value but instead got value of type ",
784
+ py::str(object.get_type().attr("__name__")),
785
+ ".",
786
+ "\nValue: ",
787
+ py::repr(object),
788
+ "\nCast error details: ",
789
+ error.what()));
790
+ }
791
+ }
792
+
793
+ inline py::object getScriptedClassOrError(const c10::NamedTypePtr& classType) {
794
+ auto py_class =
795
+ py::module::import("torch.jit._state")
796
+ .attr("_get_python_class")(classType->name()->qualifiedName());
797
+ if (py_class.is_none()) {
798
+ std::stringstream err;
799
+ err << "Unknown reference to ScriptClass ";
800
+ err << classType->name()->qualifiedName();
801
+ err << ". (Did you forget to import it?)";
802
+ throw std::runtime_error(err.str());
803
+ }
804
+ return py_class;
805
+ }
806
+
807
+ struct VISIBILITY_HIDDEN tuple_slice {
808
+ /*implicit*/ tuple_slice(py::tuple tup_)
809
+ : tup(std::move(tup_)), b(0), e(tup.size()) {}
810
+ tuple_slice(py::tuple tup_, int64_t b_)
811
+ : tup(std::move(tup_)), b(b_), e(tup.size()) {}
812
+ tuple_slice(py::tuple tup_, int64_t b_, int64_t e_)
813
+ : tup(std::move(tup_)), b(b_), e(e_) {}
814
+ py::detail::tuple_iterator begin() const {
815
+ return {tup, static_cast<pybind11::ssize_t>(b)};
816
+ }
817
+ py::detail::tuple_iterator end() const {
818
+ return {tup, static_cast<pybind11::ssize_t>(e)};
819
+ }
820
+ size_t size() const {
821
+ return e - b;
822
+ }
823
+ py::detail::tuple_accessor operator[](size_t index) const {
824
+ return {tup, static_cast<size_t>(b + index)};
825
+ }
826
+
827
+ private:
828
+ py::tuple tup;
829
+ int64_t b;
830
+ int64_t e;
831
+ };
832
+
833
+ inline Stack createStackForSchema(
834
+ const FunctionSchema& schema,
835
+ const tuple_slice& args,
836
+ const py::kwargs& kwargs,
837
+ c10::optional<IValue> self) {
838
+ size_t all_arguments = (self ? 1 : 0) + args.size() + kwargs.size();
839
+ if (all_arguments > schema.arguments().size()) {
840
+ throw schema_match_error(c10::str(
841
+ schema.name(),
842
+ "() expected at most ",
843
+ schema.arguments().size(),
844
+ " argument(s) but received ",
845
+ all_arguments,
846
+ " argument(s). Declaration: ",
847
+ schema));
848
+ }
849
+ Stack stack;
850
+ stack.reserve(schema.arguments().size());
851
+
852
+ int64_t arg_idx = 0;
853
+ if (self) {
854
+ push(stack, std::move(*self));
855
+ arg_idx++;
856
+ }
857
+ // First push all positional args.
858
+ for (const auto& arg : args) {
859
+ // ...but refuse to do it if the schema says that this was supposed
860
+ // to be keyword only
861
+ if (schema.arguments()[arg_idx].kwarg_only()) {
862
+ throw schema_match_error(c10::str(
863
+ schema.name(),
864
+ "() takes ",
865
+ arg_idx,
866
+ " positional argument(s) but ",
867
+ self ? 1 + args.size() : args.size(),
868
+ " was/were given. Declaration: ",
869
+ schema));
870
+ }
871
+ // Use the type information from the schema to convert the PyObject.
872
+ push(stack, argumentToIValue(schema, stack.size(), arg));
873
+ arg_idx++;
874
+ }
875
+
876
+ // Now for every remaining non-positional argument in the schema, look for it
877
+ // in the kwargs dict and push it if found, or use its default value if it
878
+ // has one.
879
+ size_t consumed_kwargs = 0;
880
+ for (size_t i = stack.size(); i < schema.arguments().size(); ++i) {
881
+ const auto& arg = schema.arguments()[i];
882
+ if (kwargs.contains(arg.name().c_str())) {
883
+ push(stack, argumentToIValue(schema, i, kwargs[arg.name().c_str()]));
884
+ consumed_kwargs += 1;
885
+ } else if (arg.default_value()) {
886
+ push(stack, *arg.default_value());
887
+ } else {
888
+ throw schema_match_error(c10::str(
889
+ schema.name(),
890
+ "() is missing value for argument '",
891
+ arg.name(),
892
+ "'. Declaration: ",
893
+ schema));
894
+ }
895
+ }
896
+
897
+ if (consumed_kwargs != kwargs.size()) {
898
+ std::vector<std::string> names;
899
+ for (const auto& kwarg : kwargs) {
900
+ names.emplace_back(py::cast<std::string>(kwarg.first));
901
+ }
902
+ throw schema_match_error(schema.findErrorInKwargs(names));
903
+ }
904
+
905
+ return stack;
906
+ }
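For context, a sketch of how a schema drives the stack construction above. The schema string is made up, and the use of parseSchema (declared in torch/csrc/jit/frontend/function_schema_parser.h) is an assumption for illustration; this header itself only consumes an existing FunctionSchema.

void example_stack_for_schema(const py::args& args, const py::kwargs& kwargs) {
  using namespace torch::jit;
  auto schema = parseSchema(
      "example::scale(Tensor input, float factor=1.0) -> Tensor");
  // Positional args are matched first; remaining arguments come from kwargs
  // or from schema defaults, otherwise a schema_match_error is thrown.
  Stack stack = createStackForSchema(
      schema, tuple_slice(args), kwargs, /*self=*/c10::nullopt);
  TORCH_CHECK(stack.size() == schema.arguments().size());
}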
907
+
908
+ inline py::object createPyObjectForStack(Stack&& stack) {
909
+ if (stack.empty()) {
910
+ return py::none();
911
+ }
912
+
913
+ // Return a simple value and not a single-element tuple if there is only one
914
+ // return value.
915
+ if (stack.size() == 1) {
916
+ return toPyObject(std::move(stack[0]));
917
+ }
918
+
919
+ // If there is more than one return value, pop them into a py::tuple.
920
+ py::tuple return_values(stack.size());
921
+ for (const auto ret : c10::irange(return_values.size())) {
922
+ return_values[ret] = toPyObject(std::move(stack[ret]));
923
+ }
924
+
925
+ return std::move(return_values);
926
+ }
927
+
928
+ // TODO: Remove once we clean up the GraphExecutor usage.
929
+ inline Stack evilDeprecatedBadCreateStackDoNotUse(
930
+ const py::tuple& tuple,
931
+ at::ArrayRef<Value*> inputs,
932
+ size_t reserve_extra_space = 0) {
933
+ if (tuple.size() != inputs.size()) {
934
+ AT_ERROR(
935
+ "expected " + std::to_string(inputs.size()) + " inputs, but got " +
936
+ std::to_string(tuple.size()));
937
+ }
938
+ Stack result;
939
+ result.reserve(tuple.size() + reserve_extra_space);
940
+ for (const auto i : c10::irange(inputs.size())) {
941
+ result.push_back(toIValue(std::move(tuple[i]), inputs[i]->type()));
942
+ }
943
+ return result;
944
+ }
945
+
946
+ // Run `callee`, potentially inserting a CallFunction/CallMethod node into the
947
+ // tracing graph.
948
+ inline py::object runAndInsertCall(
949
+ Function& callee,
950
+ const tuple_slice& args,
951
+ const py::kwargs& kwargs,
952
+ c10::optional<IValue> self,
953
+ // Lambda that tells this function how to insert `callee` into the graph if
954
+ // we're tracing.
955
+ const std::function<Value*(Graph&, const MatchedSchema& match)>&
956
+ callInserter) {
957
+ auto stack =
958
+ createStackForSchema(callee.getSchema(), args, kwargs, std::move(self));
959
+ const auto& tracing_state = tracer::getTracingState();
960
+ if (!tracing_state) {
961
+ pybind11::gil_scoped_release no_gil_guard;
962
+ // If we're not tracing, just run the callee as normal.
963
+ callee.run(stack);
964
+ } else {
965
+ // If we are tracing, insert the appropriate CallFunction or CallMethod node
966
+ // and then run the callee with tracing disabled.
967
+
968
+ // Get the graph `Value`s that represent the input IValues
969
+ auto inputs = last(stack, callee.num_inputs());
970
+ auto input_values =
971
+ fmap(inputs, [](const IValue& v) { return tracer::getValueTrace(v); });
972
+ TORCH_INTERNAL_ASSERT(callee.getSchema().returns().size() == 1)
973
+ auto return_type = callee.getSchema().returns().at(0).type();
974
+ auto graph = tracing_state->graph;
975
+ std::vector<NamedValue> named_values;
976
+ named_values.reserve(input_values.size());
977
+ for (Value* v : input_values) {
978
+ named_values.emplace_back(v);
979
+ }
980
+
981
+ // Add a call node.
982
+ MatchedSchema match = matchSchema(
983
+ callee.getSchema(),
984
+ tracer::getPythonInterpreterSourceRange(),
985
+ *graph,
986
+ named_values,
987
+ {});
988
+ auto output_value = callInserter(*graph, match);
989
+
990
+ // Actually run the callee. Pause the tracer so that we don't double-add the
991
+ // callee nodes.
992
+ {
993
+ pybind11::gil_scoped_release no_gil_guard;
994
+ ResourceGuard guard(tracer::pauseTracing());
995
+ callee.run(stack);
996
+ }
997
+
998
+ // Associate the output IValues with the output `Value`s in the graph
999
+ tracer::setValueTrace(stack.back(), output_value);
1000
+ }
1001
+
1002
+ TORCH_CHECK(
1003
+ !stack.empty(),
1004
+ "Expected values in the stack after execution but found none");
1005
+ return toPyObject(std::move(stack.back()));
1006
+ }
1007
+
1008
+ inline c10::optional<py::object> maybeTorchFunctionDispatch(
1009
+ const py::object& callee,
1010
+ const tuple_slice& args_no_self,
1011
+ const py::kwargs& kwargs,
1012
+ const c10::QualifiedName qualname) {
1013
+ std::vector<py::handle> args_vec;
1014
+ for (const auto& arg : args_no_self) {
1015
+ args_vec.push_back(arg);
1016
+ }
1017
+ py::tuple args = py::cast(args_vec);
1018
+
1019
+ // Handle __torch_function__ dispatch
1020
+ std::vector<PyObject*> overloaded_args;
1021
+ size_t total_arg_num = args.size() + kwargs.size();
1022
+ for (const auto& arg : args) {
1023
+ is_tensor_and_append_overloaded(arg.ptr(), &overloaded_args);
1024
+ is_tensor_list_and_append_overloaded(
1025
+ arg.ptr(),
1026
+ &overloaded_args,
1027
+ static_cast<int>(total_arg_num),
1028
+ false /* throw_error */);
1029
+ }
1030
+ // NB: for kwargs, we cannot guarantee the order of appending
1031
+ // is the same as the argument order in operator's schema.
1032
+ // This is suboptimal, but should be fine. Later when we have
1033
+ // better schema matching and argument parsing, we could
1034
+ // match the operator in `operations` first, then the order will
1035
+ // be guaranteed.
1036
+ for (auto item : kwargs) {
1037
+ is_tensor_and_append_overloaded(item.second.ptr(), &overloaded_args);
1038
+ is_tensor_list_and_append_overloaded(
1039
+ item.second.ptr(),
1040
+ &overloaded_args,
1041
+ total_arg_num,
1042
+ false /* throw_error */);
1043
+ }
1044
+ if (!overloaded_args.empty()) {
1045
+ return pybind11::reinterpret_steal<py::object>(
1046
+ handle_torch_function_no_python_arg_parser(
1047
+ /*overloaded_args=*/overloaded_args,
1048
+ /*args=*/args.ptr(),
1049
+ /*kwargs=*/kwargs.ptr(),
1050
+ /*func_name=*/qualname.name().c_str(),
1051
+ /*torch_api_function=*/callee.ptr(),
1052
+ /*module_name=*/qualname.prefix().c_str()));
1053
+ }
1054
+
1055
+ return c10::nullopt;
1056
+ }
1057
+
1058
+ inline py::object invokeScriptFunctionFromPython(
1059
+ Function& callee,
1060
+ const tuple_slice& args,
1061
+ const py::kwargs& kwargs) {
1062
+ // TODO: we could add __torch_function__ dispatch here but I don't know
1063
+ // the implications of doing so
1064
+
1065
+ return runAndInsertCall(
1066
+ callee,
1067
+ args,
1068
+ kwargs,
1069
+ /*self=*/c10::nullopt,
1070
+ [&](Graph& graph, const MatchedSchema& match) {
1071
+ return graph.insertFunctionCall(&callee, match);
1072
+ });
1073
+ }
1074
+
1075
+ inline py::object invokeScriptMethodFromPython(
1076
+ Method& callee,
1077
+ const tuple_slice& args,
1078
+ const py::kwargs& kwargs) {
1079
+ auto self = callee.owner()._ivalue();
1080
+
1081
+ if (auto torch_fn_result = maybeTorchFunctionDispatch(
1082
+ py::cast(callee), args, kwargs, callee.name())) {
1083
+ return *torch_fn_result;
1084
+ }
1085
+
1086
+ return runAndInsertCall(
1087
+ callee.function(),
1088
+ args,
1089
+ kwargs,
1090
+ self,
1091
+ [&](Graph& graph, const MatchedSchema& match) {
1092
+ return graph.insertMethodCall(callee.name(), match);
1093
+ });
1094
+ }
1095
+
1096
+ TORCH_PYTHON_API std::pair<std::shared_ptr<Operator>, Stack> getOpWithStack(
1097
+ const std::vector<std::shared_ptr<Operator>>& operations,
1098
+ py::args args,
1099
+ const py::kwargs& kwargs);
1100
+
1101
+ TORCH_PYTHON_API py::object invokeOperatorFromPython(
1102
+ const std::vector<std::shared_ptr<Operator>>& operations,
1103
+ py::args args,
1104
+ const py::kwargs& kwargs,
1105
+ c10::optional<c10::DispatchKey> dk = c10::nullopt);
1106
+
1107
+ TORCH_PYTHON_API py::object _get_operation_for_overload_or_packet(
1108
+ const std::vector<std::shared_ptr<Operator>>& operations,
1109
+ Symbol symbol,
1110
+ py::args args,
1111
+ const py::kwargs& kwargs,
1112
+ bool is_overload,
1113
+ c10::optional<c10::DispatchKey> dk = c10::nullopt);
1114
+
1115
+ } // namespace torch::jit
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_arg_flatten.h ADDED
@@ -0,0 +1,119 @@
1
+ #pragma once
2
+
3
+ #include <c10/util/hash.h>
4
+ #include <c10/util/irange.h>
5
+ #include <torch/csrc/autograd/variable.h>
6
+ #include <torch/csrc/jit/python/pybind.h>
7
+
8
+ #include <ATen/ATen.h>
9
+ #include <functional>
10
+ #include <tuple>
11
+ #include <vector>
12
+
13
+ namespace torch::jit::python {
14
+
15
+ struct IODescriptor {
16
+ struct VariableMetadata {
17
+ VariableMetadata(const autograd::Variable& var)
18
+ : sizes(var.sizes().vec()),
19
+ type(var.scalar_type()),
20
+ device(var.device()),
21
+ requires_grad(var.requires_grad()) {}
22
+
23
+ bool operator==(const VariableMetadata& o) const {
24
+ return std::tie(device, requires_grad, type, sizes) ==
25
+ std::tie(o.device, o.requires_grad, o.type, o.sizes);
26
+ }
27
+
28
+ static size_t hash(const VariableMetadata& m) {
29
+ return c10::get_hash(m.sizes, m.device, m.requires_grad, m.type);
30
+ }
31
+
32
+ std::vector<int64_t> sizes;
33
+ at::ScalarType type;
34
+ at::Device device;
35
+ bool requires_grad;
36
+ };
37
+
38
+ bool operator==(const IODescriptor& o) const {
39
+ return std::tie(structure, metadata, grad_enabled) ==
40
+ std::tie(o.structure, o.metadata, o.grad_enabled);
41
+ }
42
+
43
+ static size_t hash(const IODescriptor& o) {
44
+ return c10::get_hash(o.structure, o.metadata, o.grad_enabled);
45
+ }
46
+
47
+ void extend(const autograd::variable_list& list) {
48
+ metadata.reserve(metadata.size() + list.size());
49
+ for (auto& var : list)
50
+ metadata.emplace_back(var);
51
+ }
52
+
53
+ // Description of argument structure. Variables are replaced with
54
+ // different characters, depending on their flags; beginnings and
55
+ // ends of tuples and lists are denoted by a pair of parentheses
56
+ // of the corresponding kind. They should always be paired.
57
+ // Example desc: (vv[v(v)v])
58
+ // NOTE: if extend() was ever called then metadata.size() can be
59
+ // different than the number of 'v's in structure.
60
+ std::string structure;
61
+ std::vector<std::string> strings;
62
+ std::vector<VariableMetadata> metadata;
63
+ bool grad_enabled = false;
64
+ };
65
+
66
+ static inline std::ostream& operator<<(
67
+ std::ostream& out,
68
+ const IODescriptor::VariableMetadata& meta) {
69
+ at::Device meta_device = meta.device;
70
+ auto& t = at::getDeprecatedTypeProperties(
71
+ meta_device.is_cpu() ? at::Backend::CPU : at::Backend::CUDA, meta.type);
72
+ out << t << "(requires_grad=" << meta.requires_grad;
73
+ if (meta_device.is_cuda()) {
74
+ out << ", device=" << meta_device.index();
75
+ }
76
+ out << ") {";
77
+ for (const auto i : c10::irange(meta.sizes.size())) {
78
+ if (i > 0)
79
+ out << ", ";
80
+ out << meta.sizes[i];
81
+ }
82
+ out << "}";
83
+ return out;
84
+ }
85
+
86
+ static inline std::ostream& operator<<(
87
+ std::ostream& out,
88
+ const IODescriptor& desc) {
89
+ out << desc.structure << "\n";
90
+ out << " with grad_enabled=" << desc.grad_enabled << "\n";
91
+ for (const auto i : c10::irange(desc.metadata.size())) {
92
+ out << " with v" << i << " having type " << desc.metadata[i] << "\n";
93
+ }
94
+ return out;
95
+ }
96
+
97
+ struct ParsedArgs {
98
+ // Flat vector of Variables found in arguments
99
+ autograd::variable_list vars;
100
+ // Metadata describing nesting of objects received from Python and
101
+ // metadata of vars and whether grad is enabled.
102
+ IODescriptor desc;
103
+
104
+ void extend(const autograd::variable_list& list) {
105
+ if (list.empty())
106
+ return;
107
+ vars.reserve(vars.size() + list.size());
108
+ for (auto& var : list)
109
+ vars.emplace_back(var);
110
+ desc.extend(list);
111
+ }
112
+ };
113
+
114
+ ParsedArgs flatten(py::handle obj);
115
+ PyObject* unflatten(
116
+ at::ArrayRef<autograd::Variable> vars,
117
+ const IODescriptor& structure);
118
+
119
+ } // namespace torch::jit::python
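A sketch of the intended flatten/unflatten round trip, under the assumption that unflatten returns a new reference and that the GIL is held; the function name is illustrative.

py::object example_flatten_roundtrip(py::handle nested_inputs) {
  using namespace torch::jit::python;
  ParsedArgs parsed = flatten(nested_inputs);
  // parsed.vars is the flat list of Variables found; parsed.desc records the
  // nesting structure (e.g. "(vv[v(v)v])") and per-variable metadata.
  PyObject* rebuilt = unflatten(parsed.vars, parsed.desc);
  return py::reinterpret_steal<py::object>(rebuilt);
}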
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_ir.h ADDED
@@ -0,0 +1,50 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+ #include <torch/csrc/utils/object_ptr.h>
5
+
6
+ namespace torch::jit {
7
+
8
+ void initPythonIRBindings(PyObject* module);
9
+
10
+ // execute a Python function, used for Ops we can't optimize but that we want to
11
+ // optimize around
12
+ struct ConcretePythonOp : public PythonOp {
13
+ static Symbol Kind;
14
+
15
+ ConcretePythonOp(Graph* graph) : PythonOp(graph, ::c10::prim::PythonOp) {}
16
+ ConcretePythonOp* init(
17
+ THPObjectPtr&& pyobj,
18
+ const std::string& cconv,
19
+ pyobj_list&& scalar_args) {
20
+ this->pyobj = std::move(pyobj);
21
+ this->scalar_args = std::move(scalar_args);
22
+ this->cconv = cconv;
23
+ return this;
24
+ }
25
+ // The Python object which contains the implementation of this function.
26
+ // This is either a class (non-legacy) or an object (legacy). See
27
+ // TraceInterpreterState for execution semantics.
28
+ THPObjectPtr pyobj;
29
+ // The calling convention for the Python function.
30
+ // 'c' -- constant argument
31
+ // 'd' -- dynamic argument
32
+ std::string cconv;
33
+ // Scalar arguments to the Python function. Not necessarily passed to
34
+ // the function in this order; see cconv for the correct order.
35
+ std::vector<THPObjectPtr> scalar_args;
36
+
37
+ std::string name() const override;
38
+ void cloneFrom(Node* other_) override;
39
+ Node* allocNewInstance(Graph* g) override {
40
+ return new ConcretePythonOp(g);
41
+ }
42
+ // recover the autograd.Function instance, if this PythonOp's function
43
+ // was originally SomeFunction.apply
44
+ // used in ONNX for discovering symbolics
45
+ c10::optional<THPObjectPtr> autogradFunction() const override;
46
+ void writeScalars(std::ostream& out) const override;
47
+ void lint_python() const override;
48
+ };
49
+
50
+ } // namespace torch::jit
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_ivalue.h ADDED
@@ -0,0 +1,97 @@
1
+ #pragma once
2
+ #include <ATen/core/ivalue.h>
3
+ #include <pybind11/pybind11.h>
4
+ #include <torch/csrc/jit/python/pybind_utils.h>
5
+ #include <torch/csrc/python_headers.h>
6
+ #include <torch/csrc/utils/pybind.h>
7
+
8
+ namespace py = pybind11;
9
+
10
+ namespace c10::ivalue {
11
+
12
+ // concrete ivalue Holder that hold a py::object
13
+ struct C10_EXPORT ConcretePyObjectHolder final : PyObjectHolder {
14
+ public:
15
+ static c10::intrusive_ptr<PyObjectHolder> create(py::object py_obj) {
16
+ return c10::make_intrusive<ConcretePyObjectHolder>(std::move(py_obj));
17
+ }
18
+
19
+ static c10::intrusive_ptr<PyObjectHolder> create(const py::handle& handle) {
20
+ py::gil_scoped_acquire ag;
21
+ return c10::make_intrusive<ConcretePyObjectHolder>(
22
+ handle.cast<py::object>());
23
+ }
24
+
25
+ PyObject* getPyObject() override {
26
+ return py_obj_.ptr();
27
+ }
28
+
29
+ InferredType tryToInferType() override {
30
+ pybind11::gil_scoped_acquire ag;
31
+ return torch::jit::tryToInferType(py_obj_);
32
+ }
33
+
34
+ IValue toIValue(const TypePtr& type, c10::optional<int32_t> N = c10::nullopt)
35
+ override {
36
+ pybind11::gil_scoped_acquire ag;
37
+ return torch::jit::toIValue(py_obj_, type, N);
38
+ }
39
+
40
+ std::string toStr() override {
41
+ pybind11::gil_scoped_acquire ag;
42
+ return py::str(py_obj_);
43
+ }
44
+
45
+ std::vector<at::Tensor> extractTensors() override {
46
+ // We could implement this entirely in C++ via pybind11 but it turns out to
47
+ // be substantially slower. Namely, the total time taken by markCompleted on
48
+ // a CUDAFuture is 21.5us with this implementation, but goes up to 58.7us
49
+ // when using C++. The reason is unclear.
50
+ try {
51
+ pybind11::gil_scoped_acquire ag;
52
+ static py::object& extractorFn = *new py::object(
53
+ py::module::import("torch._jit_internal").attr("_extract_tensors"));
54
+ return extractorFn(py_obj_).cast<std::vector<at::Tensor>>();
55
+ } catch (py::error_already_set& e) {
56
+ auto err = std::runtime_error(
57
+ c10::str("Cannot extract tensors from value: ", e.what()));
58
+ {
59
+ pybind11::gil_scoped_acquire ag;
60
+ e.restore();
61
+ PyErr_Clear();
62
+ }
63
+ throw err;
64
+ }
65
+ }
66
+
67
+ // Note [Destructing py::object]
68
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~
69
+ //
70
+ // (1) Why py_obj_ = py::none(); does not work. Because we also need to
71
+ // acquire GIL when destructing py::object of None that de-references None.
72
+ // https://docs.python.org/3/c-api/none.html#c.Py_RETURN_NONE
73
+ //
74
+ // https://stackoverflow.com/questions/15287590/why-should-py-increfpy-none-be-required-before-returning-py-none-in-c
75
+ //
76
+ // (2) Why we need to call dec_ref() explicitly. Because py::object of
77
+ // nullptr, on destruction, effectively does nothing because of it calls
78
+ // Py_XDECREF(NULL) underlying.
79
+ // https://docs.python.org/3/c-api/refcounting.html#c.Py_XDECREF
80
+ ~ConcretePyObjectHolder() override {
81
+ pybind11::gil_scoped_acquire ag;
82
+ py_obj_.dec_ref();
83
+ // explicitly setting PyObject* to nullptr to prevent py::object's dtor from
84
+ // decref'ing the PyObject again.
85
+ py_obj_.ptr() = nullptr;
86
+ }
87
+
88
+ // explicit construction to avoid erroneous implicit conversion and
89
+ // copy-initialization
90
+ explicit ConcretePyObjectHolder(py::object py_obj)
91
+ : py_obj_(std::move(py_obj)) {}
92
+
93
+ private:
94
+ py::object py_obj_;
95
+ };
96
+
97
+ } // namespace c10::ivalue
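A minimal sketch of wrapping a Python object into an IValue via the holder above (GIL held). The IValue constructor taking an intrusive_ptr<PyObjectHolder> and the round trip through torch::jit::toPyObject are assumptions stated for illustration; the function name is made up.

py::object example_wrap_pyobject(py::object obj) {
  // The holder keeps the py::object alive and, on destruction, releases it
  // with the GIL held (see Note [Destructing py::object] above).
  auto holder = c10::ivalue::ConcretePyObjectHolder::create(obj);
  c10::IValue wrapped(std::move(holder));  // tagged as a PyObject IValue
  return torch::jit::toPyObject(std::move(wrapped));  // round-trips to obj
}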
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_sugared_value.h ADDED
@@ -0,0 +1,376 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/api/module.h>
4
+ #include <torch/csrc/jit/frontend/concrete_module_type.h>
5
+ #include <torch/csrc/jit/frontend/sugared_value.h>
6
+ #include <torch/csrc/jit/python/pybind_utils.h>
7
+ #include <memory>
8
+ #include <sstream>
9
+ #include <string>
10
+ #include <utility>
11
+ #include <vector>
12
+
13
+ namespace torch::jit {
14
+
15
+ std::string typeString(py::handle h);
16
+
17
+ inline std::shared_ptr<SugaredValue> toSimple(Value* v) {
18
+ return std::make_shared<SimpleValue>(v);
19
+ }
20
+
21
+ // NB: This should be the single entry-point for instantiating a SugaredValue
22
+ // from a Python object. If you are adding support for converting a new Python
23
+ // type, *add it in this function's implementation*.
24
+ std::shared_ptr<SugaredValue> toSugaredValue(
25
+ py::object obj,
26
+ GraphFunction& m,
27
+ const SourceRange& loc,
28
+ bool is_constant = false);
29
+
30
+ c10::optional<StrongFunctionPtr> as_function(const py::object& obj);
31
+
32
+ struct VISIBILITY_HIDDEN PythonValue : public SugaredValue {
33
+ PythonValue(
34
+ py::object the_self,
35
+ c10::optional<py::object> rcb = c10::nullopt,
36
+ Value* module_self = nullptr)
37
+ : self(std::move(the_self)),
38
+ rcb(std::move(rcb)),
39
+ moduleSelf_(module_self) {}
40
+
41
+ FunctionSchema getSchema(
42
+ const size_t n_args,
43
+ const size_t n_binders,
44
+ const SourceRange& loc);
45
+
46
+ // call it like a function, e.g. `outputs = this(inputs)`
47
+ std::shared_ptr<SugaredValue> call(
48
+ const SourceRange& loc,
49
+ GraphFunction& m,
50
+ at::ArrayRef<NamedValue> args,
51
+ at::ArrayRef<NamedValue> kwargs,
52
+ size_t n_binders) override;
53
+
54
+ std::string kind() const override;
55
+
56
+ std::vector<std::shared_ptr<SugaredValue>> asTuple(
57
+ const SourceRange& loc,
58
+ GraphFunction& m,
59
+ const c10::optional<size_t>& size_hint = {}) override;
60
+
61
+ std::shared_ptr<SugaredValue> attr(
62
+ const SourceRange& loc,
63
+ GraphFunction& m,
64
+ const std::string& field) override;
65
+
66
+ Value* asValue(const SourceRange& loc, GraphFunction& m) override {
67
+ throw ErrorReport(loc)
68
+ << kind() << " cannot be used as a value. "
69
+ << "Perhaps it is a closed-over global variable? If so, please "
70
+ << "consider passing it in as an argument or using a local variable "
71
+ << "instead.";
72
+ }
73
+
74
+ protected:
75
+ py::object getattr(const SourceRange& loc, const std::string& name);
76
+
77
+ void checkForAddToConstantsError(std::stringstream& ss);
78
+
79
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
80
+ py::object self;
81
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
82
+ c10::optional<py::object> rcb;
83
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
84
+ Value* moduleSelf_ = nullptr;
85
+ };
86
+
87
+ struct VISIBILITY_HIDDEN PythonModuleValue : public PythonValue {
88
+ explicit PythonModuleValue(py::object mod) : PythonValue(std::move(mod)) {}
89
+
90
+ std::shared_ptr<SugaredValue> attr(
91
+ const SourceRange& loc,
92
+ GraphFunction& m,
93
+ const std::string& field) override;
94
+ };
95
+
96
+ // Used for desugaring uses of the torch.cuda module. All the CUDA APIs with
97
+ // torch.cuda.* are resolved using CUDAPythonModuleValue.
98
+ struct VISIBILITY_HIDDEN CUDAPythonModuleValue : public PythonValue {
99
+ explicit CUDAPythonModuleValue(py::object mod)
100
+ : PythonValue(std::move(mod)) {}
101
+
102
+ std::shared_ptr<SugaredValue> attr(
103
+ const SourceRange& loc,
104
+ GraphFunction& m,
105
+ const std::string& field) override;
106
+ };
107
+
108
+ // Represents all the parameters of a module as a List[Tensor]
109
+ struct VISIBILITY_HIDDEN ConstantParameterList : public SugaredValue {
110
+ ConstantParameterList(Value* the_list) : the_list_(the_list) {}
111
+ std::string kind() const override {
112
+ return "constant parameter list";
113
+ }
114
+ std::shared_ptr<SugaredValue> call(
115
+ const SourceRange& loc,
116
+ GraphFunction& caller,
117
+ at::ArrayRef<NamedValue> args,
118
+ at::ArrayRef<NamedValue> kwargs,
119
+ size_t n_binders) override {
120
+ return toSimple(the_list_);
121
+ }
122
+
123
+ private:
124
+ Value* the_list_;
125
+ };
126
+
127
+ struct VISIBILITY_HIDDEN ModuleDictMethod : public SugaredValue {
128
+ explicit ModuleDictMethod(SugaredValuePtr iterable, std::string name)
129
+ : iterable_(std::move(iterable)), name_(std::move(name)){};
130
+
131
+ std::string kind() const override {
132
+ return name_;
133
+ }
134
+
135
+ std::shared_ptr<SugaredValue> call(
136
+ const SourceRange& loc,
137
+ GraphFunction& f,
138
+ at::ArrayRef<NamedValue> args,
139
+ at::ArrayRef<NamedValue> kwargs,
140
+ size_t n_binders) override {
141
+ if (!args.empty() || !kwargs.empty()) {
142
+ throw ErrorReport(loc)
143
+ << name_ << " method does not accept any arguments";
144
+ }
145
+ return iterable_;
146
+ }
147
+
148
+ SugaredValuePtr iterable_;
149
+ const std::string name_;
150
+ };
151
+
152
+ struct SugaredDict;
153
+
154
+ // defines how modules/methods behave inside the script subset.
155
+ // for now this does not have any interaction with python.
156
+ // in the future, we will add the ability to resolve `self.foo` to python
157
+ // {functions, modules, constants} so this SugaredValue is defined here
158
+ // anticipating we will eventually need to replace Module with a py::object
159
+ // holding the actual nn.Module class.
160
+
161
+ struct VISIBILITY_HIDDEN ModuleValue : public SugaredValue {
162
+ ModuleValue(Value* self, std::shared_ptr<ConcreteModuleType> concreteType)
163
+ : self_(self), concreteType_(std::move(concreteType)) {}
164
+
165
+ std::string kind() const override {
166
+ return "module";
167
+ }
168
+
169
+ Value* asValue(const SourceRange& loc, GraphFunction& m) override;
170
+
171
+ SugaredValuePtr asTupleValue(const SourceRange& loc, GraphFunction& m)
172
+ override;
173
+
174
+ // select an attribute on it, e.g. `this.field`
175
+ std::shared_ptr<SugaredValue> tryGetAttr(
176
+ const SourceRange& loc,
177
+ GraphFunction& m,
178
+ const std::string& field);
179
+
180
+ // select an attribute on it, e.g. `this.field`
181
+ std::shared_ptr<SugaredValue> attr(
182
+ const SourceRange& loc,
183
+ GraphFunction& m,
184
+ const std::string& field) override;
185
+
186
+ // select an attribute on it, e.g. `this.field`
187
+ bool hasAttr(
188
+ const SourceRange& loc,
189
+ GraphFunction& m,
190
+ const std::string& field) override;
191
+
192
+ // call module.forward with pre_hooks and hooks
193
+ std::shared_ptr<SugaredValue> call(
194
+ const SourceRange& loc,
195
+ GraphFunction& caller,
196
+ at::ArrayRef<NamedValue> args,
197
+ at::ArrayRef<NamedValue> kwargs,
198
+ size_t n_binders) override;
199
+
200
+ std::shared_ptr<SugaredDict> getSugaredDict(
201
+ const SourceRange& loc,
202
+ GraphFunction& m);
203
+
204
+ std::shared_ptr<SugaredDict> getSugaredNamedBufferDict(
205
+ const SourceRange& loc,
206
+ GraphFunction& m);
207
+
208
+ std::shared_ptr<SugaredDict> getSugaredNamedParameterList(
209
+ const SourceRange& loc,
210
+ GraphFunction& m);
211
+
212
+ std::shared_ptr<SugaredDict> getSugaredNamedParameterDict(
213
+ const SourceRange& loc,
214
+ GraphFunction& m);
215
+
216
+ void setAttr(
217
+ const SourceRange& loc,
218
+ GraphFunction& m,
219
+ const std::string& field,
220
+ Value* newValue) override;
221
+
222
+ SugaredValuePtr iter(const SourceRange& loc, GraphFunction& m) override;
223
+
224
+ std::shared_ptr<SugaredValue> getitem(
225
+ const SourceRange& loc,
226
+ GraphFunction& m,
227
+ Value* idx,
228
+ TypePtr type_hint) override;
229
+
230
+ private:
231
+ // Check that the type of all submodules is a subtype of ty. If the function
232
+ // returns false, more information about why it returns false (e.g. which
233
+ // submodule's type is not a subtype of ty) is printed to why_not if it is not
234
+ // null.
235
+ bool areAllSubmodulesSubtypeOf(
236
+ const TypePtr& ty,
237
+ std::ostream* why_not = nullptr) const;
238
+
239
+ Value* self_;
240
+ std::shared_ptr<ConcreteModuleType> concreteType_;
241
+ };
242
+
243
+ bool isNamedTupleClass(const py::object& obj);
244
+ TypePtr registerNamedTuple(
245
+ const py::object& obj,
246
+ const SourceRange& loc,
247
+ const ResolutionCallback& rcb);
248
+
249
+ void recurseThroughNestedModules(
250
+ const SourceRange& loc,
251
+ GraphFunction& m,
252
+ std::vector<SugaredValuePtr>& keys,
253
+ std::vector<SugaredValuePtr>& values,
254
+ std::shared_ptr<ModuleValue>& self,
255
+ const std::string& prefix,
256
+ const std::string& field);
257
+
258
+ // Used to support named_modules()
259
+ struct VISIBILITY_HIDDEN SugaredDict : public SugaredValue {
260
+ explicit SugaredDict(
261
+ std::shared_ptr<ModuleValue> self,
262
+ std::shared_ptr<SugaredTupleValue> keys,
263
+ std::shared_ptr<SugaredTupleValue> modules)
264
+ : self_(std::move(self)),
265
+ keys_(std::move(keys)),
266
+ modules_(std::move(modules)) {}
267
+
268
+ std::string kind() const override {
269
+ return "ModuleDict";
270
+ }
271
+
272
+ std::shared_ptr<SugaredTupleValue> getKeys() {
273
+ return keys_;
274
+ }
275
+
276
+ std::shared_ptr<SugaredTupleValue> getModules() {
277
+ return modules_;
278
+ }
279
+
280
+ std::shared_ptr<SugaredValue> attr(
281
+ const SourceRange& loc,
282
+ GraphFunction& m,
283
+ const std::string& field) override;
284
+
285
+ SugaredValuePtr iter(const SourceRange& loc, GraphFunction& m) override {
286
+ return keys_;
287
+ };
288
+
289
+ std::shared_ptr<ModuleValue> self_;
290
+ std::shared_ptr<SugaredTupleValue> keys_;
291
+ std::shared_ptr<SugaredTupleValue> modules_;
292
+ };
293
+
294
+ struct VISIBILITY_HIDDEN BooleanDispatchValue : public SugaredValue {
295
+ BooleanDispatchValue(py::dict dispatched_fn)
296
+ : dispatched_fn_(std::move(dispatched_fn)) {}
297
+
298
+ std::string kind() const override {
299
+ return "boolean dispatch";
300
+ }
301
+
302
+ std::shared_ptr<SugaredValue> call(
303
+ const SourceRange& loc,
304
+ GraphFunction& caller,
305
+ at::ArrayRef<NamedValue> args,
306
+ at::ArrayRef<NamedValue> kwargs,
307
+ size_t n_binders) override;
308
+
309
+ private:
310
+ py::dict dispatched_fn_;
311
+ };
312
+
313
+ struct VISIBILITY_HIDDEN PythonClassValue : public ClassValue {
314
+ PythonClassValue(ClassTypePtr type, py::object py_type)
315
+ : ClassValue(std::move(type)), py_type_(std::move(py_type)) {}
316
+
317
+ std::string kind() const override {
318
+ return "Python type";
319
+ }
320
+
321
+ std::shared_ptr<SugaredValue> attr(
322
+ const SourceRange& loc,
323
+ GraphFunction& m,
324
+ const std::string& field) override;
325
+
326
+ bool hasAttr(
327
+ const SourceRange& loc,
328
+ GraphFunction& m,
329
+ const std::string& field) override;
330
+
331
+ private:
332
+ py::object py_type_;
333
+ };
334
+
335
+ struct VISIBILITY_HIDDEN PythonExceptionValue : public ExceptionValue {
336
+ explicit PythonExceptionValue(const py::object& exception_class)
337
+ : ExceptionValue(
338
+ py::str(py::getattr(exception_class, "__name__", py::str("")))),
339
+ exception_class_qualified_name_(
340
+ py::str(py::module::import("torch._jit_internal")
341
+ .attr("_qualified_name")(
342
+ exception_class,
343
+ /*mangle_name=*/false))) {}
344
+
345
+ std::string kind() const override {
346
+ return "Python exception";
347
+ }
348
+
349
+ std::shared_ptr<SugaredValue> call(
350
+ const SourceRange& loc,
351
+ GraphFunction& caller,
352
+ at::ArrayRef<NamedValue> args,
353
+ at::ArrayRef<NamedValue> kwargs,
354
+ size_t n_binders) override;
355
+
356
+ private:
357
+ std::string exception_class_qualified_name_;
358
+ };
359
+
360
+ // Python Slice class.
361
+ struct VISIBILITY_HIDDEN PythonSliceClass : public SugaredValue {
362
+ explicit PythonSliceClass() = default;
363
+
364
+ std::string kind() const override {
365
+ return "Python slice class";
366
+ }
367
+
368
+ std::shared_ptr<SugaredValue> call(
369
+ const SourceRange& loc,
370
+ GraphFunction& caller,
371
+ at::ArrayRef<NamedValue> args,
372
+ at::ArrayRef<NamedValue> kwargs,
373
+ size_t n_binders) override;
374
+ };
375
+
376
+ } // namespace torch::jit
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_tree_views.h ADDED
@@ -0,0 +1,9 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/python_headers.h>
4
+
5
+ namespace torch::jit {
6
+
7
+ void initTreeViewBindings(PyObject* module);
8
+
9
+ } // namespace torch::jit
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/block_codegen.h ADDED
@@ -0,0 +1,150 @@
1
+ #pragma once
2
+
3
+ #include <string>
4
+ #include <unordered_map>
5
+ #include <unordered_set>
6
+ #include <utility>
7
+
8
+ #include <ATen/ATen.h>
9
+ #include <torch/csrc/jit/resource_guard.h>
10
+ #include <torch/csrc/jit/tensorexpr/analysis.h>
11
+ #include <torch/csrc/jit/tensorexpr/codegen.h>
12
+ #include <torch/csrc/jit/tensorexpr/ir.h>
13
+ #include <torch/csrc/jit/tensorexpr/ir_printer.h>
14
+ #include <torch/csrc/jit/tensorexpr/ir_visitor.h>
15
+ #include <torch/csrc/jit/tensorexpr/unique_name_manager.h>
16
+
17
+ namespace torch {
18
+ namespace jit {
19
+ namespace tensorexpr {
20
+
21
+ // A class that analyzes the parts of the given program relevant to the Block backend.
22
+ class BlockAnalysis : public IRVisitor {
23
+ public:
24
+ bool is_buf_store_target(BufPtr buf) const {
25
+ return store_targets_.count(buf) > 0;
26
+ }
27
+
28
+ const std::unordered_set<BufPtr>& loads() const {
29
+ return loads_;
30
+ }
31
+
32
+ const std::unordered_set<BufPtr>& stores() const {
33
+ return store_targets_;
34
+ }
35
+
36
+ int block_size() const {
37
+ return block_size_;
38
+ }
39
+
40
+ bool areBufsInMap(const std::unordered_set<BufPtr>& bufs) const;
41
+
42
+ BufPtr getMultiDimBuf(BufPtr buf) const;
43
+
44
+ std::string getInputName(BufPtr buf) const;
45
+
46
+ std::string getFlatInputName(BufPtr buf) const {
47
+ return getInputName(std::move(buf)) + "_flat";
48
+ }
49
+
50
+ std::unordered_map<std::string, BufPtr> getBufferMap() const {
51
+ return map_input_to_tensor_bufs_;
52
+ }
53
+
54
+ private:
55
+ void visit(StorePtr v) override;
56
+ void visit(LoadPtr v) override;
57
+ void visit(ForPtr v) override;
58
+
59
+ std::unordered_map<std::string, BufPtr> map_input_to_tensor_bufs_;
60
+ std::unordered_set<BufPtr> store_targets_;
61
+ std::unordered_set<BufPtr> loads_;
62
+ int block_size_ = 32;
63
+ };
64
+
65
+ // A class that overrides the underlying IRPrinter to produce Block backend code.
66
+ class BlockPrinter : public IRPrinter {
67
+ public:
68
+ BlockPrinter(std::ostream* os, BlockAnalysis* block_analysis)
69
+ : IRPrinter(*os), block_analysis_(block_analysis) {}
70
+
71
+ using IRPrinter::name_manager;
72
+ using IRPrinter::visit;
73
+
74
+ private:
75
+ BlockAnalysis* block_analysis_;
76
+ std::unordered_map<std::string, int> dim_values_map;
77
+ std::vector<std::string> dim_names = {"N", "H", "W", "C"};
78
+ std::vector<std::string> flat_dim_names = {"N", "NH", "NHW", "NHWC"};
79
+ void PrintTensorInfo(const std::unordered_set<BufPtr>& bufs);
80
+ void PrintArguments(const std::unordered_set<BufPtr>& bufs);
81
+ void PrintBufferInfo(const std::unordered_set<BufPtr>& bufs);
82
+ void PrintDistribution(const std::unordered_set<BufPtr>& bufs);
83
+ void PrintLoop(const std::unordered_set<BufPtr>& bufs, bool block_idx = true);
84
+ void PrintReshapeInfo(
85
+ const std::unordered_set<BufPtr>& bufs,
86
+ bool reverse = false);
87
+ void PrintDMAs(const std::unordered_set<BufPtr>& bufs);
88
+ void PrintAdjustBuffers(const std::unordered_set<BufPtr>& bufs);
89
+
90
+ void visit(ForPtr v) override;
91
+ void visit(LoadPtr v) override;
92
+ void visit(StorePtr v) override;
93
+ void visit(BlockPtr v) override;
94
+ void visit(AddPtr v) override;
95
+ void visit(MulPtr v) override;
96
+ };
97
+
98
+ class TORCH_API BlockCodeGen : public CodeGen {
99
+ public:
100
+ template <typename... Ts>
101
+ /* implicit */
102
+ BlockCodeGen(StmtPtr stmt, Ts... ts)
103
+ : CodeGen(
104
+ stmt,
105
+ std::vector<BufferArg>({BufferArg(ts)...}),
106
+ at::Device(at::kCPU)) {
107
+ Initialize();
108
+ }
109
+
110
+ BlockCodeGen(
111
+ StmtPtr stmt,
112
+ const std::vector<BufferArg>& buffer_args,
113
+ at::Device device = at::Device(at::kCPU),
114
+ const std::string& kernel_func_name = "func")
115
+ : CodeGen(stmt, buffer_args, device, kernel_func_name) {
116
+ Initialize();
117
+ }
118
+
119
+ ~BlockCodeGen() override;
120
+
121
+ void call(const std::vector<CallArg>& args) override;
122
+ void call_raw(const std::vector<void*>& args) override;
123
+
124
+ void Initialize();
125
+
126
+ std::string getCodeText(const std::string& attr = "") override {
127
+ return oss_.str();
128
+ }
129
+
130
+ private:
131
+ UniqueNameManager* name_manager() {
132
+ if (!printer_) {
133
+ throw std::runtime_error("Null IRPrinter is not expected");
134
+ }
135
+ return printer_->name_manager();
136
+ }
137
+
138
+ std::ostream& os() {
139
+ return printer_->os();
140
+ }
141
+
142
+ std::ostringstream oss_;
143
+ std::unique_ptr<BlockPrinter> printer_;
144
+ std::unique_ptr<BlockAnalysis> block_analysis_;
145
+
146
+ std::string GetUniqueFuncName(const std::string& func_prefix);
147
+ };
148
+ } // namespace tensorexpr
149
+ } // namespace jit
150
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/bounds_inference.h ADDED
@@ -0,0 +1,80 @@
1
+ #pragma once
2
+
3
+ #include <map>
4
+ #include <unordered_map>
5
+ #include <vector>
6
+
7
+ #include <torch/csrc/Export.h>
8
+ #include <torch/csrc/jit/tensorexpr/mem_dependency_checker.h>
9
+
10
+ namespace torch {
11
+ namespace jit {
12
+ namespace tensorexpr {
13
+
14
+ class Expr;
15
+ class Buf;
16
+ class Stmt;
17
+
18
+ enum C10_API_ENUM TensorAccessKind { kLoad, kStore, kMutate };
19
+
20
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
21
+ struct TORCH_API TensorAccessBoundsInfo {
22
+ TensorAccessKind kind;
23
+ std::vector<ExprPtr> start;
24
+ std::vector<ExprPtr> stop;
25
+ };
26
+
27
+ using BoundsInfo =
28
+ std::unordered_map<BufPtr, std::vector<TensorAccessBoundsInfo>>;
29
+
30
+ TORCH_API BoundsInfo inferBounds(StmtPtr s, bool distinctAccessKinds = true);
31
+
32
+ // Bounds inference caching the analysis. The MemDependencyChecker must already
33
+ // have been run.
34
+ TORCH_API BoundsInfo getInferredBounds(
35
+ analysis::MemDependencyChecker& analyzer,
36
+ StmtPtr s,
37
+ bool distinctAccessKinds = true);
38
+ TORCH_API BoundsInfo getInferredBounds(
39
+ analysis::MemDependencyChecker& analyzer,
40
+ ExprPtr e,
41
+ bool distinctAccessKinds = true);
42
+
43
+ TORCH_API void printBoundsInfo(const BoundsInfo& v);
44
+
45
+ TORCH_API std::vector<ExprPtr> getBoundExtents(
46
+ const std::vector<TensorAccessBoundsInfo>& infos);
47
+
48
+ // The kind of dependency found, in increasing order of exclusivity.
49
+ enum class HazardKind {
50
+ ReadAfterWrite,
51
+ WriteAfterRead,
52
+ WriteAfterWrite,
53
+ NoDependency,
54
+ };
55
+ TORCH_API HazardKind getPotentialHazards(
56
+ analysis::MemDependencyChecker& analyzer,
57
+ StmtPtr A,
58
+ StmtPtr B);
59
+
60
+ // Returns true if there is a conflicting overlap between accesses in
61
+ // statements A and B. A conflicting overlap is an overlap in buffer accesses
62
+ // where at least one of the accesses is a Store.
63
+ TORCH_API bool hasConflictingOverlap(
64
+ analysis::MemDependencyChecker& analyzer,
65
+ StmtPtr A,
66
+ StmtPtr B);
67
+ // Same as above, between accesses in stores S1 and S2.
68
+ TORCH_API bool isOverlapping(
69
+ analysis::MemDependencyChecker& analyzer,
70
+ StorePtr S1,
71
+ StorePtr S2);
72
+ // Same as above, between accesses in store S and load L.
73
+ TORCH_API bool isOverlapping(
74
+ analysis::MemDependencyChecker& analyzer,
75
+ StorePtr S,
76
+ LoadPtr L);
77
+
78
+ } // namespace tensorexpr
79
+ } // namespace jit
80
+ } // namespace torch
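A minimal usage sketch for the entry points above, assuming the caller already holds a root statement and two of its sub-statements. Running the checker by visiting the tree with `accept` is an assumption (the header only says it "must already have been run"), and `mayConflict` is a made-up helper name.

#include <torch/csrc/jit/tensorexpr/bounds_inference.h>

using namespace torch::jit::tensorexpr;

// Hypothetical helper: true if sub-statements `a` and `b` of `root` have a
// conflicting buffer overlap (overlapping accesses, at least one a Store).
bool mayConflict(StmtPtr root, StmtPtr a, StmtPtr b) {
  analysis::MemDependencyChecker analyzer;
  root->accept(&analyzer);                        // assumed way to run the checker
  BoundsInfo bounds = getInferredBounds(analyzer, root);
  printBoundsInfo(bounds);                        // dump per-buffer access bounds
  HazardKind hazard = getPotentialHazards(analyzer, a, b);
  (void)hazard;                                   // e.g. HazardKind::ReadAfterWrite
  return hasConflictingOverlap(analyzer, a, b);
}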
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/bounds_overlap.h ADDED
@@ -0,0 +1,128 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/tensorexpr/expr.h>
4
+ #include <torch/csrc/jit/tensorexpr/ir.h>
5
+
6
+ #include <deque>
7
+ #include <utility>
8
+ #include <vector>
9
+
10
+ namespace torch {
11
+ namespace jit {
12
+ namespace tensorexpr {
13
+ namespace analysis {
14
+
15
+ // A simple class containing the start and end of a range in a single dimension.
16
+ struct TORCH_API Bound {
17
+ ExprPtr start{nullptr};
18
+ ExprPtr end{nullptr};
19
+
20
+ // This stores whether or not the start and end of this Bound have previously
21
+ // been swapped. This occurs when the bound is in a loop with a negative
22
+ // stride.
23
+ bool swapped{false};
24
+
25
+ Bound() = default;
26
+ Bound(ExprPtr s, ExprPtr e) : start(std::move(s)), end(std::move(e)) {}
27
+
28
+ void print() const;
29
+ bool equals(const Bound& other) const;
30
+
31
+ // The comparison operators are conservative. If the compare operator returns
32
+ // true, it means that all the elements satisfy the logical expression. But
33
+ // a false result does not mean the opposite comparison is satisfied: it may
34
+ // be the case, but not always.
35
+ bool operator==(const Bound& other) const;
36
+ bool operator!=(const Bound& other) const;
37
+ bool operator<(const Bound& other) const;
38
+ bool operator<=(const Bound& other) const;
39
+ bool operator>(const Bound& other) const;
40
+ bool operator>=(const Bound& other) const;
41
+
42
+ void swap() {
43
+ std::swap(start, end);
44
+ swapped = !swapped;
45
+ }
46
+ };
47
+
48
+ struct BoundHash {
49
+ size_t operator()(const Bound& b) const {
50
+ return std::hash<ExprPtr>()(b.start) ^ std::hash<ExprPtr>()(b.end);
51
+ }
52
+ };
53
+
54
+ // The type of overlap found. Each condition is true only if none of the
55
+ // previous conditions hold.
56
+ // ContainedOrEqual: All elements in the Bound A are in the Bound B (this
57
+ // includes the case where the bounds are equal).
58
+ // Contains: All elements in the Bound B are in the Bound A.
59
+ // PartialOverlap: Some elements in the Bound B are in the Bound A.
60
+ // NoOverlap: No elements in the Bound A are in the Bound B.
61
+ enum class OverlapKind {
62
+ ContainedOrEqual,
63
+ Contains,
64
+ PartialOverlap,
65
+ NoOverlap
66
+ };
67
+
68
+ // The Bound comparison result.
69
+ // True: Every Bound element always satisfies the given comparison operator
70
+ // False: Every Bound element always does NOT satisfy the given comparison
71
+ // operator
72
+ // NotDetermined: Some elements satisfy the given comparison operator and
73
+ // some elements do not
74
+ enum class CmpEvalResult { True, False, NotDetermined };
75
+
76
+ // Returns the kind of overlap between Bound A and Bound B in a single
77
+ // dimension.
78
+ OverlapKind TORCH_API boundOverlap(Bound A, Bound B);
79
+
80
+ // The comparison is conservative and the compare result is deterministic.
81
+ // It means that every element of the Bound to be compared needs to satisfy
82
+ // the given comparison operator.
83
+ CmpEvalResult TORCH_API compareBound(
84
+ const Bound& a,
85
+ const Bound& b,
86
+ const CompareSelectOperation& cmp_op);
87
+
88
+ // A multi dimensional bound representing the bound of a set of indices.
89
+ using IndexBounds = std::vector<Bound>;
90
+
91
+ // Returns true if two IndexBounds are equivalent.
92
+ bool TORCH_API indexBoundsEquals(const IndexBounds& A, const IndexBounds& B);
93
+
94
+ // Flattens a multi dimensional bound to a single dimension. The IndexBounds "a"
95
+ // *must* encapsulate the entire range of the buffer.
96
+ Bound TORCH_API flattenBounds(const IndexBounds& a);
97
+
98
+ // Determines the kind of overlap across all dimensions of two IndexBounds.
99
+ OverlapKind TORCH_API overlaps(const IndexBounds& a, const IndexBounds& b);
100
+
101
+ // Returns the Bound slices created by subtracting bound B from bound A.
102
+ // Multiple Bounds can be returned in the case where B slices A into two
103
+ // distinct regions with no overlap.
104
+ //
105
+ // For example:
106
+ // subtractBound((0, 10), (2, 4)) => [(0, 1), (5, 10)]
107
+ // bound A: (0, 10)
108
+ // bound B: (2, 4)
109
+ // If we remove slice (2, 4) from the slice (0, 10), we will be left
110
+ // with 2 slices, one at the start (0, 1), and one at the end (5, 10).
111
+ // So, the result of this subtraction is [(0, 1), (5, 10)].
112
+ //
113
+ // Note: this doesn't use IndexBounds because the Bounds returned do not
114
+ // represent multiple different dimensions.
115
+ std::vector<Bound> TORCH_API subtractBound(Bound a, Bound b);
116
+
117
+ // Returns the bound slices created by subtracting the IndexBounds B from A.
118
+ std::vector<IndexBounds> TORCH_API subtractIndicesBounds(
119
+ const IndexBounds& A,
120
+ const IndexBounds& B,
121
+ OverlapKind overlap);
122
+ std::vector<IndexBounds> TORCH_API
123
+ subtractIndicesBounds(const IndexBounds& A, const IndexBounds& B);
124
+
125
+ } // namespace analysis
126
+ } // namespace tensorexpr
127
+ } // namespace jit
128
+ } // namespace torch
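A small sketch of the single-dimension helpers above, built from integer immediates (`IntImm::make` comes from the tensorexpr IR headers this file includes). The expected results mirror the `subtractBound` example in the comment; `bound_overlap_demo` is just an illustrative name.

#include <torch/csrc/jit/tensorexpr/bounds_overlap.h>

#include <vector>

using namespace torch::jit::tensorexpr;
using namespace torch::jit::tensorexpr::analysis;

void bound_overlap_demo() {
  // Bound A = (0, 10), Bound B = (2, 4): A fully contains B.
  Bound a(IntImm::make(0).node(), IntImm::make(10).node());
  Bound b(IntImm::make(2).node(), IntImm::make(4).node());

  OverlapKind kind = boundOverlap(a, b);          // expected: OverlapKind::Contains
  std::vector<Bound> rest = subtractBound(a, b);  // expected: {(0, 1), (5, 10)}
  (void)kind;
  (void)rest;
}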
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/codegen.h ADDED
@@ -0,0 +1,283 @@
1
+ #pragma once
2
+
3
+ #include <ATen/ATen.h>
4
+ #include <torch/csrc/jit/tensorexpr/ir.h>
5
+ #include <torch/csrc/jit/tensorexpr/tensor.h>
6
+
7
+ #include <utility>
8
+
9
+ namespace torch {
10
+ namespace jit {
11
+ namespace tensorexpr {
12
+
13
+ template <typename T>
14
+ class PaddedBuffer;
15
+
16
+ class TORCH_API CodeGen {
17
+ public:
18
+ class BufferArg;
19
+ class CallArg;
20
+
21
+ template <typename... Ts>
22
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
23
+ CodeGen(StmtPtr stmt, Ts... ts)
24
+ : stmt_(std::move(stmt)), buffer_args_({BufferArg(ts)...}) {}
25
+
26
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
27
+ CodeGen(
28
+ StmtPtr stmt,
29
+ std::vector<BufferArg> buffer_args,
30
+ at::Device device = at::kCPU,
31
+ std::string kernel_func_name = "func");
32
+
33
+ virtual ~CodeGen() = default;
34
+
35
+ StmtPtr stmt() const {
36
+ return stmt_;
37
+ }
38
+
39
+ void set_stmt(StmtPtr s) {
40
+ stmt_ = s;
41
+ }
42
+
43
+ void apply_mutator(IRMutator* mutator) {
44
+ stmt_ = stmt_->accept_mutator(mutator);
45
+ }
46
+
47
+ void apply_visitor(IRVisitor* visitor) {
48
+ stmt_->accept(visitor);
49
+ }
50
+
51
+ std::vector<BufferArg>& buffer_args() {
52
+ return buffer_args_;
53
+ }
54
+
55
+ const std::vector<BufferArg>& buffer_args() const {
56
+ return buffer_args_;
57
+ }
58
+
59
+ at::Device device() {
60
+ return device_;
61
+ }
62
+
63
+ // This function returns the generated code as
64
+ // a string.
65
+ virtual std::string getCodeText(const std::string& attr = "") {
66
+ return ("");
67
+ }
68
+
69
+ // TODO: Figure out how to unify these call interfaces.
70
+
71
+ /// Call a function with a vector of CallArgs, which are tagged
72
+ /// unions that properly type the arguments.
73
+ virtual void call(const std::vector<CallArg>& args) = 0;
74
+
75
+ /// Call a function faster than a regular `call` by assuming that
76
+ /// the generated kernel already knows the type of the arguments, so
77
+ /// they can be type-punned with `void*`s.
78
+ virtual void call_raw(const std::vector<void*>& args) = 0;
79
+
80
+ /// Call a function even faster than a regular call, by assuming
81
+ /// that the number of thread blocks can be derived from `numel` via
82
+ /// a simple division, rather than evaluating an expression.
83
+ virtual void call_with_numel(void** args, int64_t numel);
84
+
85
+ virtual at::Tensor empty_strided(
86
+ c10::IntArrayRef size,
87
+ c10::IntArrayRef stride,
88
+ c10::optional<c10::ScalarType> dtype_opt,
89
+ c10::optional<c10::Layout> layout_opt,
90
+ c10::optional<c10::Device> device_opt,
91
+ c10::optional<bool> pin_memory_opt) {
92
+ return at::empty_strided(
93
+ size, stride, dtype_opt, layout_opt, device_opt, pin_memory_opt);
94
+ }
95
+
96
+ const std::string& kernel_func_name() const {
97
+ return kernel_func_name_;
98
+ }
99
+
100
+ void allocIntermediateBufs();
101
+
102
+ protected:
103
+ static void* argToPtr(const BufferArg& bufferArg, const CallArg& callArg);
104
+
105
+ private:
106
+ StmtPtr stmt_;
107
+ std::vector<BufferArg> buffer_args_;
108
+ at::Device device_ = at::kCPU;
109
+ std::string kernel_func_name_ = "func";
110
+ };
111
+
112
+ class TORCH_API ExtCallMemoryReuse : public IRMutator {
113
+ static std::unordered_map<std::string, std::string> makeExtCallFuncNameMap();
114
+ static const std::unordered_map<std::string, std::string> extCallFuncNameMap_;
115
+
116
+ public:
117
+ explicit ExtCallMemoryReuse(
118
+ const std::vector<CodeGen::BufferArg>& bufferArgs);
119
+ ~ExtCallMemoryReuse() override = default;
120
+ StmtPtr mutate(ExternalCallPtr v) override;
121
+
122
+ private:
123
+ std::unordered_set<BufPtr> bufferArgs_;
124
+ };
125
+
126
+ class CodeGen::BufferArg {
127
+ public:
128
+ BufferArg(const Tensor& tensor) : buf_(tensor.buf()) {}
129
+ BufferArg(const VarHandle& var) : var_(var.node()), isVar_(true) {}
130
+ BufferArg(const BufHandle& buf) : buf_(buf.node()) {}
131
+ BufferArg(BufPtr buf) : buf_(std::move(buf)) {}
132
+
133
+ VarPtr var() const {
134
+ return isVar_ ? var_ : buf_->base_handle();
135
+ }
136
+
137
+ BufPtr buf() const {
138
+ return buf_;
139
+ }
140
+
141
+ bool isVar() const {
142
+ return isVar_;
143
+ }
144
+
145
+ Dtype dtype() const {
146
+ return isVar_ ? var_->dtype() : buf_->dtype();
147
+ }
148
+
149
+ private:
150
+ VarPtr var_ = nullptr;
151
+ BufPtr buf_ = nullptr;
152
+ bool isVar_ = false;
153
+ };
154
+
155
+ class CodeGen::CallArg {
156
+ public:
157
+ template <typename T>
158
+ CallArg(const PaddedBuffer<T>& buffer);
159
+
160
+ template <typename T>
161
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init,cppcoreguidelines-pro-type-const-cast)
162
+ CallArg(const std::vector<T>& buffer)
163
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
164
+ : data_(const_cast<T*>(buffer.data())) {}
165
+
166
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
167
+ CallArg(void* ptr) : data_(ptr) {}
168
+
169
+ #define ARG_TYPE_CTOR(Type, Name) \
170
+ CallArg(Type v) { \
171
+ memcpy(buffer_, &v, sizeof(Type)); \
172
+ data_ = (void*)buffer_; \
173
+ }
174
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
175
+ AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, ARG_TYPE_CTOR);
176
+ #undef ARG_TYPE_CTOR
177
+
178
+ void* data() const {
179
+ return data_;
180
+ }
181
+
182
+ CallArg(const CallArg& rhs) {
183
+ if (rhs.data_ == rhs.buffer_) {
184
+ memcpy(this->buffer_, rhs.buffer_, sizeof(rhs.buffer_));
185
+ this->data_ = (void*)(this->buffer_);
186
+ } else {
187
+ this->data_ = rhs.data_;
188
+ }
189
+ }
190
+
191
+ CallArg& operator=(const CallArg& rhs) {
192
+ if (rhs.data_ == rhs.buffer_) {
193
+ memcpy(this->buffer_, rhs.buffer_, sizeof(rhs.buffer_));
194
+ this->data_ = (void*)(this->buffer_);
195
+ } else {
196
+ this->data_ = rhs.data_;
197
+ }
198
+ return *this;
199
+ }
200
+
201
+ #define ARG_PTR_DEFINE(Type, Name) \
202
+ Type* Name##Ptr() const { \
203
+ TORCH_INTERNAL_ASSERT(data_ == (void*)buffer_); \
204
+ return (Type*)data_; \
205
+ }
206
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
207
+ AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, ARG_PTR_DEFINE);
208
+ #undef ARG_PTR_DEFINE
209
+
210
+ private:
211
+ void* data_;
212
+ // A scalar value used to be stored directly in data_ (via void** = &data_),
213
+ // but the bit width of a pointer is only 32 bits on a 32-bit platform, so it
214
+ // cannot hold a scalar wider than 32 bits, such as double or long. Hence we
215
+ // add an 8-byte buffer_ dedicated to storing the scalar value, regardless of
216
+ // whether its bit width is smaller or larger than 32 bits.
217
+ char buffer_[8] = {0}; // 64bits
218
+ };
219
+
220
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
221
+ class RegisterCodeGenList {
222
+ public:
223
+ TORCH_API static RegisterCodeGenList& GetInstance() {
224
+ static RegisterCodeGenList codegen_list;
225
+ return codegen_list;
226
+ }
227
+
228
+ using StmtFactoryMethod = std::function<std::unique_ptr<CodeGen>(
229
+ StmtPtr stmt,
230
+ const std::vector<CodeGen::BufferArg>&,
231
+ at::Device device,
232
+ const std::string& kernel_func_name)>;
233
+
234
+ TORCH_API StmtFactoryMethod FindStmtFactoryMethod(const std::string& name);
235
+ RegisterCodeGenList(const RegisterCodeGenList&) = delete;
236
+ RegisterCodeGenList& operator=(const RegisterCodeGenList&) = delete;
237
+
238
+ private:
239
+ template <class CodeGenType>
240
+ friend class RegisterCodeGen;
241
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
242
+ RegisterCodeGenList() = default;
243
+ TORCH_API void AddStmtFactoryMethod(
244
+ const std::string& name,
245
+ const StmtFactoryMethod& stmt_factory_method);
246
+
247
+ std::unordered_map<std::string, StmtFactoryMethod> stmt_factory_methods_;
248
+ };
249
+
250
+ template <class CodeGenType>
251
+ class RegisterCodeGen {
252
+ public:
253
+ explicit RegisterCodeGen(const std::string& name) {
254
+ RegisterCodeGenList& codegen_list = RegisterCodeGenList::GetInstance();
255
+ codegen_list.AddStmtFactoryMethod(
256
+ name,
257
+ [](StmtPtr stmt,
258
+ const std::vector<CodeGen::BufferArg>& params,
259
+ at::Device device,
260
+ const std::string& kernel_func_name) {
261
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
262
+ std::unique_ptr<CodeGen> method(
263
+ new CodeGenType(stmt, params, device, kernel_func_name));
264
+ return method;
265
+ });
266
+ }
267
+ };
268
+
269
+ TORCH_API std::unique_ptr<CodeGen> CreateCodeGen(
270
+ const std::string& name,
271
+ StmtPtr stmt,
272
+ const std::vector<CodeGen::BufferArg>& params,
273
+ at::Device device = at::kCPU,
274
+ const std::string& kernel_func_name = "func");
275
+
276
+ class TORCH_API GenericIntrinsicsExpander : public IRMutator {
277
+ protected:
278
+ ExprPtr mutate(IntrinsicsPtr v) override;
279
+ };
280
+
281
+ } // namespace tensorexpr
282
+ } // namespace jit
283
+ } // namespace torch
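RegisterCodeGenList / RegisterCodeGen above form a name-keyed factory registry. The sketch below shows the intended registration pattern with a made-up backend; only the CodeGen interface, the registrar, and CreateCodeGen come from this header, while everything named "MyCodeGen"/"my_codegen" is hypothetical.

#include <torch/csrc/jit/tensorexpr/codegen.h>

using namespace torch::jit::tensorexpr;

// Hypothetical backend. The factory installed by RegisterCodeGen constructs it
// with (StmtPtr, BufferArgs, Device, kernel_func_name), so the constructor must
// match that shape.
class MyCodeGen : public CodeGen {
 public:
  MyCodeGen(
      StmtPtr stmt,
      const std::vector<BufferArg>& args,
      at::Device device,
      const std::string& name)
      : CodeGen(stmt, args, device, name) {}
  void call(const std::vector<CallArg>& args) override { /* run the kernel */ }
  void call_raw(const std::vector<void*>& args) override { /* run the kernel */ }
};

// A static registrar inserts the factory into the global list at load time...
static RegisterCodeGen<MyCodeGen> my_codegen_reg("my_codegen");

// ...after which the backend can be instantiated by name:
// auto cg = CreateCodeGen("my_codegen", stmt, buffer_args, at::kCPU, "func");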
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/cpp_codegen.h ADDED
@@ -0,0 +1,102 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/tensorexpr/codegen.h>
4
+ #include <torch/csrc/jit/tensorexpr/ir_printer.h>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+ namespace tensorexpr {
9
+
10
+ class CppVarNameRewriter;
11
+
12
+ // Generates C++ code from the IR.
13
+ //
14
+ // Vector operations are unrolled.
15
+ // For example:
16
+ // C[Ramp(0, 1, 3)] = A[Ramp(0, 2, 3)] + B[Ramp(0, 3, 3)];
17
+ // is unrolled into:
18
+ // C[0] = A[0] + B[0];
19
+ // C[1] = A[2] + B[3];
20
+ // C[2] = A[4] + B[6];
21
+ class TORCH_API CppPrinter : public IRPrinter {
22
+ public:
23
+ explicit CppPrinter(std::ostream* os);
24
+ ~CppPrinter() override;
25
+
26
+ void printPrologue();
27
+
28
+ using IRPrinter::visit;
29
+
30
+ // Binary expressions.
31
+ void visit(ModPtr) override;
32
+ void visit(MaxPtr) override;
33
+ void visit(MinPtr) override;
34
+
35
+ // Conditional expressions.
36
+ void visit(CompareSelectPtr) override;
37
+ void visit(IfThenElsePtr) override;
38
+
39
+ // Tensor operations.
40
+ void visit(AllocatePtr) override;
41
+ void visit(FreePtr) override;
42
+ void visit(LoadPtr) override;
43
+ void visit(StorePtr) override;
44
+
45
+ // Casts.
46
+ void visit(CastPtr) override;
47
+ void visit(BitCastPtr) override;
48
+
49
+ // Calls.
50
+ void visit(IntrinsicsPtr) override;
51
+ void visit(ExternalCallPtr) override;
52
+
53
+ // Vars.
54
+ void visit(LetPtr) override;
55
+ void visit(VarPtr) override;
56
+
57
+ // Vector data types.
58
+ void visit(RampPtr) override;
59
+ void visit(BroadcastPtr) override;
60
+
61
+ private:
62
+ int lane_;
63
+ std::unordered_map<VarPtr, ExprPtr> vector_vars_;
64
+ };
65
+
66
+ class TORCH_API CppCodeGen : public CodeGen {
67
+ public:
68
+ CppCodeGen(
69
+ StmtPtr stmt,
70
+ const std::vector<BufferArg>& buffer_args,
71
+ at::Device device = at::kCPU,
72
+ const std::string& kernel_func_name = "func");
73
+
74
+ ~CppCodeGen() override;
75
+
76
+ void call(const std::vector<CallArg>& args) override;
77
+ void call_raw(const std::vector<void*>& args) override;
78
+
79
+ template <typename... Ts>
80
+ void operator()(const Ts&... ts) {
81
+ call(std::vector<CallArg>({CallArg(ts)...}));
82
+ }
83
+
84
+ std::string getCodeText(const std::string& attr = "") override {
85
+ return oss_.str();
86
+ }
87
+
88
+ private:
89
+ void init();
90
+
91
+ std::ostream& os() {
92
+ return printer_->os();
93
+ }
94
+
95
+ std::ostringstream oss_;
96
+ std::unique_ptr<CppPrinter> printer_;
97
+ std::unique_ptr<CppVarNameRewriter> var_name_rewriter_;
98
+ };
99
+
100
+ } // namespace tensorexpr
101
+ } // namespace jit
102
+ } // namespace torch
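A hypothetical end-to-end use of CppCodeGen: build a one-statement elementwise loop with the tensorexpr IR helpers (Store, Load, For and the *Imm::make constructors, assumed to come from ir.h, which this header pulls in transitively) and print the generated C++ source. The buffer and variable names are made up.

#include <torch/csrc/jit/tensorexpr/cpp_codegen.h>

#include <iostream>

using namespace torch::jit::tensorexpr;

void print_cpp_kernel() {
  BufHandle a("A", {IntImm::make(4)}, kFloat);
  BufHandle b("B", {IntImm::make(4)}, kFloat);
  VarHandle i("i", kInt);

  // B[i] = A[i] + 1.f for i in [0, 4)
  StmtPtr body = Store::make(b, {i}, Load::make(a, {i}) + FloatImm::make(1.f));
  StmtPtr loop = For::make(i, IntImm::make(0), IntImm::make(4), body);

  CppCodeGen cg(loop, {a, b});
  std::cout << cg.getCodeText() << std::endl;
}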
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/cpp_intrinsics.h ADDED
@@ -0,0 +1,36 @@
1
+ #pragma once
2
+
3
+ namespace torch {
4
+ namespace jit {
5
+ namespace tensorexpr {
6
+
7
+ constexpr auto cpp_intrinsics_definition = R"(
8
+ namespace std {
9
+
10
+ template <typename T,
11
+ typename std::enable_if<std::is_floating_point<T>::value, int>::type = 0>
12
+ T rsqrt(T v) {
13
+ return 1.0f / std::sqrt(v);
14
+ }
15
+
16
+ template <typename T,
17
+ typename std::enable_if<std::is_floating_point<T>::value, int>::type = 0>
18
+ T frac(T v) {
19
+ T intpart;
20
+ return std::modf(v, &intpart);
21
+ }
22
+
23
+ template <typename From, typename To>
24
+ To bitcast(const From& v) {
25
+ assert(sizeof(To) == sizeof(From));
26
+ To res;
27
+ std::memcpy(&res, &v, sizeof(From));
28
+ return res;
29
+ }
30
+
31
+ } // namespace std
32
+ )";
33
+
34
+ } // namespace tensorexpr
35
+ } // namespace jit
36
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/cuda_random.h ADDED
@@ -0,0 +1,104 @@
1
+ #pragma once
2
+
3
+ namespace torch {
4
+ namespace jit {
5
+ namespace tensorexpr {
6
+
7
+ constexpr auto philox_random_string = R"(
8
+
9
+ class Philox {
10
+ public:
11
+ __device__ inline Philox(unsigned long long seed,
12
+ unsigned long long subsequence,
13
+ unsigned long long offset) {
14
+ key.x = (unsigned int)seed;
15
+ key.y = (unsigned int)(seed >> 32);
16
+ counter = make_uint4(0, 0, 0, 0);
17
+ counter.z = (unsigned int)(subsequence);
18
+ counter.w = (unsigned int)(subsequence >> 32);
19
+ STATE = 0;
20
+ incr_n(offset / 4);
21
+ }
22
+
23
+ __device__ inline unsigned long operator()() {
24
+ if(STATE == 0) {
25
+ uint4 counter_ = counter;
26
+ uint2 key_ = key;
27
+ for(int i = 0; i < 9; i++) {
28
+ counter_ = single_round(counter_, key_);
29
+ key_.x += (kPhilox10A); key_.y += (kPhilox10B);
30
+ }
31
+ output = single_round(counter_, key_);
32
+ incr();
33
+ }
34
+ unsigned long ret;
35
+ switch(STATE) {
36
+ case 0: ret = output.x; break;
37
+ case 1: ret = output.y; break;
38
+ case 2: ret = output.z; break;
39
+ case 3: ret = output.w; break;
40
+ }
41
+ STATE = (STATE + 1) % 4;
42
+ return ret;
43
+ }
44
+
45
+ private:
46
+ uint4 counter;
47
+ uint4 output;
48
+ uint2 key;
49
+ unsigned int STATE;
50
+ __device__ inline void incr_n(unsigned long long n) {
51
+ unsigned int nlo = (unsigned int)(n);
52
+ unsigned int nhi = (unsigned int)(n >> 32);
53
+ counter.x += nlo;
54
+ if (counter.x < nlo)
55
+ nhi++;
56
+ counter.y += nhi;
57
+ if (nhi <= counter.y)
58
+ return;
59
+ if (++counter.z)
60
+ return;
61
+ ++counter.w;
62
+ }
63
+ __device__ inline void incr() {
64
+ if (++counter.x)
65
+ return;
66
+ if (++counter.y)
67
+ return;
68
+ if (++counter.z)
69
+ return;
70
+ ++counter.w;
71
+ }
72
+ __device__ unsigned int mulhilo32(unsigned int a, unsigned int b,
73
+ unsigned int *result_high) {
74
+ *result_high = __umulhi(a, b);
75
+ return a*b;
76
+ }
77
+
78
+ __device__ inline uint4 single_round(uint4 ctr, uint2 key) {
79
+ unsigned int hi0;
80
+ unsigned int hi1;
81
+ unsigned int lo0 = mulhilo32(kPhiloxSA, ctr.x, &hi0);
82
+ unsigned int lo1 = mulhilo32(kPhiloxSB, ctr.z, &hi1);
83
+
84
+ uint4 ret = {hi1 ^ ctr.y ^ key.x, lo1, hi0 ^ ctr.w ^ key.y, lo0};
85
+ return ret;
86
+ }
87
+
88
+ static const unsigned long kPhilox10A = 0x9E3779B9;
89
+ static const unsigned long kPhilox10B = 0xBB67AE85;
90
+ static const unsigned long kPhiloxSA = 0xD2511F53;
91
+ static const unsigned long kPhiloxSB = 0xCD9E8D57;
92
+ };
93
+
94
+ // Inverse of 2^32.
95
+ #define M_RAN_INVM32 2.3283064e-10f
96
+ __device__ __inline__ float Uint32ToFloat(unsigned int x) {
97
+ return x * M_RAN_INVM32;
98
+ }
99
+
100
+ )";
101
+
102
+ } // namespace tensorexpr
103
+ } // namespace jit
104
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/eval.h ADDED
@@ -0,0 +1,347 @@
1
+ #pragma once
2
+
3
+ #include <cmath>
4
+ #include <cstring>
5
+ #include <type_traits>
6
+ #include <unordered_map>
7
+ #include <utility>
8
+ #include <vector>
9
+
10
+ #include <c10/macros/Macros.h>
11
+ #include <c10/util/Logging.h>
12
+ #include <c10/util/math_compat.h>
13
+ #include <c10/util/string_utils.h>
14
+ #include <torch/csrc/jit/tensorexpr/codegen.h>
15
+ #include <torch/csrc/jit/tensorexpr/exceptions.h>
16
+ #include <torch/csrc/jit/tensorexpr/ir.h>
17
+ #include <torch/csrc/jit/tensorexpr/ir_printer.h>
18
+ #include <torch/csrc/jit/tensorexpr/tensor.h>
19
+ #include <torch/csrc/jit/tensorexpr/types.h>
20
+ #include <torch/csrc/jit/tensorexpr/var_substitutor.h>
21
+
22
+ namespace torch {
23
+ namespace jit {
24
+ namespace tensorexpr {
25
+
26
+ class InterpValue {
27
+ public:
28
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
29
+ InterpValue() : dtype_(kInt) {
30
+ Intvalues.push_back(0);
31
+ }
32
+
33
+ template <typename T>
34
+ InterpValue(Dtype dtype, T v) : dtype_(dtype) {
35
+ #define TYPE_CASE(Type, Name) \
36
+ if (dtype == k##Name) { \
37
+ Name##values.push_back(v); \
38
+ return; \
39
+ }
40
+ AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, TYPE_CASE);
41
+ #undef TYPE_CASE
42
+ throw unsupported_dtype();
43
+ }
44
+
45
+ #define VALUE_CTOR(Type, Name) \
46
+ InterpValue(Type v) : dtype_(k##Name) { \
47
+ Name##values.push_back(v); \
48
+ }
49
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
50
+ AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, VALUE_CTOR);
51
+ #undef VALUE_CTOR
52
+
53
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
54
+ explicit InterpValue(c10::quint8 v) : dtype_(kQUInt8) {
55
+ QUInt8values.emplace_back(v.val_);
56
+ }
57
+
58
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
59
+ explicit InterpValue(c10::qint8 v) : dtype_(kQInt8) {
60
+ QInt8values.emplace_back(v.val_);
61
+ }
62
+
63
+ #define VALUE_VEC_CTOR(Type, Name) \
64
+ InterpValue(const std::vector<Type>& v) \
65
+ : dtype_(Dtype(k##Name, v.size())), Name##values(v) {}
66
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
67
+ AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, VALUE_VEC_CTOR);
68
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
69
+ VALUE_VEC_CTOR(c10::quint8, QUInt8)
70
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
71
+ VALUE_VEC_CTOR(c10::qint8, QInt8)
72
+ #undef VALUE_VEC_CTOR
73
+
74
+ template <typename T>
75
+ T as() const;
76
+
77
+ template <typename T>
78
+ const std::vector<T>& as_vec() const;
79
+
80
+ int64_t intValue() const;
81
+
82
+ Dtype dtype() const {
83
+ return dtype_;
84
+ }
85
+
86
+ private:
87
+ Dtype dtype_;
88
+
89
+ #define VALUE_STORAGE(Type, Name) std::vector<Type> Name##values;
90
+ AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, VALUE_STORAGE);
91
+ VALUE_STORAGE(c10::qint8, QInt8);
92
+ VALUE_STORAGE(c10::quint8, QUInt8);
93
+ #undef VALUE_STORAGE
94
+ void* ptr;
95
+ };
96
+
97
+ #define VALUE_AS_DISPATCH(Type, Name) \
98
+ template <> \
99
+ inline Type InterpValue::as<Type>() const { \
100
+ if (dtype_ != k##Name) { \
101
+ throw unsupported_dtype(); \
102
+ } \
103
+ return Name##values[0]; \
104
+ }
105
+ AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, VALUE_AS_DISPATCH);
106
+ VALUE_AS_DISPATCH(c10::quint8, QUInt8);
107
+ VALUE_AS_DISPATCH(c10::qint8, QInt8);
108
+ #undef VALUE_AS_DISPATCH
109
+
110
+ #define VALUE_AS_VEC_DISPATCH(Type, Name) \
111
+ template <> \
112
+ inline const std::vector<Type>& InterpValue::as_vec<Type>() const { \
113
+ if (dtype_.scalar_type() != ScalarType::Name) { \
114
+ throw unsupported_dtype(); \
115
+ } \
116
+ return Name##values; \
117
+ }
118
+ AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, VALUE_AS_VEC_DISPATCH);
119
+ VALUE_AS_VEC_DISPATCH(c10::quint8, QUInt8);
120
+ VALUE_AS_VEC_DISPATCH(c10::qint8, QInt8);
121
+ #undef VALUE_AS_VEC_DISPATCH
122
+
123
+ template <typename Type>
124
+ auto underlyingValue(Type x) {
125
+ return x;
126
+ }
127
+
128
+ template <>
129
+ inline auto underlyingValue<c10::quint8>(c10::quint8 x) {
130
+ return x.val_;
131
+ }
132
+
133
+ template <>
134
+ inline auto underlyingValue<c10::qint8>(c10::qint8 x) {
135
+ return x.val_;
136
+ }
137
+
138
+ template <typename To, typename From>
139
+ To raw_bitcast(const From& src) {
140
+ TORCH_CHECK(sizeof(To) == sizeof(From), "Invalid bitcast invocation");
141
+ To storage;
142
+ std::memcpy(&storage, &src, sizeof(To));
143
+ return reinterpret_cast<To&>(storage);
144
+ }
145
+
146
+ class SimpleIREvaluatorImpl;
147
+ class TORCH_API SimpleIREvaluator : public CodeGen {
148
+ public:
149
+ SimpleIREvaluator(
150
+ StmtPtr stmt,
151
+ const std::vector<BufferArg>& buffer_args,
152
+ at::Device device = at::kCPU,
153
+ const std::string& kernel_func_name = "func");
154
+
155
+ ~SimpleIREvaluator() override;
156
+
157
+ void call(const std::vector<CallArg>& args) override;
158
+ void call_raw(const std::vector<void*>& args) override;
159
+
160
+ template <typename... Ts>
161
+ void operator()(const Ts&... ts) {
162
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
163
+ std::vector<CallArg> args({CallArg(ts)...});
164
+ call(args);
165
+ }
166
+
167
+ void bindVar(VarPtr v, ExprPtr e);
168
+ InterpValue value() const;
169
+
170
+ private:
171
+ void bindArg(const BufferArg& buf, void* data);
172
+ void expand_intrinsics() {
173
+ GenericIntrinsicsExpander intrinsics_expander;
174
+ apply_mutator(&intrinsics_expander);
175
+ }
176
+
177
+ std::unique_ptr<SimpleIREvaluatorImpl> impl_;
178
+ };
179
+
180
+ template <class CodeGenType>
181
+ class ExprEval {
182
+ public:
183
+ using BufferArg = CodeGen::BufferArg;
184
+ using CallArg = CodeGen::CallArg;
185
+
186
+ template <typename... Ts>
187
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
188
+ ExprEval(const ExprHandle& expr, Ts... ts)
189
+ : ExprEval(expr, {BufferArg(ts)...}) {}
190
+
191
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
192
+ ExprEval(const ExprHandle& expr, const std::vector<BufferArg>& buffer_args)
193
+ : dtype_(expr.dtype()) {
194
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
195
+ std::vector<BufferArg> buffer_args_extended = buffer_args;
196
+ BufHandle ret_buf("ret_val", {1}, dtype_);
197
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
198
+ std::vector<ExprHandle> indices;
199
+ ExprHandle zero = IntImm::make(0);
200
+ for (size_t i = 0; i < ret_buf.ndim(); i++) {
201
+ indices.push_back(zero);
202
+ }
203
+ StmtPtr store_stmt = Store::make(ret_buf, indices, expr);
204
+ buffer_args_extended.emplace_back(ret_buf);
205
+ codegen_.reset(new CodeGenType(store_stmt, buffer_args_extended));
206
+ }
207
+
208
+ template <typename... Ts>
209
+ void operator()(Ts... ts) {
210
+ call(ts...);
211
+ }
212
+
213
+ void operator()(const std::vector<CallArg>& call_args) {
214
+ call(call_args);
215
+ }
216
+
217
+ void bindVar(VarPtr v, ExprPtr e) {
218
+ codegen_->bindVar(v, e);
219
+ }
220
+
221
+ void bindVar(const VarHandle& v, const ExprHandle& e) {
222
+ codegen_->bindVar(v.node(), e.node());
223
+ }
224
+
225
+ template <typename... Ts>
226
+ void call(Ts... ts) {
227
+ call({CallArg(ts)...});
228
+ }
229
+
230
+ void call(const std::vector<CallArg>& call_args) {
231
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
232
+ std::vector<CallArg> call_args_extended = call_args;
233
+ switch (dtype_.scalar_type()) {
234
+ #define TYPE_CASE(Type, Name) \
235
+ case ScalarType::Name: { \
236
+ std::vector<Type> ret_val_arg(1); \
237
+ call_args_extended.push_back(CallArg(ret_val_arg)); \
238
+ codegen_->call(call_args_extended); \
239
+ ret_value_ = InterpValue(ret_val_arg[0]); \
240
+ } break;
241
+ // NOLINTNEXTLINE(modernize-use-emplace)
242
+ AT_FORALL_SCALAR_TYPES_AND2(Half, BFloat16, TYPE_CASE);
243
+ // NOLINTNEXTLINE(modernize-use-emplace)
244
+ TYPE_CASE(c10::quint8, QUInt8);
245
+ // NOLINTNEXTLINE(modernize-use-emplace)
246
+ TYPE_CASE(c10::qint8, QInt8);
247
+ #undef TYPE_CASE
248
+ case ScalarType::Bool: {
249
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
250
+ std::vector<unsigned char> ret_val_arg(1);
251
+ call_args_extended.emplace_back(ret_val_arg.data());
252
+ codegen_->call(call_args_extended);
253
+ ret_value_ = InterpValue((bool)ret_val_arg[0]);
254
+ } break;
255
+ default:
256
+ throw unsupported_dtype();
257
+ }
258
+ }
259
+
260
+ void call_raw(const std::vector<void*>& args) {
261
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
262
+ std::vector<void*> args_extended = args;
263
+ switch (dtype_.scalar_type()) {
264
+ #define TYPE_CASE(Type, Name) \
265
+ case ScalarType::Name: { \
266
+ std::vector<Type> ret_val_arg(1); \
267
+ args_extended.push_back(ret_val_arg.data()); \
268
+ codegen_->call_raw(args_extended); \
269
+ ret_value_ = InterpValue(ret_val_arg[0]); \
270
+ } break;
271
+ AT_FORALL_SCALAR_TYPES_AND2(Half, BFloat16, TYPE_CASE);
272
+ TYPE_CASE(c10::quint8, QUInt8);
273
+ TYPE_CASE(c10::qint8, QInt8);
274
+ #undef TYPE_CASE
275
+ case ScalarType::Bool: {
276
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
277
+ std::vector<unsigned char> ret_val_arg(1);
278
+ args_extended.push_back(ret_val_arg.data());
279
+ codegen_->call_raw(args_extended);
280
+ ret_value_ = InterpValue((bool)ret_val_arg[0]);
281
+ } break;
282
+ default:
283
+ throw unsupported_dtype();
284
+ }
285
+ }
286
+
287
+ template <typename T>
288
+ T value(const std::vector<void*>& args) {
289
+ call_raw(args);
290
+ return ret_value_.as<T>();
291
+ }
292
+
293
+ template <typename T, typename... Ts>
294
+ T value(Ts... ts) {
295
+ call(std::forward<Ts>(ts)...);
296
+ return ret_value_.as<T>();
297
+ }
298
+
299
+ Dtype dtype() {
300
+ return dtype_;
301
+ }
302
+
303
+ private:
304
+ Dtype dtype_;
305
+ std::unique_ptr<CodeGenType> codegen_;
306
+ InterpValue ret_value_;
307
+ };
308
+
309
+ // Evaluates the given expression and returns an int64_t value if the result of
310
+ // the given expression is int64_t.
311
+ c10::optional<int64_t> evalInt(ExprPtr e);
312
+
313
+ // Substitutes the given vars with their corresponding expressions in the input
314
+ // expression.
315
+ inline ExprPtr Substitute(ExprPtr expr, const VarMapping& var_mapping) {
316
+ VarSubMutator var_sub(var_mapping);
317
+ return expr->accept_mutator(&var_sub);
318
+ }
319
+
320
+ // Substitutes the given vars with their corresponding expressions in the input
321
+ // statement.
322
+ inline StmtPtr Substitute(StmtPtr stmt, const VarMapping& var_mapping) {
323
+ VarSubMutator var_sub(var_mapping);
324
+ return stmt->accept_mutator(&var_sub);
325
+ }
326
+
327
+ // Creates a clone of the input expression and substitutes the given vars with
328
+ // their corresponding expressions in the clone.
329
+ // NOTE: This works because cloning reuses variables and does not create new
330
+ // ones, and `VarMapping` input has variables as the key.
331
+ inline ExprPtr SubstituteInClone(ExprPtr expr, const VarMapping& var_mapping) {
332
+ VarSubMutator var_sub(var_mapping);
333
+ return Expr::clone(std::move(expr))->accept_mutator(&var_sub);
334
+ }
335
+
336
+ // Creates a clone of the input statement and substitutes the given vars with
337
+ // their corresponding expressions in the clone.
338
+ // NOTE: This works because cloning reuses variables and does not create new
339
+ // ones, and `VarMapping` input has variables as the key.
340
+ inline StmtPtr SubstituteInClone(StmtPtr stmt, const VarMapping& var_mapping) {
341
+ VarSubMutator var_sub(var_mapping);
342
+ return Stmt::clone(std::move(stmt))->accept_mutator(&var_sub);
343
+ }
344
+
345
+ } // namespace tensorexpr
346
+ } // namespace jit
347
+ } // namespace torch
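ExprEval above wraps a codegen around a single expression by storing its result into a hidden one-element ret_val buffer, which is what value<T>() reads back. A minimal sketch with SimpleIREvaluator and a constant expression (the literals and the expected result are only illustrative; non-constant expressions would bind variables via bindVar or pass them as BufferArgs):

#include <torch/csrc/jit/tensorexpr/eval.h>

using namespace torch::jit::tensorexpr;

void eval_demo() {
  ExprHandle two = FloatImm::make(2.f);
  ExprHandle three = FloatImm::make(3.f);
  ExprEval<SimpleIREvaluator> eval(two * three + FloatImm::make(1.f));
  float r = eval.value<float>();  // expected: 7.f
  (void)r;
}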
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/exceptions.h ADDED
@@ -0,0 +1,91 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/Export.h>
4
+ #include <torch/csrc/jit/tensorexpr/fwd_decls.h>
5
+
6
+ #include <sstream>
7
+ #include <stdexcept>
8
+
9
+ // Forward declarations of types
10
+ namespace torch {
11
+ namespace jit {
12
+ namespace tensorexpr {
13
+ class Expr;
14
+ class Stmt;
15
+ } // namespace tensorexpr
16
+ } // namespace jit
17
+ } // namespace torch
18
+
19
+ // Forward declarations of functions
20
+ namespace std {
21
+ TORCH_API std::string to_string(const torch::jit::tensorexpr::ExprPtr);
22
+ TORCH_API std::string to_string(const torch::jit::tensorexpr::StmtPtr);
23
+ } // namespace std
24
+
25
+ namespace torch {
26
+ namespace jit {
27
+ namespace tensorexpr {
28
+
29
+ class unsupported_dtype : public std::runtime_error {
30
+ public:
31
+ explicit unsupported_dtype() : std::runtime_error("UNSUPPORTED DTYPE") {}
32
+ explicit unsupported_dtype(const std::string& err)
33
+ : std::runtime_error("UNSUPPORTED DTYPE: " + err) {}
34
+ };
35
+
36
+ class out_of_range_index : public std::runtime_error {
37
+ public:
38
+ explicit out_of_range_index() : std::runtime_error("OUT OF RANGE INDEX") {}
39
+ explicit out_of_range_index(const std::string& err)
40
+ : std::runtime_error("OUT OF RANGE INDEX: " + err) {}
41
+ };
42
+
43
+ class unimplemented_lowering : public std::runtime_error {
44
+ public:
45
+ explicit unimplemented_lowering()
46
+ : std::runtime_error("UNIMPLEMENTED LOWERING") {}
47
+ explicit unimplemented_lowering(ExprPtr expr)
48
+ : std::runtime_error("UNIMPLEMENTED LOWERING: " + std::to_string(expr)) {}
49
+ explicit unimplemented_lowering(StmtPtr stmt)
50
+ : std::runtime_error("UNIMPLEMENTED LOWERING: " + std::to_string(stmt)) {}
51
+ };
52
+
53
+ class malformed_input : public std::runtime_error {
54
+ public:
55
+ explicit malformed_input() : std::runtime_error("MALFORMED INPUT") {}
56
+ explicit malformed_input(const std::string& err)
57
+ : std::runtime_error("MALFORMED INPUT: " + err) {}
58
+ explicit malformed_input(ExprPtr expr)
59
+ : std::runtime_error("MALFORMED INPUT: " + std::to_string(expr)) {}
60
+ explicit malformed_input(const std::string& err, ExprPtr expr)
61
+ : std::runtime_error(
62
+ "MALFORMED INPUT: " + err + " - " + std::to_string(expr)) {}
63
+ explicit malformed_input(StmtPtr stmt)
64
+ : std::runtime_error("MALFORMED INPUT: " + std::to_string(stmt)) {}
65
+ explicit malformed_input(const std::string& err, StmtPtr stmt)
66
+ : std::runtime_error(
67
+ "MALFORMED INPUT: " + err + " - " + std::to_string(stmt)) {}
68
+ };
69
+
70
+ class malformed_ir : public std::runtime_error {
71
+ public:
72
+ explicit malformed_ir() : std::runtime_error("MALFORMED IR") {}
73
+ explicit malformed_ir(const std::string& err)
74
+ : std::runtime_error("MALFORMED IR: " + err) {}
75
+ explicit malformed_ir(ExprPtr expr)
76
+ : std::runtime_error("MALFORMED IR: " + std::to_string(expr)) {}
77
+ explicit malformed_ir(const std::string& err, ExprPtr expr)
78
+ : std::runtime_error(
79
+ "MALFORMED IR: " + err + " - " + std::to_string(expr)) {}
80
+ explicit malformed_ir(StmtPtr stmt)
81
+ : std::runtime_error("MALFORMED IR: " + std::to_string(stmt)) {}
82
+ explicit malformed_ir(const std::string& err, StmtPtr stmt)
83
+ : std::runtime_error(
84
+ "MALFORMED IR: " + err + " - " + std::to_string(stmt)) {}
85
+ };
86
+
87
+ TORCH_API std::string buildErrorMessage(const std::string& s = "");
88
+
89
+ } // namespace tensorexpr
90
+ } // namespace jit
91
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/external_functions_core.h ADDED
@@ -0,0 +1,29 @@
1
+ #pragma once
2
+
3
+ #include <ATen/ATen.h>
4
+ #include <ATen/Parallel.h>
5
+ #include <torch/csrc/Export.h>
6
+ #include <cstdint>
7
+
8
+ namespace torch {
9
+ namespace jit {
10
+ namespace tensorexpr {
11
+
12
+ #ifdef C10_MOBILE
13
+ extern "C" {
14
+ #endif
15
+ void DispatchParallel(
16
+ int8_t* func,
17
+ int64_t start,
18
+ int64_t stop,
19
+ int8_t* packed_data) noexcept;
20
+
21
+ TORCH_API void nnc_aten_free(int64_t bufs_num, void** ptrs) noexcept;
22
+
23
+ #ifdef C10_MOBILE
24
+ } // extern "C"
25
+ #endif
26
+
27
+ } // namespace tensorexpr
28
+ } // namespace jit
29
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/external_functions_registry.h ADDED
@@ -0,0 +1,61 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/Export.h>
4
+ #include <cstdint>
5
+ #include <string>
6
+ #include <unordered_map>
7
+
8
+ namespace torch {
9
+ namespace jit {
10
+ namespace tensorexpr {
11
+
12
+ // The external functions that could be called from NNC must have the same
13
+ // signature defined by `NNCExternalFunction`.
14
+ //
15
+ // Why this signature?
16
+ // It was picked for two reasons: 1) it should be generic enough to represent
17
+ // most of the ops we might want to call, 2) it should be possible to generate a
18
+ // code for this call in LLVM codegen.
19
+ // The first 5 parameters allow to pass any number of contiguous CPU tensors in
20
+ // case we need to run aten ops (TODO: support different devices). The first
21
+ // buffer in the array is assumed to be the output buffer. We couldn't use
22
+ // `at::Tensor` (or `c10::IValue`) type there directly as it would mean that
23
+ // we'd need to declare it in LLVM codegen in LLVM IR form, which would be very
24
+ // cumbersome and hard to maintain. Note that the dimensions of all tensors are
25
+ // concatenated into a single array buf_dims. We do not need to pass its length,
26
+ // since it can be deduced from total number of buffers and their ranks.
27
+ //
28
+ // The last 2 arguments allow passing any non-tensor arguments encoded as an
29
+ // array of int64_t values. The way they are encoded is not specified and could
30
+ // be arbitrary - whatever the most convenient for the specific bridge function
31
+ // is.
32
+ //
33
+ // The bridge functions must not throw exceptions - properly propagating them
34
+ // from the generated code is too cumbersome, and thus all calls to functions
35
+ // that could throw must be wrapped with try-catch blocks.
36
+ using NNCExternalFunction = void (*)(
37
+ int64_t bufs_num,
38
+ void** buf_data,
39
+ int64_t* buf_ranks,
40
+ int64_t* buf_dims,
41
+ int64_t* buf_strides,
42
+ int8_t* buf_dtypes,
43
+ int64_t args_num,
44
+ int64_t* extra_args);
45
+
46
+ // Return a global map "function-name" -> "function-pointer" for all registered
47
+ // in NNC external functions
48
+ TORCH_API std::unordered_map<std::string, NNCExternalFunction>&
49
+ getNNCFunctionRegistry();
50
+
51
+ // To register a new external function in NNC one needs to create an instance of
52
+ // this struct
53
+ struct RegisterNNCExternalFunction {
54
+ RegisterNNCExternalFunction(const std::string& name, NNCExternalFunction fn) {
55
+ getNNCFunctionRegistry()[name] = fn;
56
+ }
57
+ };
58
+
59
+ } // namespace tensorexpr
60
+ } // namespace jit
61
+ } // namespace torch
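A hypothetical bridge function with the exact NNCExternalFunction signature described above, plus its registration. Only the signature, the buffer-layout convention from the comment, and RegisterNNCExternalFunction come from this header; the name "nnc_my_copy" and the float-copy body are made up.

#include <torch/csrc/jit/tensorexpr/external_functions_registry.h>

// buf_data[0] is the output, buf_data[1] the single input; the dims of all
// buffers are concatenated in buf_dims, so the input's dims start at
// buf_dims[buf_ranks[0]]. No exceptions may escape this function.
static void nnc_my_copy(
    int64_t bufs_num,
    void** buf_data,
    int64_t* buf_ranks,
    int64_t* buf_dims,
    int64_t* buf_strides,
    int8_t* buf_dtypes,
    int64_t args_num,
    int64_t* extra_args) {
  if (bufs_num < 2) {
    return;
  }
  int64_t numel = 1;
  for (int64_t d = 0; d < buf_ranks[1]; ++d) {
    numel *= buf_dims[buf_ranks[0] + d];
  }
  auto* dst = static_cast<float*>(buf_data[0]);  // assumes float buffers
  auto* src = static_cast<float*>(buf_data[1]);
  for (int64_t i = 0; i < numel; ++i) {
    dst[i] = src[i];
  }
  (void)buf_strides;
  (void)buf_dtypes;
  (void)args_num;
  (void)extra_args;
}

// The registrar's constructor adds the function to the global name -> pointer
// map, making it callable from generated code as "nnc_my_copy".
static torch::jit::tensorexpr::RegisterNNCExternalFunction nnc_my_copy_reg(
    "nnc_my_copy", nnc_my_copy);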
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/fwd_decls.h ADDED
@@ -0,0 +1,129 @@
1
+ #pragma once
2
+ #include <c10/core/ScalarType.h>
3
+ #include <memory>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+ namespace tensorexpr {
8
+
9
+ template <typename Node>
10
+ using NodePtr = std::shared_ptr<Node>;
11
+
12
+ template <typename To, typename From>
13
+ NodePtr<To> to(NodePtr<From> x) {
14
+ return std::dynamic_pointer_cast<To>(x);
15
+ }
16
+
17
+ template <typename To, typename From>
18
+ NodePtr<To> static_to(NodePtr<From> x) {
19
+ return std::static_pointer_cast<To>(x);
20
+ }
21
+
22
+ template <typename Node, typename... Args>
23
+ NodePtr<Node> alloc(Args&&... args) {
24
+ return std::make_shared<Node>(std::forward<Args>(args)...);
25
+ }
26
+
27
+ class Buf;
28
+ class Expr;
29
+ class Stmt;
30
+ class Var;
31
+
32
+ using BufPtr = NodePtr<Buf>;
33
+ using ExprPtr = NodePtr<Expr>;
34
+ using StmtPtr = NodePtr<Stmt>;
35
+ using VarPtr = NodePtr<Var>;
36
+
37
+ class ExprHandle;
38
+ class VarHandle;
39
+ class BufHandle;
40
+
41
+ class Add;
42
+ class And;
43
+ class BitCast;
44
+ class Broadcast;
45
+ class Cast;
46
+ class CompareSelect;
47
+ class Div;
48
+ class IfThenElse;
49
+ class Intrinsics;
50
+ class Let;
51
+ class Load;
52
+ class Lshift;
53
+ class Max;
54
+ class MaxTerm;
55
+ class Min;
56
+ class MinTerm;
57
+ class Mod;
58
+ class Mul;
59
+ class Or;
60
+ class Polynomial;
61
+ class Ramp;
62
+ class ReduceOp;
63
+ class RoundOff;
64
+ class Rshift;
65
+ class Store;
66
+ class Sub;
67
+ class Term;
68
+ class Xor;
69
+ using AddPtr = NodePtr<Add>;
70
+ using AndPtr = NodePtr<And>;
71
+ using BitCastPtr = NodePtr<BitCast>;
72
+ using BroadcastPtr = NodePtr<Broadcast>;
73
+ using CastPtr = NodePtr<Cast>;
74
+ using CompareSelectPtr = NodePtr<CompareSelect>;
75
+ using DivPtr = NodePtr<Div>;
76
+ using IfThenElsePtr = NodePtr<IfThenElse>;
77
+ using IntrinsicsPtr = NodePtr<Intrinsics>;
78
+ using LetPtr = NodePtr<Let>;
79
+ using LoadPtr = NodePtr<Load>;
80
+ using LshiftPtr = NodePtr<Lshift>;
81
+ using MaxPtr = NodePtr<Max>;
82
+ using MaxTermPtr = NodePtr<MaxTerm>;
83
+ using MinPtr = NodePtr<Min>;
84
+ using MinTermPtr = NodePtr<MinTerm>;
85
+ using ModPtr = NodePtr<Mod>;
86
+ using MulPtr = NodePtr<Mul>;
87
+ using OrPtr = NodePtr<Or>;
88
+ using PolynomialPtr = NodePtr<Polynomial>;
89
+ using RampPtr = NodePtr<Ramp>;
90
+ using ReduceOpPtr = NodePtr<ReduceOp>;
91
+ using RoundOffPtr = NodePtr<RoundOff>;
92
+ using RshiftPtr = NodePtr<Rshift>;
93
+ using StorePtr = NodePtr<Store>;
94
+ using SubPtr = NodePtr<Sub>;
95
+ using TermPtr = NodePtr<Term>;
96
+ using XorPtr = NodePtr<Xor>;
97
+
98
+ class Allocate;
99
+ class AtomicAdd;
100
+ class Block;
101
+ class Cond;
102
+ class ExternalCall;
103
+ class ExternalCallWithAlloc;
104
+ class For;
105
+ class Free;
106
+ class FreeExt;
107
+ class PlacementAllocate;
108
+ class SyncThreads;
109
+ using AllocatePtr = NodePtr<Allocate>;
110
+ using AtomicAddPtr = NodePtr<AtomicAdd>;
111
+ using BlockPtr = NodePtr<Block>;
112
+ using CondPtr = NodePtr<Cond>;
113
+ using ExternalCallPtr = NodePtr<ExternalCall>;
114
+ using ExternalCallWithAllocPtr = NodePtr<ExternalCallWithAlloc>;
115
+ using ForPtr = NodePtr<For>;
116
+ using FreePtr = NodePtr<Free>;
117
+ using FreeExtPtr = NodePtr<FreeExt>;
118
+ using PlacementAllocatePtr = NodePtr<PlacementAllocate>;
119
+ using SyncThreadsPtr = NodePtr<SyncThreads>;
120
+
121
+ #define IMM_DECLARE(Type, Name) \
122
+ class Name##Imm; \
123
+ using Name##ImmPtr = NodePtr<Name##Imm>;
124
+ AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, IMM_DECLARE);
125
+ #undef IMM_DECLARE
126
+
127
+ } // namespace tensorexpr
128
+ } // namespace jit
129
+ } // namespace torch
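Not part of the diff above: a minimal usage sketch of the aliases and helpers declared in fwd_decls.h, assuming a PyTorch source build where torch/csrc/jit/tensorexpr/ir.h (added later in this commit) supplies the concrete node classes such as IntImm and Add.

#include <torch/csrc/jit/tensorexpr/ir.h>

using namespace torch::jit::tensorexpr;

int main() {
  // alloc<T>() wraps std::make_shared, so every IR node is owned by a
  // NodePtr<T> (a std::shared_ptr alias).
  ExprPtr one = alloc<IntImm>(1);
  ExprPtr two = alloc<IntImm>(2);
  ExprPtr e = alloc<Add>(one, two);

  // to<T>() is a checked downcast (dynamic_pointer_cast) that yields nullptr
  // on a type mismatch; static_to<T>() skips the runtime check when the
  // concrete type is already known.
  if (AddPtr add = to<Add>(e)) {
    IntImmPtr lhs = static_to<IntImm>(add->lhs());
    return lhs->value(); // 1
  }
  return -1;
}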
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/graph_opt.h ADDED
@@ -0,0 +1,115 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+ namespace tensorexpr {
8
+
9
+ // Optimize aten::cat ops in the given subgraph.
10
+ //
11
+ // Moving users of cat to its inputs.
12
+ // Cat ops get lowered into multiple loops, one per input. When the result
13
+ // of cat is used by some other op, it results in a situation where inlining
14
+ // of cat does not happen. This in turn results in intermediate buffers
15
+ // being created for the result of cat, since it is not inlined.
16
+ //
17
+ // For example, consider the following graph:
18
+ // graph(%x : Float(10, strides=[1], device=cpu),
19
+ // %y : Float(20, strides=[1], device=cpu)):
20
+ // %dim : int = prim::Constant[value=0]()
21
+ // %xy_list : Tensor[] = prim::ListConstruct(%x, %y)
22
+ // %cat : Float(60, strides=[1], device=cpu) = aten::cat(%xy_list, %dim)
23
+ // %5 : Float(60, strides=[1], device=cpu) = aten::log(%cat)
24
+ // return (%5)
25
+ //
26
+ // This will get lowered into:
27
+ // Allocate(aten_cat);
28
+ // for (...)
29
+ // aten_cat[...] = x[...]
30
+ // for (...)
31
+ // aten_cat[...] = y[...]
32
+ // for (...)
33
+ // aten_log[...] = log(aten_cat[...])
34
+ // Free(aten_cat);
35
+ // Note that aten_cat is not inlined into aten_log and it results in
36
+ // an intermediate buffer allocation as well.
37
+ //
38
+ // Optimization:
39
+ // We move the ops that use the result of `cat` into its inputs whenever
40
+ // possible.
41
+ //
42
+ // The graph above will be transformed to:
43
+ // graph(%x : Float(10, strides=[1], device=cpu),
44
+ // %y : Float(20, strides=[1], device=cpu)):
45
+ // %3 : int = prim::Constant[value=0]()
46
+ // %7 : Float(10, strides=[1], device=cpu) = aten::log(%x)
47
+ // %8 : Float(20, strides=[1], device=cpu) = aten::log(%y)
48
+ // %9 : Tensor[] = prim::ListConstruct(%7, %8)
49
+ // %10 : Float(60, strides=[1], device=cpu) = aten::cat(%9, %3)
50
+ // return (%10)
51
+ //
52
+ // This will get lowered into:
53
+ // for (...)
54
+ // aten_cat[...] = log(x[...])
55
+ // for (...)
56
+ // aten_cat[...] = log(y[...])
57
+ // aten_cat is the output buffer here.
58
+
59
+ bool OptimizeCat(const std::shared_ptr<Graph>& graph);
60
+
61
+ TORCH_API void annotateInputShapes(
62
+ const std::shared_ptr<Graph>& graph,
63
+ const std::vector<c10::optional<at::Tensor>>& example_inputs);
64
+ TORCH_API std::shared_ptr<Graph> removeUnusedSelfArgument(
65
+ const std::shared_ptr<Graph>& graph);
66
+ TORCH_API std::shared_ptr<Graph> removeGraphOutput(
67
+ const std::shared_ptr<Graph>& graph,
68
+ size_t idx);
69
+ TORCH_API std::shared_ptr<Graph> replaceListOutputWithTuple(
70
+ const std::shared_ptr<Graph>& graph);
71
+
72
+ // Perform \p ITERS rounds of "trimming" for the given \p GRAPH.
73
+ //
74
+ // Trimming means that we try to remove a small portion of the graph while
75
+ // keeping it valid. This is useful for debugging when we try to find a minimal
76
+ // example reproducing the issue at hand. When ITERS is 0, the graph remains
77
+ // unchanged; when ITERS is large, the graph usually becomes empty.
78
+ TORCH_API std::shared_ptr<Graph> trimGraph(
79
+ const std::shared_ptr<Graph>& graph,
80
+ int64_t iters);
81
+
82
+ // Scan all values in the given graph and replace each dimension with a size Xi
83
+ // present in \p SIZES with a symbolic shape Yi. Return a vector of symbol
84
+ // values [Y0, Y1, .., Yn].
85
+ //
86
+ // For example:
87
+ // Input:
88
+ // graph(%x : Float(10, 20, 30, 40)):
89
+ // %y : Float(10, 20, 30, 40) = aten::relu(%x)
90
+ // return %y
91
+ //
92
+ // If we run makeShapesSymbolic(graph, {20, 40}), then we'll get:
93
+ //
94
+ // graph(%x : Float(10, SS(-3), 30, SS(-5))):
95
+ // %y : Float(10, SS(-3), 30, SS(-5)) = aten::relu(%x)
96
+ // return %y
97
+ //
98
+ // and get {-3, -5} as the return value.
99
+ TORCH_API std::vector<int64_t> makeShapesSymbolic(
100
+ std::shared_ptr<Graph>& graph,
101
+ const std::vector<int64_t>& sizes);
102
+
103
+ // Inspect the graph and report whether it can be converted to TE IR.
104
+ // TODO: add error reporting for graphs that can't be converted.
105
+ TORCH_API bool isGraphCompilable(const std::shared_ptr<Graph>& graph);
106
+
107
+ // Examine the graph and (hackily) fill in missing tensor type info, such as
108
+ // scalar type, device, and strides. Ideally, this should be done by a proper
109
+ // dtype/device/shape propagation passes, but until they are ready we can use
110
+ // this, not always correct, workaround pass.
111
+ TORCH_API void fixupMissingShapeInfo(const std::shared_ptr<Graph>& graph);
112
+
113
+ } // namespace tensorexpr
114
+ } // namespace jit
115
+ } // namespace torch
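Again not part of the diff: a hedged sketch of driving two of the passes declared above (annotateInputShapes and makeShapesSymbolic) on a graph parsed from IR text. It assumes torch::jit::parseIR from torch/csrc/jit/ir/irparser.h and a concrete example tensor; the IR string mirrors the style of the comments in this header.

#include <ATen/ATen.h>
#include <torch/csrc/jit/ir/irparser.h>
#include <torch/csrc/jit/tensorexpr/graph_opt.h>

using namespace torch::jit;

std::shared_ptr<Graph> buildAndAnnotate() {
  auto graph = std::make_shared<Graph>();
  parseIR(
      R"IR(
graph(%x : Tensor):
  %y : Tensor = aten::relu(%x)
  return (%y))IR",
      graph.get());

  // Fill in the input's dtype/shape/device from a concrete example tensor.
  std::vector<c10::optional<at::Tensor>> example_inputs = {
      at::rand({10, 20}, at::kFloat)};
  tensorexpr::annotateInputShapes(graph, example_inputs);

  // Turn every dimension of size 20 into a symbolic shape; the return value
  // holds the generated symbol ids (negative integers, e.g. {-3}).
  std::vector<int64_t> syms = tensorexpr::makeShapesSymbolic(graph, {20});
  (void)syms;
  return graph;
}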
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/intrinsic_symbols.h ADDED
@@ -0,0 +1,22 @@
1
+ #pragma once
2
+
3
+ #ifdef TORCH_ENABLE_LLVM
4
+ #include <c10/util/ArrayRef.h>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+ namespace tensorexpr {
9
+
10
+ struct SymbolAddress {
11
+ const char* symbol;
12
+ void* address;
13
+
14
+ SymbolAddress(const char* sym, void* addr) : symbol(sym), address(addr) {}
15
+ };
16
+
17
+ c10::ArrayRef<SymbolAddress> getIntrinsicSymbols();
18
+
19
+ } // namespace tensorexpr
20
+ } // namespace jit
21
+ } // namespace torch
22
+ #endif // TORCH_ENABLE_LLVM
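A short sketch (not from the diff) of how the symbol table above might be consumed; it is only meaningful in a build configured with TORCH_ENABLE_LLVM, since the declarations are compiled out otherwise.

#ifdef TORCH_ENABLE_LLVM
#include <torch/csrc/jit/tensorexpr/intrinsic_symbols.h>
#include <cstdio>

// Dump the name -> host-address pairs that the NNC LLVM codegen registers so
// JIT-compiled kernels can resolve calls into runtime math helpers.
void dumpIntrinsicSymbols() {
  for (const auto& s : torch::jit::tensorexpr::getIntrinsicSymbols()) {
    std::printf("%s -> %p\n", s.symbol, s.address);
  }
}
#endif // TORCH_ENABLE_LLVM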
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir.h ADDED
@@ -0,0 +1,934 @@
1
+ #pragma once
2
+
3
+ #include <string>
4
+ #include <utility>
5
+ #include <vector>
6
+
7
+ #include <c10/util/string_utils.h>
8
+ #include <torch/csrc/jit/tensorexpr/exceptions.h>
9
+ #include <torch/csrc/jit/tensorexpr/expr.h>
10
+ #include <torch/csrc/jit/tensorexpr/fwd_decls.h>
11
+ #include <torch/csrc/jit/tensorexpr/stmt.h>
12
+
13
+ #include <ATen/core/ivalue.h>
14
+
15
+ namespace torch {
16
+ namespace jit {
17
+ namespace tensorexpr {
18
+
19
+ enum CompareSelectOperation {
20
+ kEQ = 0,
21
+ kGT,
22
+ kGE,
23
+ kLT,
24
+ kLE,
25
+ kNE,
26
+ };
27
+
28
+ enum CompareSelectBias {
29
+ kUnbiased,
30
+ kLikely,
31
+ kUnlikely,
32
+ };
33
+
34
+ inline int getPrecedence(IRNodeType ty) {
35
+ // Match C++ operator precedence rules, since some backends pretty-print
36
+ // expressions to C++. SEE: https://en.cppreference.com/w/cpp/language/operator_precedence
37
+ switch (ty) {
38
+ case kPrimitive:
39
+ return 0;
40
+ case kCast:
41
+ case kBitCast:
42
+ return 2;
43
+ case kAdd:
44
+ case kSub:
45
+ return 6;
46
+ case kMul:
47
+ case kDiv:
48
+ case kMod:
49
+ return 5;
50
+ case kMax:
51
+ case kMin:
52
+ return 99;
53
+ case kAnd:
54
+ return 11;
55
+ case kOr:
56
+ return 13;
57
+ case kLshift:
58
+ case kRshift:
59
+ return 7;
60
+ case kXor:
61
+ return 12;
62
+ case kCompareSelect:
63
+ return 16;
64
+ default:
65
+ return 99;
66
+ }
67
+ }
68
+
69
+ class TORCH_API Cast : public ExprNode<Cast> {
70
+ public:
71
+ ExprPtr src_value() const {
72
+ return src_value_;
73
+ }
74
+
75
+ void set_src_value(ExprPtr src_value) {
76
+ src_value_ = std::move(src_value);
77
+ }
78
+
79
+ static ExprHandle make(Dtype dtype, const ExprHandle& src_value) {
80
+ return ExprHandle(alloc<Cast>(dtype, src_value.node()));
81
+ }
82
+ Cast(Dtype dtype, ExprPtr src_value)
83
+ : ExprNodeBase(dtype, kCast), src_value_(std::move(src_value)) {}
84
+
85
+ bool isConstant() const override {
86
+ return src_value_->isConstant();
87
+ }
88
+
89
+ private:
90
+ ExprPtr src_value_;
91
+ };
92
+
93
+ template <typename T>
94
+ ExprHandle cast(const ExprHandle& src_value) {
95
+ return Cast::make(Dtype(ToDtype<T>(), src_value.dtype().lanes()), src_value);
96
+ }
97
+
98
+ // This is a bitwise cast, akin to bitcast in LLVM
99
+ class TORCH_API BitCast : public ExprNode<BitCast> {
100
+ public:
101
+ ExprPtr src_value() const {
102
+ return src_value_;
103
+ }
104
+
105
+ void set_src_value(ExprPtr src_value) {
106
+ src_value_ = std::move(src_value);
107
+ }
108
+
109
+ static ExprHandle make(Dtype dtype, const ExprHandle& src_value) {
110
+ return ExprHandle(alloc<BitCast>(dtype, src_value.node()));
111
+ }
112
+ BitCast(Dtype dtype, ExprPtr src_value)
113
+ : ExprNodeBase(dtype, kBitCast), src_value_(std::move(src_value)) {
114
+ TORCH_CHECK(src_value_->dtype().byte_size() == dtype.byte_size());
115
+ }
116
+
117
+ bool isConstant() const override {
118
+ return src_value_->isConstant();
119
+ }
120
+
121
+ private:
122
+ ExprPtr src_value_;
123
+ };
124
+
125
+ template <typename T>
126
+ ExprHandle bitcast(const ExprHandle& src_value) {
127
+ return BitCast::make(
128
+ Dtype(ToDtype<T>(), src_value.dtype().lanes()), src_value);
129
+ }
130
+
131
+ // Represent the expression node for binary operators.
132
+ // A CRTP pattern to share common code among the operators.
133
+ template <typename Op>
134
+ class BinaryOpNode : public ExprNode<Op> {
135
+ public:
136
+ ExprPtr lhs() const {
137
+ return this->lhs_;
138
+ }
139
+ ExprPtr rhs() const {
140
+ return this->rhs_;
141
+ }
142
+
143
+ void set_lhs(ExprPtr lhs) {
144
+ lhs_ = std::move(lhs);
145
+ }
146
+
147
+ void set_rhs(ExprPtr rhs) {
148
+ rhs_ = std::move(rhs);
149
+ }
150
+
151
+ static ExprHandle make(const ExprHandle& lhs, const ExprHandle& rhs) {
152
+ return ExprHandle(alloc<Op>(lhs.node(), rhs.node()));
153
+ }
154
+
155
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
156
+ BinaryOpNode(
157
+ ExprPtr lhs_v,
158
+ ExprPtr rhs_v,
159
+ IRNodeType expr_type,
160
+ ScalarType ret_type = ScalarType::Undefined)
161
+ : ExprNode<Op>(
162
+ // NOLINTNEXTLINE(clang-analyzer-core.CallAndMessage)
163
+ BinaryOpDtype(lhs_v->dtype(), rhs_v->dtype(), ret_type),
164
+ expr_type),
165
+ lhs_(CastIfNeeded(std::move(lhs_v), ExprNode<Op>::dtype())),
166
+ rhs_(CastIfNeeded(std::move(rhs_v), ExprNode<Op>::dtype())) {}
167
+
168
+ private:
169
+ static ExprPtr CastIfNeeded(ExprPtr expr, Dtype dst_dtype) {
170
+ if (expr->dtype() == dst_dtype) {
171
+ return expr;
172
+ }
173
+ return Cast::make(dst_dtype, ExprHandle(std::move(expr))).node();
174
+ }
175
+
176
+ ExprPtr lhs_;
177
+ ExprPtr rhs_;
178
+ };
179
+
180
+ namespace detail {
181
+ template <typename T>
182
+ void bin_op_deducer(BinaryOpNode<T>);
183
+ bool bin_op_deducer(...);
184
+ } // namespace detail
185
+
186
+ class TORCH_API Add : public BinaryOpNode<Add> {
187
+ public:
188
+ Add(ExprPtr lhs, ExprPtr rhs)
189
+ : BinaryOpNode(std::move(lhs), std::move(rhs), IRNodeType::kAdd) {}
190
+ };
191
+
192
+ class TORCH_API Sub : public BinaryOpNode<Sub> {
193
+ public:
194
+ Sub(ExprPtr lhs, ExprPtr rhs)
195
+ : BinaryOpNode(std::move(lhs), std::move(rhs), IRNodeType::kSub) {}
196
+ };
197
+
198
+ class TORCH_API Mul : public BinaryOpNode<Mul> {
199
+ public:
200
+ Mul(ExprPtr lhs, ExprPtr rhs)
201
+ : BinaryOpNode(std::move(lhs), std::move(rhs), IRNodeType::kMul) {}
202
+ };
203
+
204
+ class TORCH_API Div : public BinaryOpNode<Div> {
205
+ public:
206
+ Div(ExprPtr lhs, ExprPtr rhs)
207
+ : BinaryOpNode(std::move(lhs), std::move(rhs), IRNodeType::kDiv) {}
208
+ };
209
+
210
+ class TORCH_API Mod : public BinaryOpNode<Mod> {
211
+ public:
212
+ Mod(ExprPtr lhs, ExprPtr rhs)
213
+ : BinaryOpNode(std::move(lhs), std::move(rhs), IRNodeType::kMod) {}
214
+ };
215
+
216
+ template <typename Op>
217
+ class BitwiseOpNode : public BinaryOpNode<Op> {
218
+ public:
219
+ BitwiseOpNode(ExprPtr lhs, ExprPtr rhs, IRNodeType type)
220
+ : BinaryOpNode<Op>(std::move(lhs), std::move(rhs), type) {}
221
+
222
+ static ExprHandle make(const ExprHandle& lhs, const ExprHandle& rhs) {
223
+ if (!lhs.dtype().is_integral()) {
224
+ throw unsupported_dtype();
225
+ }
226
+ if (lhs.dtype() != rhs.dtype()) {
227
+ throw malformed_input("lhs/rhs dtype mismatch");
228
+ }
229
+ return BinaryOpNode<Op>::make(lhs, rhs);
230
+ }
231
+ };
232
+
233
+ class TORCH_API And : public BitwiseOpNode<And> {
234
+ public:
235
+ And(ExprPtr lhs, ExprPtr rhs)
236
+ : BitwiseOpNode(std::move(lhs), std::move(rhs), IRNodeType::kAnd) {}
237
+ };
238
+
239
+ class TORCH_API Or : public BitwiseOpNode<Or> {
240
+ public:
241
+ Or(ExprPtr lhs, ExprPtr rhs)
242
+ : BitwiseOpNode(std::move(lhs), std::move(rhs), IRNodeType::kOr) {}
243
+ };
244
+
245
+ class TORCH_API Xor : public BitwiseOpNode<Xor> {
246
+ public:
247
+ Xor(ExprPtr lhs, ExprPtr rhs)
248
+ : BitwiseOpNode(std::move(lhs), std::move(rhs), IRNodeType::kXor) {}
249
+ };
250
+
251
+ class TORCH_API Lshift : public BitwiseOpNode<Lshift> {
252
+ public:
253
+ Lshift(ExprPtr lhs, ExprPtr rhs)
254
+ : BitwiseOpNode(std::move(lhs), std::move(rhs), IRNodeType::kLshift) {}
255
+ };
256
+
257
+ class TORCH_API Rshift : public BitwiseOpNode<Rshift> {
258
+ public:
259
+ Rshift(ExprPtr lhs, ExprPtr rhs)
260
+ : BitwiseOpNode(std::move(lhs), std::move(rhs), IRNodeType::kRshift) {}
261
+ };
262
+
263
+ // TODO: add TORCH_API
264
+ // Currently adding it results in a compilation error on Windows
265
+ class Max : public BinaryOpNode<Max> {
266
+ private:
267
+ bool propagate_nans_;
268
+
269
+ public:
270
+ Max(ExprPtr lhs, ExprPtr rhs, bool propagate_nans)
271
+ : BinaryOpNode(std::move(lhs), std::move(rhs), IRNodeType::kMax),
272
+ propagate_nans_(propagate_nans) {}
273
+
274
+ bool propagate_nans() const {
275
+ return propagate_nans_;
276
+ }
277
+
278
+ static ExprHandle make(const ExprHandle& lhs, const ExprHandle& rhs) = delete;
279
+ static ExprHandle make(
280
+ const ExprHandle& lhs,
281
+ const ExprHandle& rhs,
282
+ bool propagate_nans) {
283
+ return ExprHandle(alloc<Max>(lhs.node(), rhs.node(), propagate_nans));
284
+ }
285
+ };
286
+
287
+ // TODO: add TORCH_API
288
+ // Currently adding it results in a compilation error on Windows
289
+ class Min : public BinaryOpNode<Min> {
290
+ private:
291
+ bool propagate_nans_;
292
+
293
+ public:
294
+ Min(ExprPtr lhs, ExprPtr rhs, bool propagate_nans)
295
+ : BinaryOpNode(std::move(lhs), std::move(rhs), IRNodeType::kMin),
296
+ propagate_nans_(propagate_nans) {}
297
+
298
+ bool propagate_nans() const {
299
+ return propagate_nans_;
300
+ }
301
+
302
+ static ExprHandle make(const ExprHandle& lhs, const ExprHandle& rhs) = delete;
303
+ static ExprHandle make(
304
+ const ExprHandle& lhs,
305
+ const ExprHandle& rhs,
306
+ bool propagate_nans) {
307
+ return ExprHandle(alloc<Min>(lhs.node(), rhs.node(), propagate_nans));
308
+ }
309
+ };
310
+
311
+ // Encode typed immediate values e.g. IntImm, FloatImm.
312
+ #define IMM_DECLARE(Type, Name) \
313
+ class TORCH_API Name##Imm : public ExprNode<Name##Imm> { \
314
+ public: \
315
+ Name##Imm(Type value) \
316
+ : ExprNodeBase(k##Name, kPrimitive), value_(value) {} \
317
+ bool isConstant() const override { \
318
+ return true; \
319
+ } \
320
+ Type value() const { \
321
+ return value_; \
322
+ } \
323
+ static ExprHandle make(Type value) { \
324
+ return ExprHandle(alloc<Name##Imm>(value)); \
325
+ } \
326
+ \
327
+ private: \
328
+ Type value_; \
329
+ };
330
+ AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, IMM_DECLARE);
331
+ #undef IMM_DECLARE
332
+
333
+ // Get immediate by ScalarType.
334
+ template <typename T>
335
+ ExprPtr getImmediateByType(ScalarType immType, T initialVal) {
336
+ switch (immType) {
337
+ #define TYPE_CASE(Type, Name) \
338
+ case ScalarType::Name: \
339
+ return alloc<Name##Imm>(Type(initialVal));
340
+ // NOLINTNEXTLINE(bugprone-branch-clone)
341
+ AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, TYPE_CASE);
342
+ #undef TYPE_CASE
343
+ default:
344
+ throw unsupported_dtype();
345
+ }
346
+ return nullptr;
347
+ }
348
+
349
+ template <typename T>
350
+ ExprPtr getImmediateByType(Dtype dtype, T initialVal) {
351
+ return getImmediateByType<T>(dtype.scalar_type(), initialVal);
352
+ }
353
+
354
+ template <typename T>
355
+ ExprPtr immLike(const ExprPtr& e, T v) {
356
+ return getImmediateByType<T>(e->dtype(), v);
357
+ }
358
+
359
+ template <typename T>
360
+ ExprPtr immLike(const ExprHandle& e, T v) {
361
+ return immLike(e.node(), v);
362
+ }
363
+
364
+ inline c10::optional<int64_t> intValue(const ExprPtr& e) {
365
+ #define TYPE_CASE(Type, Name) \
366
+ if (auto v = to<Name##Imm>(e)) { \
367
+ return v->value(); \
368
+ }
369
+ AT_FORALL_INT_TYPES(TYPE_CASE);
370
+ #undef TYPE_CASE
371
+ return c10::nullopt;
372
+ }
373
+
374
+ inline c10::optional<int64_t> intValue(const ExprHandle& e) {
375
+ return intValue(e.node());
376
+ }
377
+
378
+ template <typename T>
379
+ T immediateAs(const ExprPtr& e) {
380
+ #define TYPE_CASE(Type, Name) \
381
+ if (Name##ImmPtr imm = to<Name##Imm>(e)) { \
382
+ return imm->value(); \
383
+ }
384
+ AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, TYPE_CASE);
385
+ #undef TYPE_CASE
386
+ throw unsupported_dtype();
387
+ return 0;
388
+ }
389
+
390
+ template <typename T>
391
+ T immediateAs(const ExprHandle& e) {
392
+ return immediateAs<T>(e.node());
393
+ }
394
+
395
+ template <typename T>
396
+ bool immediateEquals(const ExprPtr& e, T val) {
397
+ #define TYPE_CASE(Type, Name) \
398
+ if (Name##ImmPtr imm = to<Name##Imm>(e)) { \
399
+ return imm->value() == val; \
400
+ }
401
+ AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, TYPE_CASE);
402
+ #undef TYPE_CASE
403
+ throw unsupported_dtype();
404
+ return false;
405
+ }
406
+
407
+ TORCH_API bool immediateIsNegative(const ExprPtr& e);
408
+
409
+ TORCH_API bool immediateIsPositive(const ExprPtr& e);
410
+
411
+ TORCH_API bool immediateIsZero(const ExprPtr& e);
412
+
413
+ // Represents a ramp vector node:
414
+ // [base, base + 1 * stride, ... , base + (lanes - 1) * stride]
415
+ class TORCH_API Ramp : public ExprNode<Ramp> {
416
+ public:
417
+ ExprPtr base() const {
418
+ return base_;
419
+ }
420
+ ExprPtr stride() const {
421
+ return stride_;
422
+ }
423
+
424
+ void set_base(ExprPtr base) {
425
+ base_ = std::move(base);
426
+ }
427
+
428
+ void set_stride(ExprPtr stride) {
429
+ stride_ = std::move(stride);
430
+ }
431
+
432
+ static ExprHandle make(
433
+ const ExprHandle& base,
434
+ const ExprHandle& stride,
435
+ int lanes) {
436
+ if (stride.dtype() != base.dtype()) {
437
+ throw malformed_input("Bad stride in Ramp");
438
+ }
439
+ return ExprHandle(alloc<Ramp>(base.node(), stride.node(), lanes));
440
+ }
441
+ int lanes() const {
442
+ return lanes_;
443
+ }
444
+
445
+ Ramp(ExprPtr base, ExprPtr stride, int lanes)
446
+ : ExprNodeBase(Dtype(base->dtype(), lanes)),
447
+ base_(std::move(base)),
448
+ stride_(std::move(stride)),
449
+ lanes_(lanes) {}
450
+
451
+ private:
452
+ ExprPtr base_;
453
+ ExprPtr stride_;
454
+ int lanes_;
455
+ };
456
+
457
+ class TORCH_API Load : public ExprNode<Load> {
458
+ public:
459
+ VarPtr base_handle() const {
460
+ return buf_->base_handle();
461
+ }
462
+ std::vector<ExprPtr> indices() const {
463
+ return indices_;
464
+ }
465
+ ExprPtr flat_index() const {
466
+ TORCH_CHECK(indices_.size() == 1, "Indices haven't been flattened.");
467
+ return indices_[0];
468
+ }
469
+ BufPtr buf() const {
470
+ return buf_;
471
+ }
472
+
473
+ void set_buf(BufPtr buf) {
474
+ buf_ = std::move(buf);
475
+ }
476
+
477
+ void set_indices(std::vector<ExprPtr> indices) {
478
+ indices_ = std::move(indices);
479
+ }
480
+
481
+ static ExprHandle make(
482
+ Dtype dtype,
483
+ const BufHandle& buf,
484
+ const std::vector<ExprHandle>& indices);
485
+ static ExprHandle make(
486
+ const BufHandle& buf,
487
+ const std::vector<ExprHandle>& indices);
488
+
489
+ Load(Dtype dtype, BufPtr base_handle, std::vector<ExprPtr> indices);
490
+ Load(BufPtr base_handle, const std::vector<ExprPtr>& indices);
491
+
492
+ private:
493
+ BufPtr buf_;
494
+ std::vector<ExprPtr> indices_;
495
+ };
496
+
497
+ class TORCH_API Broadcast : public ExprNode<Broadcast> {
498
+ public:
499
+ ExprPtr value() const {
500
+ return value_;
501
+ }
502
+
503
+ void set_value(ExprPtr value) {
504
+ value_ = std::move(value);
505
+ }
506
+
507
+ int lanes() const {
508
+ return lanes_;
509
+ }
510
+ static ExprHandle make(const ExprHandle& value, int lanes) {
511
+ return ExprHandle(alloc<Broadcast>(value.node(), lanes));
512
+ }
513
+ Broadcast(ExprPtr value, int lanes)
514
+ : ExprNodeBase(Dtype(value->dtype(), lanes)),
515
+ value_(std::move(value)),
516
+ lanes_(lanes) {}
517
+
518
+ private:
519
+ ExprPtr value_;
520
+ int lanes_;
521
+ };
522
+
523
+ class TORCH_API IfThenElse : public ExprNode<IfThenElse> {
524
+ public:
525
+ ExprPtr condition() const {
526
+ return condition_;
527
+ }
528
+
529
+ // Lazily evaluated only if condition is true
530
+ ExprPtr true_value() const {
531
+ return true_;
532
+ }
533
+
534
+ // Lazily evaluated only if condition is false
535
+ ExprPtr false_value() const {
536
+ return false_;
537
+ }
538
+
539
+ void set_condition(ExprPtr condition) {
540
+ condition_ = std::move(condition);
541
+ }
542
+
543
+ void set_true_value(ExprPtr true_value) {
544
+ true_ = std::move(true_value);
545
+ }
546
+
547
+ void set_false_value(ExprPtr false_value) {
548
+ false_ = std::move(false_value);
549
+ }
550
+
551
+ static ExprHandle make(
552
+ const ExprHandle& c,
553
+ const ExprHandle& t,
554
+ const ExprHandle& f) {
555
+ if (!c.dtype().is_integral()) {
556
+ throw unsupported_dtype();
557
+ }
558
+ if (c.dtype().lanes() != 1) {
559
+ throw unsupported_dtype();
560
+ }
561
+ if (t.dtype() != f.dtype()) {
562
+ throw malformed_input("Bad dtype in IfThenElse");
563
+ }
564
+ return ExprHandle(alloc<IfThenElse>(c.node(), t.node(), f.node()));
565
+ }
566
+
567
+ IfThenElse(ExprPtr c, ExprPtr t, ExprPtr f)
568
+ : ExprNodeBase(t->dtype()),
569
+ condition_(std::move(c)),
570
+ true_(std::move(t)),
571
+ false_(std::move(f)) {}
572
+
573
+ private:
574
+ ExprPtr condition_;
575
+ ExprPtr true_;
576
+ ExprPtr false_;
577
+ };
578
+
579
+ class TORCH_API CompareSelect : public ExprNode<CompareSelect> {
580
+ public:
581
+ CompareSelectOperation compare_select_op() const {
582
+ return compare_op_;
583
+ }
584
+ ExprPtr lhs() const {
585
+ return this->lhs_;
586
+ }
587
+ ExprPtr rhs() const {
588
+ return this->rhs_;
589
+ }
590
+ ExprPtr ret_val1() const {
591
+ return this->ret_val1_;
592
+ }
593
+ ExprPtr ret_val2() const {
594
+ return this->ret_val2_;
595
+ }
596
+
597
+ void set_lhs(ExprPtr lhs) {
598
+ lhs_ = std::move(lhs);
599
+ }
600
+
601
+ void set_rhs(ExprPtr rhs) {
602
+ rhs_ = std::move(rhs);
603
+ }
604
+
605
+ void set_ret_val1(ExprPtr ret_val1) {
606
+ ret_val1_ = std::move(ret_val1);
607
+ }
608
+
609
+ void set_ret_val2(ExprPtr ret_val2) {
610
+ ret_val2_ = std::move(ret_val2);
611
+ }
612
+
613
+ CompareSelectBias bias() const {
614
+ return bias_;
615
+ }
616
+
617
+ static ExprHandle make(
618
+ const ExprHandle& lhs,
619
+ const ExprHandle& rhs,
620
+ CompareSelectOperation cmp_op,
621
+ CompareSelectBias bias = kUnbiased) {
622
+ if (lhs.dtype() != rhs.dtype()) {
623
+ throw malformed_input("bad dtype in CompareSelect");
624
+ }
625
+ return ExprHandle(alloc<CompareSelect>(
626
+ lhs.node(),
627
+ rhs.node(),
628
+ IntImm::make(1).node(),
629
+ IntImm::make(0).node(),
630
+ cmp_op,
631
+ bias));
632
+ }
633
+
634
+ static ExprHandle make(
635
+ const ExprHandle& lhs,
636
+ const ExprHandle& rhs,
637
+ const ExprHandle& ret_val1,
638
+ const ExprHandle& ret_val2,
639
+ CompareSelectOperation cmp_op,
640
+ CompareSelectBias bias = kUnbiased) {
641
+ if (lhs.dtype() != rhs.dtype() || ret_val1.dtype() != ret_val2.dtype()) {
642
+ throw malformed_input("bad dtype in CompareSelect");
643
+ }
644
+ return ExprHandle(alloc<CompareSelect>(
645
+ lhs.node(),
646
+ rhs.node(),
647
+ ret_val1.node(),
648
+ ret_val2.node(),
649
+ cmp_op,
650
+ bias));
651
+ }
652
+
653
+ CompareSelect(
654
+ ExprPtr lhs,
655
+ ExprPtr rhs,
656
+ ExprPtr ret_val1,
657
+ ExprPtr ret_val2,
658
+ CompareSelectOperation cmp_op,
659
+ CompareSelectBias bias = kUnbiased)
660
+ : ExprNodeBase(ret_val1->dtype()),
661
+ lhs_(std::move(lhs)),
662
+ rhs_(std::move(rhs)),
663
+ ret_val1_(std::move(ret_val1)),
664
+ ret_val2_(std::move(ret_val2)),
665
+ compare_op_(cmp_op),
666
+ bias_(bias) {}
667
+
668
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
669
+ CompareSelect(
670
+ ExprPtr lhs,
671
+ ExprPtr rhs,
672
+ CompareSelectOperation cmp_op,
673
+ CompareSelectBias bias = kUnbiased)
674
+ : ExprNodeBase(kInt),
675
+ lhs_(std::move(lhs)),
676
+ rhs_(std::move(rhs)),
677
+ ret_val1_(alloc<IntImm>(1)),
678
+ ret_val2_(alloc<IntImm>(0)),
679
+ compare_op_(cmp_op),
680
+ bias_(bias) {}
681
+
682
+ private:
683
+ ExprPtr lhs_;
684
+ ExprPtr rhs_;
685
+ ExprPtr ret_val1_;
686
+ ExprPtr ret_val2_;
687
+ CompareSelectOperation compare_op_;
688
+ CompareSelectBias bias_;
689
+ };
690
+
691
+ enum IntrinsicsOp {
692
+ kSin,
693
+ kCos,
694
+ kTan,
695
+ kAsin,
696
+ kAcos,
697
+ kAtan,
698
+ kAtan2,
699
+ kSinh,
700
+ kCosh,
701
+ kTanh,
702
+ kSigmoid,
703
+ kExp,
704
+ kExpm1,
705
+ kAbs,
706
+ kLog,
707
+ kLog2,
708
+ kLog10,
709
+ kLog1p,
710
+ kErf,
711
+ kErfc,
712
+ kSqrt,
713
+ kRsqrt,
714
+ kPow,
715
+ kCeil,
716
+ kFloor,
717
+ kRound,
718
+ kTrunc,
719
+ kFmod,
720
+ kRemainder,
721
+ kLgamma,
722
+ kFrac,
723
+ kIsNan,
724
+ kRand, // We need more discussions on this. Should we consider stateful?
725
+ kMaxIntrinsicsOp,
726
+ };
727
+
728
+ class TORCH_API Intrinsics : public ExprNode<Intrinsics> {
729
+ public:
730
+ static ExprHandle make(IntrinsicsOp op_type, const ExprHandle& v1) {
731
+ return ExprHandle(alloc<Intrinsics>(op_type, v1.node()));
732
+ }
733
+
734
+ static ExprHandle make(
735
+ IntrinsicsOp op_type,
736
+ const ExprHandle& v1,
737
+ const ExprHandle& v2) {
738
+ return ExprHandle(alloc<Intrinsics>(op_type, v1.node(), v2.node()));
739
+ }
740
+
741
+ static ExprHandle make(
742
+ IntrinsicsOp op_type,
743
+ const std::vector<ExprHandle>& params) {
744
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
745
+ std::vector<ExprPtr> params_nodes(params.size());
746
+ for (size_t i = 0; i < params.size(); i++) {
747
+ params_nodes[i] = params[i].node();
748
+ }
749
+ return ExprHandle(alloc<Intrinsics>(op_type, params_nodes));
750
+ }
751
+
752
+ static ExprHandle make(IntrinsicsOp op_type, Dtype dtype) {
753
+ return ExprHandle(alloc<Intrinsics>(op_type, dtype));
754
+ }
755
+
756
+ IntrinsicsOp op_type() const {
757
+ return op_type_;
758
+ }
759
+
760
+ std::string func_name() const {
761
+ switch (op_type()) {
762
+ case kSin:
763
+ return "sin";
764
+ case kCos:
765
+ return "cos";
766
+ case kTan:
767
+ return "tan";
768
+ case kAsin:
769
+ return "asin";
770
+ case kAcos:
771
+ return "acos";
772
+ case kAtan:
773
+ return "atan";
774
+ case kAtan2:
775
+ return "atan2";
776
+ case kSinh:
777
+ return "sinh";
778
+ case kCosh:
779
+ return "cosh";
780
+ case kTanh:
781
+ return "tanh";
782
+ case kSigmoid:
783
+ return "sigmoid";
784
+ case kExp:
785
+ return "exp";
786
+ case kAbs:
787
+ return "abs";
788
+ case kLog:
789
+ return "log";
790
+ case kLog2:
791
+ return "log2";
792
+ case kLog10:
793
+ return "log10";
794
+ case kLog1p:
795
+ return "log1p";
796
+ case kErf:
797
+ return "erf";
798
+ case kSqrt:
799
+ return "sqrt";
800
+ case kRsqrt:
801
+ return "rsqrt";
802
+ case kPow:
803
+ return "pow";
804
+ case kCeil:
805
+ return "ceil";
806
+ case kFloor:
807
+ return "floor";
808
+ case kRound:
809
+ return "round";
810
+ case kTrunc:
811
+ return "trunc";
812
+ case kRand:
813
+ return "rand";
814
+ case kFmod:
815
+ return "fmod";
816
+ case kRemainder:
817
+ return "remainder";
818
+ case kLgamma:
819
+ return "lgamma";
820
+ case kExpm1:
821
+ return "expm1";
822
+ case kErfc:
823
+ return "erfc";
824
+ case kFrac:
825
+ return "frac";
826
+ case kIsNan:
827
+ return "isnan";
828
+ default:
829
+ throw std::runtime_error(
830
+ "invalid op_type: " + c10::to_string(op_type()));
831
+ }
832
+ }
833
+
834
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
835
+ Intrinsics(IntrinsicsOp op_type, Dtype dtype)
836
+ : ExprNodeBase(IntrinsicsDtype(op_type, dtype)),
837
+ params_({}),
838
+ op_type_(op_type) {
839
+ if (OpArgCount(op_type) != 0) {
840
+ throw malformed_input("bad arg count in Intrinsics");
841
+ }
842
+ }
843
+
844
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
845
+ Intrinsics(IntrinsicsOp op_type, ExprPtr v1)
846
+ : ExprNodeBase(IntrinsicsDtype(op_type, v1->dtype())),
847
+ params_({std::move(v1)}),
848
+ op_type_(op_type) {
849
+ if (OpArgCount(op_type) != 1) {
850
+ throw malformed_input("bad arg count in Intrinsics");
851
+ }
852
+ }
853
+
854
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
855
+ Intrinsics(IntrinsicsOp op_type, ExprPtr v1, ExprPtr v2)
856
+ : ExprNodeBase(IntrinsicsDtype(op_type, v1->dtype(), v2->dtype())),
857
+ params_({std::move(v1), std::move(v2)}),
858
+ op_type_(op_type) {
859
+ if (OpArgCount(op_type) != 2) {
860
+ throw malformed_input("bad arg count in Intrinsics");
861
+ }
862
+ }
863
+
864
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
865
+ Intrinsics(IntrinsicsOp op_type, const std::vector<ExprPtr>& params)
866
+ : ExprNodeBase(IntrinsicsDtype(op_type, params)),
867
+ params_(params),
868
+ op_type_(op_type) {
869
+ if (OpArgCount(op_type) != nparams()) {
870
+ throw malformed_input("bad arg count in Intrinsics");
871
+ }
872
+ }
873
+
874
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
875
+ Intrinsics(
876
+ IntrinsicsOp op_type,
877
+ Dtype dtype,
878
+ const std::vector<ExprPtr>& params)
879
+ : ExprNodeBase(IntrinsicsDtype(op_type, dtype)),
880
+ params_(params),
881
+ op_type_(op_type) {
882
+ if (OpArgCount(op_type) != nparams()) {
883
+ throw malformed_input("bad arg count in Intrinsics");
884
+ }
885
+ }
886
+
887
+ bool isPure() const {
888
+ return op_type_ != kRand;
889
+ }
890
+
891
+ int nparams() const {
892
+ return params_.size();
893
+ }
894
+
895
+ ExprPtr param(int index) const {
896
+ return params_[index];
897
+ }
898
+ const std::vector<ExprPtr>& params() const {
899
+ return params_;
900
+ }
901
+
902
+ void set_params(std::vector<ExprPtr> params) {
903
+ params_ = std::move(params);
904
+ }
905
+
906
+ static int OpArgCount(IntrinsicsOp op_type);
907
+
908
+ private:
909
+ static Dtype IntrinsicsDtype(IntrinsicsOp op_type, Dtype dt1);
910
+ static Dtype IntrinsicsDtype(IntrinsicsOp op_type, Dtype dt1, Dtype dt2);
911
+ static Dtype IntrinsicsDtype(
912
+ IntrinsicsOp op_type,
913
+ const std::vector<ExprPtr>& params);
914
+
915
+ std::vector<ExprPtr> params_;
916
+ IntrinsicsOp op_type_;
917
+ };
918
+
919
+ TORCH_API std::vector<ExprPtr> ExprHandleVectorToExprVector(
920
+ const std::vector<ExprHandle>&);
921
+ TORCH_API std::vector<ExprHandle> ExprVectorToExprHandleVector(
922
+ const std::vector<ExprPtr>&);
923
+ TORCH_API std::vector<VarPtr> VarHandleVectorToVarVector(
924
+ const std::vector<VarHandle>&);
925
+ TORCH_API std::vector<VarHandle> VarVectorToVarHandleVector(
926
+ const std::vector<VarPtr>&);
927
+ TORCH_API ExprPtr flatten_index(
928
+ const std::vector<ExprPtr>& dims,
929
+ const std::vector<ExprPtr>& indices,
930
+ const std::vector<ExprPtr>& strides);
931
+
932
+ } // namespace tensorexpr
933
+ } // namespace jit
934
+ } // namespace torch
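To close out, a hedged usage sketch (not part of the commit) that composes a few of the expression factories declared in ir.h. It assumes the same PyTorch source build as the sketches above; nothing is evaluated here, the functions only build IR nodes.

#include <torch/csrc/jit/tensorexpr/ir.h>

using namespace torch::jit::tensorexpr;

// Build sqrt(max(x, 0)) as NNC IR: CompareSelect picks between x and 0, and
// Intrinsics wraps the unary sqrt call. ExprHandle is a thin wrapper over the
// shared_ptr node types declared in fwd_decls.h.
ExprHandle sqrtOfRelu(const ExprHandle& x) {
  ExprHandle zero = Cast::make(x.dtype(), FloatImm::make(0.f));
  ExprHandle relu =
      CompareSelect::make(x, zero, /*ret_val1=*/x, /*ret_val2=*/zero, kGT);
  return Intrinsics::make(kSqrt, relu);
}

// The immediate helpers make constant inspection straightforward: intValue()
// returns c10::nullopt unless the node is an integral immediate.
bool isZeroConstant(const ExprPtr& e) {
  auto v = intValue(e);
  return v.has_value() && *v == 0;
}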