applied-ai-018 committed on
Commit
d104494
·
verified ·
1 Parent(s): 014ad1c

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50) hide show
  1. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/compilation_unit.h +351 -0
  2. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/function_impl.h +194 -0
  3. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/method.h +81 -0
  4. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/module.h +685 -0
  5. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/object.h +200 -0
  6. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend.h +119 -0
  7. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_debug_info.h +65 -0
  8. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_detail.h +41 -0
  9. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_exception.h +54 -0
  10. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_init.h +11 -0
  11. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_interface.h +34 -0
  12. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_preprocess.h +18 -0
  13. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_resolver.h +10 -0
  14. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/builtin_functions.h +11 -0
  15. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/canonicalize_modified_loop.h +16 -0
  16. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/concrete_module_type.h +241 -0
  17. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/convert_to_ssa.h +16 -0
  18. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/edit_distance.h +15 -0
  19. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/error_report.h +54 -0
  20. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/exit_transforms.h +12 -0
  21. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/function_schema_parser.h +17 -0
  22. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/inline_loop_condition.h +16 -0
  23. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/ir_emitter.h +21 -0
  24. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/lexer.h +576 -0
  25. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/mini_environment.h +57 -0
  26. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/name_mangler.h +27 -0
  27. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/parse_string_literal.h +87 -0
  28. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/parser.h +33 -0
  29. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/parser_constants.h +7 -0
  30. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/resolver.h +68 -0
  31. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/schema_matching.h +70 -0
  32. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/schema_type_parser.h +40 -0
  33. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/script_type_parser.h +55 -0
  34. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/source_range.h +459 -0
  35. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/source_ref.h +47 -0
  36. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/strtod.h +12 -0
  37. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/sugared_value.h +857 -0
  38. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/tracer.h +414 -0
  39. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/tree.h +220 -0
  40. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/tree_views.h +1275 -0
  41. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/versioned_symbols.h +21 -0
  42. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/code.h +39 -0
  43. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/import_data.h +38 -0
  44. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/method.h +45 -0
  45. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/module.h +197 -0
  46. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/observer.h +110 -0
  47. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/parse_operators.h +27 -0
  48. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/profiler_edge.h +119 -0
  49. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/init.h +9 -0
  50. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/pybind.h +213 -0
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/compilation_unit.h ADDED
@@ -0,0 +1,351 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <ATen/core/function.h>
3
+ #include <c10/util/Exception.h>
4
+ #include <torch/csrc/jit/api/function_impl.h>
5
+ #include <torch/csrc/jit/frontend/name_mangler.h>
6
+ #include <torch/csrc/jit/frontend/source_range.h>
7
+ #include <torch/csrc/jit/ir/ir.h>
8
+ #include <torch/csrc/jit/runtime/graph_executor.h>
9
+
10
+ #include <torch/csrc/Export.h>
11
+
12
+ #include <ATen/core/function_schema.h>
13
+ #include <ATen/core/qualified_name.h>
14
+ #include <c10/util/ArrayRef.h>
15
+ #include <c10/util/Optional.h>
16
+
17
+ #include <functional>
18
+ #include <memory>
19
+ #include <mutex>
20
+ #include <ostream>
21
+ #include <string>
22
+ #include <unordered_map>
23
+ #include <vector>
24
+
25
+ namespace torch::jit {
26
+
27
// Forward declarations for AST / resolution types defined elsewhere in the
// frontend; used only by pointer/reference below.
struct Def;
struct Property;
struct ClassDef;
struct SugaredValue;
struct Resolver;

using ResolverPtr = std::shared_ptr<Resolver>;

// Abstraction over the `self` argument of a method being compiled: produces
// a SugaredValue wrapping the given IR Value and exposes the class type of
// the object that `self` refers to.
struct Self {
  virtual ~Self() = default;
  virtual std::shared_ptr<SugaredValue> makeSugared(Value* v) const = 0;
  virtual ClassTypePtr getClassType() const = 0;
};
39
+
40
// A CompilationUnit is a list of named Functions
// with helper methods to iterate the list or invoke the function.
// Classes have a CompilationUnit holding the class methods,
// and Modules have a CompilationUnit holding the Functions that
// are used to implement their Methods

struct TORCH_API CompilationUnit {
  enum class FunctionType { Method, Hook, PreHook };
  // constructor that takes a set of functions to compile using the native
  // resolver
  explicit CompilationUnit(const std::string& source);
  CompilationUnit() = default;

  // Movable but not copyable: functions_ holds unique_ptrs.
  CompilationUnit& operator=(CompilationUnit&&) = default;
  CompilationUnit(CompilationUnit&&) = default;
  CompilationUnit& operator=(const CompilationUnit&) = delete;
  CompilationUnit(const CompilationUnit&) = delete;

  // Returns the function registered under `name`, or nullptr if none exists.
  Function* find_function(const c10::QualifiedName& name) const {
    auto it = dict_.find(name);
    if (it == dict_.end()) {
      return nullptr;
    }
    return functions_[it->second].get();
  }

  // Like find_function(), but raises via TORCH_CHECK when `name` is not
  // defined in this compilation unit.
  Function& get_function(const c10::QualifiedName& name) const {
    if (auto r = find_function(name)) {
      return *r;
    }
    TORCH_CHECK(false, "attempted to get undefined function ", name.name());
  }

  void set_optimized(bool o) {
    TORCH_WARN(
        "CompilationUnit::set_optimized() is deprecated and has no effect. "
        "Please use setGraphExecutorOptimize()");
  }

  bool is_optimized() const {
    TORCH_WARN(
        "CompilationUnit::is_optimized() is deprecated and always returns true. "
        "Please use getGraphExecutorOptimize()");
    return true;
  }

  // for historic reasons, these are defined in ir_emitter.cpp
  // Returns the list of Functions just defined.
  std::vector<Function*> define(
      const c10::optional<c10::QualifiedName>& prefix,
      const std::vector<Property>& properties,
      const std::vector<ResolverPtr>& propResolvers,
      const std::vector<Def>& definitions,
      const std::vector<ResolverPtr>&
          defResolvers, /* determines how we handle free
                           variables in each definition*/
      // if non-null, the first argument to each def, is bound to this value
      const Self* self,
      // see [name mangling]
      bool shouldMangle = false,
      c10::optional<size_t> operator_set_version = c10::nullopt);

  void define_hooks(
      const c10::optional<c10::QualifiedName>& prefix,
      const std::vector<Def>& hookDefs,
      const std::vector<ResolverPtr>& hookResolvers,
      const std::vector<Def>& preHookDefs,
      const std::vector<ResolverPtr>& preHookResolvers,
      const Self* self,
      bool shouldMangle = false);

  // same as above but parse the definitions from source
  // Returns the list of Functions just defined.
  std::vector<Function*> define(
      // prefix namespace to put all the defined functions into
      const c10::optional<c10::QualifiedName>& prefix,
      const std::string& source,
      const ResolverPtr& resolver,
      const Self* self);

  void define_interface(
      const c10::QualifiedName& qualifiedName,
      const ClassDef& classDef,
      ResolverPtr rcb,
      bool is_module = false);

  // Wraps `graph` in a GraphFunction and registers it under `name`
  // (mangling the name first if `shouldMangle` is set). Returns a
  // non-owning pointer to the registered function; ownership stays with
  // this compilation unit.
  Function* create_function(
      c10::QualifiedName name,
      std::shared_ptr<Graph> graph,
      bool shouldMangle = false) {
    if (shouldMangle) {
      name = mangle(name);
    }
    auto fn = std::make_unique<GraphFunction>(
        std::move(name), std::move(graph), nullptr);
    auto ret = fn.get();
    register_function(std::move(fn));
    return ret;
  }

  // Non-owning view of all registered functions. NOTE(review): tombstoned
  // slots (see _clear_python_cu) are nulled in functions_, so entries here
  // may be nullptr — confirm callers tolerate that.
  std::vector<Function*> get_functions() const {
    return fmap(functions_, [](const std::unique_ptr<Function>& fn) {
      return fn.get();
    });
  }

  /// Run a method from this compilation.
  ///
  /// For example:
  /// @code
  ///   IValue output = module->run("relu_script", a, b);
  /// @endcode
  ///
  /// To compile a module from a source string, see torch::jit::compile
  ///
  /// @param method_name The name of the method to run
  /// @param args Arguments to be passed to the method
  /// @return An IValue containing the return value (or values if it is a tuple)
  /// from the method
  template <typename... Types>
  IValue run_method(const c10::QualifiedName& method_name, Types&&... args) {
    return get_function(method_name)({IValue(std::forward<Types>(args))...});
  }

  // Drops all functions and the name lookup table; registered types are kept.
  void drop_all_functions() {
    dict_.clear();
    functions_.clear();
  }

  /**
   * Register a class as being owned by this compilation unit.
   */
  void register_type(c10::NamedTypePtr namedType) {
    // TODO: class types cannot be redefined because we have no way right now
    // of invalidating their methods. NamedTuples are fine though, since they
    // don't have methods.
    TORCH_CHECK(
        0 == classDict_.count(*namedType->name()),
        "class '",
        namedType->name()->qualifiedName(),
        "' already defined.");
    classes_.push_back(std::move(namedType));
    classDict_[*classes_.back()->name()] = classes_.size() - 1;
  };

  // Returns the registered type as a ClassType, or nullptr if `name` is not
  // registered (or is registered as a different kind of NamedType).
  c10::ClassTypePtr get_class(const c10::QualifiedName& name) const {
    auto type = get_type(name);
    if (!type) {
      return nullptr;
    }
    return type->cast<c10::ClassType>();
  }

  c10::InterfaceTypePtr get_interface(const c10::QualifiedName& name) const {
    auto type = get_type(name);
    if (!type) {
      return nullptr;
    }
    return type->cast<c10::InterfaceType>();
  }

  // Linear scan over registered types; unlike get_class/get_interface this
  // does not use classDict_.
  c10::TupleTypePtr get_named_tuple(const c10::QualifiedName& name) const {
    for (const auto& cls : classes_) {
      if (cls->name()->qualifiedName() == name.qualifiedName()) {
        return cls->expect<TupleType>();
      }
    }
    return nullptr;
  }

  c10::NamedTypePtr get_type(const c10::QualifiedName& name) const {
    auto it = classDict_.find(name);
    if (it == classDict_.end()) {
      return nullptr;
    }
    return classes_[it->second];
  }

  // For testing: clear all Python-defined classes to ensure that unit tests
  // have isolation.
  void _clear_python_cu() {
    // Delete all the associated class methods
    for (const auto& type : classes_) {
      if (auto cls = type->cast<ClassType>()) {
        for (auto method : cls->methods()) {
          // Tombstone the method in the compilation unit.
          // Don't erase because the dict_
          auto it = dict_.find(method->qualname());
          if (it != dict_.end()) {
            functions_[it->second] = nullptr;
            // Erase in our big lookup table
            dict_.erase(it);
          }
        }
        // Classes can have multiple pointers to the same hook,
        // need to make sure to not delete it twice
        std::unordered_set<Function*> hooks_to_delete;
        for (const auto& hook : cls->getForwardHooks()) {
          hooks_to_delete.insert(hook);
        }
        for (const auto& pre_hook : cls->getForwardPreHooks()) {
          hooks_to_delete.insert(pre_hook);
        }
        for (const auto& hook : hooks_to_delete) {
          // Tombstone the hook in the compilation unit.
          auto it = dict_.find(hook->qualname());
          if (it != dict_.end()) {
            functions_[it->second] = nullptr;
            // Erase in our big lookup table
            dict_.erase(it);
          }
        }
      }
    }
    classes_.clear();
    classDict_.clear();
  }

  // [Internal Only] Remove method.
  // Note Used for freezing.
  void unsafeRemoveMethod(const c10::QualifiedName& method_name) {
    auto it = dict_.find(method_name);
    TORCH_CHECK(
        it != dict_.end(),
        "method '",
        method_name.qualifiedName(),
        "' does not exist.");
    functions_[it->second] = nullptr;
    dict_.erase(it);
  }

  // [name mangling] All code objects must have a unique qualified name in a
  // CompilationUnit. In Python, sometimes functions won't have unique qualified
  // name (for example, nested functions). So we mangle Python functions to
  // ensure that they are uniquely named.
  //
  // We also use mangling to distinguish different Module instances. Since each
  // Module is a singleton class instance, different instances of the same
  // Python Module will have different types but the same qualified name.
  c10::QualifiedName mangle(const c10::QualifiedName& name) const {
    auto mangled = name;
    // Keep mangling until the name collides with neither a type nor a
    // function already registered here.
    while (get_type(mangled) || find_function(mangled)) {
      mangled = mangler_.mangle(mangled);
    }
    return mangled;
  }

 private:
  std::unique_ptr<Function> define(
      const c10::optional<c10::QualifiedName>& prefix,
      const Def& def,
      const ResolverPtr& resolver,
      const Self* self,
      const std::unordered_map<std::string, Function*>& function_table,
      bool shouldMangle = false,
      FunctionType type = FunctionType::Method,
      c10::optional<size_t> version = c10::nullopt) const;

  // Define a property on \p self.
  struct PropertyPair;
  PropertyPair define_property(
      const c10::optional<c10::QualifiedName>& prefix,
      const Property& prop,
      const ResolverPtr& resolver,
      const Self* self,
      const std::unordered_map<std::string, Function*>& function_table,
      bool shouldMangle = false) const;

  // Takes ownership of `fn`; raises if a function with the same qualified
  // name is already registered.
  Function& register_function(std::unique_ptr<Function> fn) {
    TORCH_CHECK(
        0 == dict_.count(fn->qualname().qualifiedName()),
        "method '",
        fn->qualname().qualifiedName(),
        "' already defined.");
    functions_.emplace_back(std::move(fn));
    dict_[functions_.back()->qualname()] = functions_.size() - 1;
    return *functions_.back();
  }
  std::vector<std::unique_ptr<Function>> functions_;
  // for fast lookup
  std::unordered_map<c10::QualifiedName, size_t> dict_;
  std::unordered_map<c10::QualifiedName, size_t> classDict_;

  // [class ownership] Right now there are two relationships between classes
  // and compilation units:
  // 1. Classes have compilation units internally that hold their methods.
  // 2. On load, the TypePtrs of any imported classes are owned by the main
  // module's compilation unit.
  std::vector<c10::NamedTypePtr> classes_;

  // mutable so that const mangle() can advance the mangler's counter.
  mutable NameMangler mangler_;
};
332
+
333
// An owning pointer to a Function. Just a pair of a raw Function ptr and its
// owning CU. We need this because pybind requires a ref-counted way to refer to
// Functions.
struct StrongFunctionPtr {
  // Both members are asserted non-null; keeping the CU alive keeps the raw
  // Function pointer valid.
  StrongFunctionPtr(std::shared_ptr<CompilationUnit> cu, Function* function)
      : cu_(std::move(cu)), function_(function) {
    TORCH_INTERNAL_ASSERT(cu_);
    TORCH_INTERNAL_ASSERT(function_);
  }
  std::shared_ptr<CompilationUnit> cu_;
  Function* function_;
};
345
+
346
namespace script {
// We once had a `script::` namespace that was deleted. This is for backcompat
// of the public API; new code should not use this type alias.
using CompilationUnit = ::torch::jit::CompilationUnit;
} // namespace script
351
+ } // namespace torch::jit
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/function_impl.h ADDED
@@ -0,0 +1,194 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/function.h>
4
+ #include <torch/csrc/jit/ir/ir.h>
5
+ #include <torch/csrc/jit/runtime/graph_executor.h>
6
+
7
+ namespace torch::jit {
8
+
9
// A Function backed by a TorchScript Graph. Holds the original graph, a
// lazily-computed optimized copy per autocast specialization, and a
// lazily-created GraphExecutor per specialization.
struct TORCH_API GraphFunction : public Function {
  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
  GraphFunction(
      c10::QualifiedName name,
      std::shared_ptr<Graph> graph,
      std::function<void(GraphFunction&)> function_creator,
      c10::optional<ExecutorExecutionMode> executor_execution_mode =
          c10::nullopt)
      : name_(std::move(name)),
        graph_(std::move(graph)),
        executor_execution_mode_(executor_execution_mode),
        function_creator_(std::move(function_creator)) {}

  bool isGraphFunction() const override {
    return true;
  }

  void run(Stack& stack) override;

  std::function<void(GraphFunction&)> function_creator() const {
    return function_creator_;
  }

  c10::intrusive_ptr<c10::ivalue::Future> runAsync(
      Stack& stack,
      TaskLauncher taskLauncher = at::launch) override;

  // The original, non-optimized graph.
  std::shared_ptr<Graph> graph() const {
    return graph_;
  }

  // Lazily computes and caches a copy of graph_ for the current autocast
  // specialization, pre-optimizing it when the graph executor optimization
  // flag is on. Guarded by compile_mutex.
  std::shared_ptr<Graph> optimized_graph() const {
    std::lock_guard<std::recursive_mutex> lock(compile_mutex);
    auto& optimized_graph = optimized_graphs_[currentSpecialization()];
    if (optimized_graph) {
      return *optimized_graph;
    }
    optimized_graph = graph_->copy();
    if (getGraphExecutorOptimize()) {
      preoptimizeGraph(*optimized_graph, force_no_amp_);
    }
    return *optimized_graph;
  }

  const c10::QualifiedName& qualname() const override {
    return name_;
  }

  // private/unstable api. sets the initial execution mode
  // will not affect executor if there is an existing executor
  // created for this function
  void _set_initial_executor_execution_mode(ExecutorExecutionMode mode) {
    executor_execution_mode_ = mode;
  }
  // private/unstable api. sets flag of whether or not to ignore amp.
  // will not affect executor if there is an existing executor
  // created for this function
  void _set_ignore_amp(bool ignore_amp) {
    force_no_amp_ = ignore_amp;
  }

  // if this isn't yet defined, run its method_creator function
  void ensure_defined() override;

  size_t num_inputs() const override {
    return graph()->inputs().size();
  }

  Function& setSchema(FunctionSchema schema) override {
    schema_ = std::make_unique<FunctionSchema>(std::move(schema));
    return *this;
  }

  const FunctionSchema& getSchema() const override;

  GraphExecutorState getDebugState() {
    return get_executor().getDebugState();
  }

  bool is_optimized() const {
    TORCH_WARN(
        "GraphFunction::is_optimized() is deprecated and always returns true. "
        "Please use getGraphExecutorOptimize()");
    return true;
  }

  void check_single_output() {
    TORCH_CHECK(
        graph()->outputs().size() == 1,
        "Method (but not graphs in general) require a single output. Use None/Tuple for 0 or 2+ outputs");
  }

  // Lazily creates (and caches) the executor for the current autocast
  // specialization. Ensures the function is defined first; guarded by
  // compile_mutex.
  GraphExecutor& get_executor() {
    ensure_defined();
    std::lock_guard<std::recursive_mutex> lock(compile_mutex);
    auto& executor = executors_[currentSpecialization()];
    if (executor) {
      return *executor;
    }
    check_single_output();
    const std::string& name = name_.name();
    std::shared_ptr<Graph> opt_graph = optimized_graph();
    if (!executor_execution_mode_) {
      executor = GraphExecutor(opt_graph, name);
    } else {
      executor = GraphExecutor(opt_graph, name, *executor_execution_mode_);
    }
    return *executor;
  }

  using Function::call;
  bool call(
      Stack& stack,
      c10::optional<size_t> bailOut,
      c10::function_ref<void(const Code&)> f) override {
    f(get_executor().getPlanFor(stack, bailOut).code);
    return true;
  }

  // Drops every cached optimized graph; they will be recomputed on demand.
  void clear_optimized_graphs() {
    optimized_graphs_.fill(c10::nullopt);
  }

 private:
  enum SpecializationKey {
    AutocastOff,
    CpuAutocastOn,
    GpuAutocastOn,
    CpuGpuAutocastOn,

    // This provides the number of specializations
    // (Must be last entry)
    TotalCount
  };

  SpecializationKey currentSpecialization() const;

 private:
  c10::QualifiedName name_;
  // The original, non-optimized graph
  std::shared_ptr<Graph> graph_; // for debugging and for inlining

  // allows users to specify Simple/Profiling Executor for function
  // TODO: add more executors
  mutable c10::optional<ExecutorExecutionMode> executor_execution_mode_;

  // if invoked on a graph that has already traced through amp
  // don't invoke amp pass
  mutable bool force_no_amp_ = false;
  // Optimized graph, computed lazily. Used for inlining.
  mutable std::array<
      c10::optional<std::shared_ptr<Graph>>,
      SpecializationKey::TotalCount>
      optimized_graphs_;

  // GraphFunctions are invokable from multiple threads, so this lock needs to
  // be held when we're initializing graph executor for the first time or
  // computing the optimized graph. We're using reentrant mutex so that we don't
  // need to worry about causing a deadlock by calling one method from another
  // (e.g. optimized_graph() from get_executor()).
  mutable std::recursive_mutex compile_mutex;

  // executor_[0] - autocast off
  // executor_[1] - autocast cpu on
  // executor_[2] - autocast gpu on
  // executor_[3] - autocast cpu & gpu on
  std::array<c10::optional<GraphExecutor>, SpecializationKey::TotalCount>
      executors_;

  // an optional function that actually creates the method when
  // ensure_defined() is called. This is used by the compiler so
  // that it can construct methods out of order
  std::function<void(GraphFunction&)> function_creator_;

  // if absent, then we generate a default schema based on the graph
  // mutable because getSchema caches the default schema if one is requested
  // before a call to setSchema
  mutable std::unique_ptr<FunctionSchema> schema_;
};
188
+
189
// Short hands for dynamic_cast<GraphFunction*>.
// tryToGraphFunction yields nullptr when the Function is not a GraphFunction.
TORCH_API GraphFunction* tryToGraphFunction(Function&) noexcept;
TORCH_API GraphFunction& toGraphFunction(Function&);
TORCH_API const GraphFunction& toGraphFunction(const Function&);
193
+
194
+ } // namespace torch::jit
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/method.h ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/function.h>
4
+ #include <ATen/core/ivalue.h>
5
+ #include <ATen/core/stack.h>
6
+ #include <torch/csrc/api/include/torch/imethod.h>
7
+ #include <torch/csrc/jit/api/function_impl.h>
8
+
9
+ namespace torch::jit {
10
+
11
+ using ObjectPtr = c10::intrusive_ptr<c10::ivalue::Object>;
12
+
13
// A method in a module, e.g. f in:
//
// class M(ScriptModule):
//   @script_method
//   def f(self, x):
//     ...
// Note: because Method/Module are exposed to python these
// classes use python method naming conventions
struct TORCH_API Method : public torch::IMethod {
  Method(ObjectPtr owner, Function* function);

  // the module that contains this method.
  Module owner() const;
  void run(Stack& stack);
  void run(Stack&& stack) {
    run(stack);
  }

  c10::IValue operator()(
      std::vector<c10::IValue> stack,
      const Kwargs& kwargs = Kwargs()) const override;

  // Run the method asynchronously. Invoking this function starts a JIT
  // interpreter that executes ops inline, one by one, on the caller's thread.
  // A model can use an async op, i.e. `fork`, to launch an asynchronous task
  // which will be run on the provided `taskLauncher`.
  c10::intrusive_ptr<c10::ivalue::Future> run_async(
      std::vector<c10::IValue> stack,
      const Kwargs& kwargs = Kwargs(),
      TaskLauncher taskLauncher = at::launch);

  std::shared_ptr<Graph> graph() const {
    return toGraphFunction(*function_).graph();
  }

  const std::string& name() const override {
    return function_->name();
  }

  size_t num_inputs() const {
    return function_->num_inputs();
  }

  GraphExecutor& get_executor() {
    return toGraphFunction(*function_).get_executor();
  }

  Function& function() const {
    return *function_;
  }

 private:
  void setArgumentNames(std::vector<std::string>&) const override;

  // Methods are uniquely owned by a single module. This raw pointer allows
  // looking up the module.
  ObjectPtr owner_;

  // Underlying unbound function
  Function* function_;
};
74
+
75
namespace script {
// We once had a `script::` namespace that was deleted. This is for backcompat
// of the public API; new code should not use this type alias.
using Method = ::torch::jit::Method;
} // namespace script
80
+
81
+ } // namespace torch::jit
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/module.h ADDED
@@ -0,0 +1,685 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <c10/util/Exception.h>
3
+ #include <torch/csrc/autograd/variable.h>
4
+ #include <torch/csrc/jit/api/object.h>
5
+ #include <torch/csrc/jit/frontend/source_range.h>
6
+ #include <torch/csrc/jit/ir/ir.h>
7
+ #include <torch/csrc/jit/ir/named_value.h>
8
+ #include <torch/csrc/jit/runtime/argument_spec.h>
9
+ #include <torch/csrc/jit/runtime/graph_executor.h>
10
+
11
+ #include <torch/csrc/Export.h>
12
+ #include <torch/csrc/api/include/torch/ordered_dict.h>
13
+ #include <torch/csrc/jit/api/compilation_unit.h>
14
+
15
+ #include <ATen/core/function_schema.h>
16
+ #include <ATen/core/qualified_name.h>
17
+ #include <c10/util/ArrayRef.h>
18
+ #include <c10/util/Optional.h>
19
+ #include <c10/util/irange.h>
20
+
21
+ #include <functional>
22
+ #include <memory>
23
+ #include <mutex>
24
+ #include <ostream>
25
+ #include <string>
26
+ #include <unordered_map>
27
+ #include <unordered_set>
28
+ #include <utility>
29
+ #include <vector>
30
+
31
+ // This file contains classes which assist in desugaring Python style
32
+ // modules and their methods into flattened graphs which don't have any
33
+ // function calls.
34
+
35
+ namespace torch::jit {
36
+
37
+ using ::c10::Argument;
38
+ using ::c10::FunctionSchema;
39
+ using ::c10::QualifiedName;
40
+ // Map which stores filename to content.
41
+ using ExtraFilesMap = std::unordered_map<std::string, std::string>;
42
+
43
+ using ModulePtr = c10::intrusive_ptr<c10::ivalue::Object>;
44
+
45
+ struct Module;
46
+
47
+ template <typename T>
48
+ struct slot_list_impl;
49
+
50
// A (name, value) pair; used below to expose named members of a Module
// (submodules, parameters, attributes, buffers).
template <typename T>
struct Named {
  std::string name;
  T value;
};
55
+
56
using NameModule = Named<Module>;
using NameValue = Named<IValue>;
using NameTensor = Named<at::Tensor>;

// Policy tags consumed by slot_list_impl to select which slots of a module
// to iterate over; definitions live elsewhere.
namespace detail {
struct TORCH_API ModulePolicy;
struct TORCH_API ParameterPolicy;
struct TORCH_API AttributePolicy;
struct TORCH_API BufferPolicy;
template <typename P>
struct NamedPolicy;
} // namespace detail

// Unnamed and named iteration views over a module's slots, one pair per
// policy above.
using module_list = slot_list_impl<detail::ModulePolicy>;
using named_module_list =
    slot_list_impl<detail::NamedPolicy<detail::ModulePolicy>>;

using parameter_list = slot_list_impl<detail::ParameterPolicy>;
using named_parameter_list =
    slot_list_impl<detail::NamedPolicy<detail::ParameterPolicy>>;

using attribute_list = slot_list_impl<detail::AttributePolicy>;
using named_attribute_list =
    slot_list_impl<detail::NamedPolicy<detail::AttributePolicy>>;

using buffer_list = slot_list_impl<detail::BufferPolicy>;
using named_buffer_list =
    slot_list_impl<detail::NamedPolicy<detail::BufferPolicy>>;

// Resolves a submodule path (sequence of attribute names) to a Module.
using ModuleLookup = std::function<Module(const std::vector<std::string>&)>;
86
+
87
// The C++ counterpart of `torch.jit.ScriptModule`: an Object whose slots hold
// parameters, buffers, attributes, and submodules, and whose methods are owned
// by a CompilationUnit.
struct TORCH_API Module : public Object {
  explicit Module(c10::QualifiedName class_name);
  Module(std::shared_ptr<CompilationUnit> cu, const c10::ClassTypePtr& type);
  Module() = default;
  Module(const Module&) = default;
  Module& operator=(const Module&) = default;
  Module(Module&&) noexcept = default;
  Module& operator=(Module&&) noexcept = default;
  Module(
      c10::QualifiedName,
      std::shared_ptr<CompilationUnit> cu,
      bool shouldMangle = false);
  Module(ModulePtr module_value) : Object(std::move(module_value)) {}
  ~Module() = default;

  // Deprecated: has no effect; kept only for source compatibility.
  void set_optimized(bool o) {
    TORCH_WARN(
        "Module::set_optimized() is deprecated and has no effect. "
        "Please use setGraphExecutorOptimize()");
  }

  // Deprecated: always returns true; kept only for source compatibility.
  bool is_optimized() const {
    TORCH_WARN(
        "Module::is_optimized() is deprecated and always returns true. "
        "Please use getGraphExecutorOptimize()");
    return true;
  }

  // Convenience wrapper that invokes this module's `forward` method.
  IValue forward(std::vector<IValue> inputs, const Kwargs& kwargs = Kwargs()) {
    return get_method("forward")(std::move(inputs), kwargs);
  }

  // In script modules, buffers are Tensor attributes that are _not_ registered
  // as parameters. This is different than in nn.Module where there is a
  // special register_buffer method. With this simplification, we only need to
  // track whether a slot is a parameter to be able to classify it.
  void register_buffer(const std::string& name, at::Tensor v) {
    bool is_param = false;
    bool is_buffer = true;
    // Guard concurrent registration; see register_mutex_ below.
    std::lock_guard<std::mutex> lock(*register_mutex_);
    type()->addOrCheckAttribute(name, TensorType::get(), is_param, is_buffer);
    _ivalue()->setAttr(name, std::move(v));
  }

  // Registers `v` as a parameter, or as a buffer when `is_buffer` is true.
  void register_parameter(
      const std::string& name,
      at::Tensor v,
      bool is_buffer) {
    std::lock_guard<std::mutex> lock(*register_mutex_);
    type()->addOrCheckAttribute(name, TensorType::get(), !is_buffer, is_buffer);
    _ivalue()->setAttr(name, std::move(v));
  }

  // Registers a typed attribute slot and sets its initial value.
  // NOTE(review): unlike register_buffer/register_parameter this does not take
  // register_mutex_ — confirm whether that is intentional.
  void register_attribute(
      const std::string& name,
      const TypePtr& t,
      IValue v,
      bool is_param = false,
      bool is_buffer = false) {
    type()->addOrCheckAttribute(name, t, is_param, is_buffer);
    _ivalue()->setAttr(name, std::move(v));
  }

  // Registers `module` as a submodule attribute of this module.
  void register_module(const std::string& name, const Module& module) {
    type()->addOrCheckAttribute(name, module.type());
    _ivalue()->setAttr(name, module._ivalue());
  }

  // Applies `fn` to this module (declared elsewhere; presumably recursive over
  // submodules, mirroring nn.Module.apply — confirm at definition site).
  void apply(const std::function<void(Module&)>& fn);

  buffer_list buffers(bool recurse = true) const;
  named_buffer_list named_buffers(bool recurse = true) const;

  module_list children() const; // direct modules
  named_module_list named_children() const;
  module_list modules() const; // all modules, including this one, recursively
  named_module_list named_modules() const;

  // all tensors involved in gradient optimization
  parameter_list parameters(bool recurse = true) const;
  named_parameter_list named_parameters(bool recurse = true) const;

  // all members of the object, similar to iterating over dir(obj) in python
  attribute_list attributes(bool recurse = true) const;
  named_attribute_list named_attributes(bool recurse = true) const;

  // Prints a representation of the module; the flags control how much detail
  // is included.
  void dump(
      bool print_method_bodies,
      bool print_attr_values,
      bool print_param_values) const;

  // Like dump(), but returns the text instead of printing it.
  std::string dump_to_str(
      bool print_method_bodies,
      bool print_attr_values,
      bool print_param_values) const;

  /// Enables "training" mode.
  void train(bool on = true);
  /// Calls train(false) to enable "eval" mode.
  /// Do not override this method, override `train()` instead.
  void eval() {
    train(/*on=*/false);
  }
  /// True if the module is in training mode. Defaults to true when the
  /// `training` attribute is absent.
  bool is_training() const {
    return attr("training", true).toBool();
  }

  /// Recursively casts all parameters to the given `dtype` and `device`.
  ///
  /// If `non_blocking` is true and the source is in pinned memory and
  /// destination is on the GPU or vice versa, the copy is performed
  /// asynchronously with respect to the host. Otherwise, the argument has no
  /// effect.
  void to(at::Device device, at::ScalarType dtype, bool non_blocking = false);

  /// Recursively casts all parameters to the given dtype.
  ///
  /// If `non_blocking` is true and the source is in pinned memory and
  /// destination is on the GPU or vice versa, the copy is performed
  /// asynchronously with respect to the host. Otherwise, the argument has no
  /// effect.
  void to(at::ScalarType dtype, bool non_blocking = false);

  /// Recursively moves all parameters to the given device.
  ///
  /// If `non_blocking` is true and the source is in pinned memory and
  /// destination is on the GPU or vice versa, the copy is performed
  /// asynchronously with respect to the host. Otherwise, the argument has no
  /// effect.
  void to(at::Device device, bool non_blocking = false);

  /// Serializes the module to a stream, optionally attaching extra files.
  void save(
      std::ostream& out,
      const ExtraFilesMap& extra_files = ExtraFilesMap()) const;

  /// Serializes the module to a file path.
  void save(
      const std::string& filename,
      const ExtraFilesMap& extra_files = ExtraFilesMap()) const;

  /// Saves the module in the mobile (lite-interpreter) format.
  void _save_for_mobile(
      std::ostream& out,
      const ExtraFilesMap& extra_files = ExtraFilesMap(),
      bool save_mobile_debug_info = false,
      bool use_flatbuffer = false) const;

  void _save_for_mobile(
      const std::string& filename,
      const ExtraFilesMap& extra_files = ExtraFilesMap(),
      bool save_mobile_debug_info = false,
      bool use_flatbuffer = false) const;

  // Shallow copy (declared elsewhere; see definition for exact semantics).
  Module copy() const;

  // Deep copy, optionally placing the result on `device`.
  Module deepcopy(c10::optional<at::Device> device = c10::nullopt) const;

  // Clones both the underlying `ClassType` and the module instance(data), this
  // function creates a new `ClassType` and returns a new instance that has the
  // same data as the current instance but with the new type, shared ClassType
  // will be preserved as well
  Module clone(bool inplace = false) const;

  // Clones both the underlying `ClassType` and the module instance(data), this
  // function creates a new `ClassType` and returns a new instance that has the
  // same data as the current instance but with the new type, shared ClassType
  // will be preserved as well. Also allows the caller to specify a set of
  // method and attribute names to not clone.
  Module clone(
      bool inplace,
      const std::unordered_set<std::string>& ignored_method,
      const std::unordered_set<std::string>& ignored_attributes) const;

  // Copies method `name` from `orig` into this module.
  void clone_method(const Module& orig, const std::string& name);

  // Invokes `forward` (call operator); see forward() above.
  IValue operator()(std::vector<IValue> inputs);

  // Constructs an instance of class `name` from the given arguments.
  template <typename... Types>
  IValue create_class(const c10::QualifiedName& name, Types&&... args) const {
    return create_class(name, {IValue(std::forward<Types>(args))...});
  }

  IValue create_class(const c10::QualifiedName& name, Stack stack) const;

  // Identity comparison: two Modules are equal iff they wrap the same
  // underlying ivalue object.
  inline bool operator==(const Module& y) const noexcept {
    return _ivalue() == y._ivalue();
  }

  // Attach an extra buffer whose lifetime is tied to this module.
  void set_delete_memory(std::shared_ptr<char> delete_mem) {
    mem_to_delete_ = std::move(delete_mem);
  }

  // A set of functions to maintain input shapes through torch.jit.save and
  // torch.jit.load. It only works on tensors and lists/dicts of tensors
  // because tracing is only supported by these types.
  void store_traced_inputs(std::string func_name, std::vector<IValue> inputs) {
    if (inputs.size() == 0) {
      return;
    }
    auto c10_inputs = c10::impl::GenericList(AnyType::get());
    for (IValue& value : inputs) {
      // Not checking whether this is traceable type as that is already checked
      // higher up in the stack and changing that would require a larger
      // restructuring.
      c10_inputs.emplace_back(std::move(value));
    }
    traced_inputs_.insert_or_assign(func_name, c10_inputs);
  }

  // Returns the map recorded by store_traced_inputs() (by value).
  c10::Dict<std::string, c10::impl::GenericList> retrieve_traced_inputs()
      const {
    return traced_inputs_;
  }

 private:
  // Shared implementation behind both clone() overloads.
  Module clone_impl(
      std::unordered_map<TypePtr, TypePtr>& type_remap,
      bool inplace,
      IValue::HashAliasedIValueMap memo,
      const std::unordered_set<std::string>& ignored_methods,
      const std::unordered_set<std::string>& ignored_attributes) const;

  void clone_method(
      const Module& orig,
      const Function& method,
      const std::unordered_map<TypePtr, TypePtr>& type_remap);

  // Qualifies `basename` with this module's class name.
  c10::QualifiedName getNameForMethod(std::string basename) const {
    return QualifiedName(*type()->name(), std::move(basename));
  }

  // Shared implementation behind the three to() overloads.
  void to_impl(
      const c10::optional<at::Device>& device,
      const c10::optional<at::ScalarType>& dtype,
      bool non_blocking);

  // Extra handle for the module to delete when itself is deleted
  std::shared_ptr<char> mem_to_delete_;

  // Map of function names to the traced inputs that they have been traced with
  c10::Dict<std::string, c10::impl::GenericList> traced_inputs_;

  // Mutex to keep registering a buffer or parameter thread safe.
  std::shared_ptr<std::mutex> register_mutex_ = std::make_shared<std::mutex>();
};
331
+
332
// C++ equivalent api of `torch.jit.freeze`. See documentation there for
// details.
TORCH_API Module freeze(
    const Module& module,
    const c10::optional<std::vector<std::string>>& preserved_attrs =
        c10::nullopt,
    bool optimize_numerics = true);

// C++ equivalent api of `torch.jit.optimize_for_inference`. See documentation
// there for details.
TORCH_API Module optimize_for_inference(
    Module& module,
    const std::vector<std::string>& other_methods = {});

// Kind of shape specialization used when compiling fused ops; see the
// FusionStrategy commentary below.
enum class FusionBehavior { STATIC, DYNAMIC };

// Ordered list of (behavior, depth) pairs consumed by setFusionStrategy().
using FusionStrategy = std::vector<std::pair<FusionBehavior, size_t>>;
// clang-format off
/*
Sets the type and number of specializations that can occur during fusion.

Usage: provide a list of pairs (type, depth) where type is one of STATIC or DYNAMIC
and depth is an integer.

Behavior - static vs dynamic:
  In STATIC fusion, fused ops are compiled to have fixed input shapes. The shape is determined
  based on some initial profiling runs.
  In DYNAMIC fusion, fused ops are compiled to have variable input shapes, so that multiple
  shapes are possible.

In both cases, we also recompile on new striding behavior, device, or dtype.

Behavior - fallback functions & depth:
  When an input doesn't match the format required by the specialized compiled op, it will run
  a fallback function. Fallback functions are recursively compiled and specialized based
  on the observed tensor shapes. Since compilation can be slow, the "depth" parameter is provided to
  limit the number of specializations that can be compiled, before giving up on recompiling and
  falling back to a completely un-fused, un-specialized implementation.

The list of (type, depth) pairs controls the type of specializations and the number of
specializations. For example: [(STATIC, 2), (DYNAMIC, 2)] indicates that the first
two specializations will use static fusions, the following two specializations will use
dynamic fusion, and any inputs that satisfy none of the 4 options will run an
unfused implementation.

NB: in the future, as more fusion backends are added there may be more granular
apis for specific fusers.
*/
// clang-format on
TORCH_API FusionStrategy getFusionStrategy();
// Installs `fusion_strategy` and returns the previous strategy.
TORCH_API FusionStrategy setFusionStrategy(FusionStrategy& fusion_strategy);
384
+
385
namespace detail {

// One frame of the depth-first traversal performed by slot_iterator_impl:
// the module being visited plus the index of the slot currently under
// consideration.
struct TORCH_API SlotCursor {
  Module module_;
  int64_t i_; // slot offset, -1 indicates the module itself
};

} // namespace detail
393
+
394
// This iterator allows the (optionally recursive) enumeration of
// the members of a Module. It performs a depth-first pre-order
// traversal of the module. The Policy template parameter determines
// which slots of the object should be included. For instance,
// when iterating parameters, we return the parameter tensors,
// but skip modules, buffers, and other attributes.
// See ModulePolicy for comments about Policy object's API.
template <typename Policy>
struct slot_iterator_impl {
  using SlotCursor = detail::SlotCursor;
  using value_type = typename Policy::value_type;
  slot_iterator_impl(
      Module root,
      bool recurse, // if true, do a depth-first search, otherwise, just look at
                    // slots of root
      bool return_module) // if true include root itself as the first thing
                          // visited (used in modules())
      : cursors_({SlotCursor{std::move(root), return_module ? -1 : 0}}),
        recurse_(recurse) {
    // advance iterator to first valid element (or the end, if empty)
    while_not_valid_next();
  }
  // empty cursors_, represents end of iteration
  slot_iterator_impl() : recurse_(false) {}
  value_type operator*() const {
    return Policy::create(cursors_, cur());
  }
  // NOTE: returns by value (the elements live inside the module's IValue,
  // there is no stable object to point at).
  value_type operator->() const {
    return **this;
  }
  slot_iterator_impl& operator++() {
    next_valid();
    return *this;
  }
  slot_iterator_impl operator++(int) {
    // this is really expensive, should we delete it so people don't use it
    // instead of prefix?
    slot_iterator_impl old = *this;
    ++(*this);
    return old;
  }

 private:
  // return_module() is a corner case where instead of returning a submodule
  // of root, we are returning root itself, because we are iterating modules(),
  // which contains the root module itself.
  // It is represented with a single SlotCursor whose index is -1.
  bool return_module() const {
    return top().i_ == -1;
  }
  const SlotCursor& top() const {
    return cursors_.back();
  }
  SlotCursor& top() {
    return cursors_.back();
  }
  // The IValue at the current traversal position (the module itself in the
  // return_module() case).
  IValue cur() const {
    return return_module() ? top().module_._ivalue()
                           : top().module_._ivalue()->getSlot(top().i_);
  }

  // advance to the next slot in a depth first pre-order traversal of the
  // modules slots. This function does not guarantee the next slot is a
  // valid element of the iteration. That is done by valid().
  // invariant: !cursors_.empty()
  void next() {
    // we just returned the module itself, advance i_ to 0 so we are now
    // at the first slot of the module.
    if (return_module()) {
      ++top().i_;
      return;
    }
    // the last traversal action advanced beyond the number of slots in the
    // module so continue the iteration in the parent.
    if (top().i_ >= int64_t(top().module_._ivalue()->type()->numAttributes())) {
      cursors_.pop_back();
      if (!cursors_.empty()) {
        ++top().i_;
      }
      return;
    }
    // if the current thing is a module, we have to scan it for recursive
    // traversals. We do this by adding a new SlotCursor to track the traversal.
    if (recurse_ &&
        top().module_._ivalue()->type()->getAttribute(top().i_)->is_module()) {
      cursors_.emplace_back(SlotCursor{cur().toModule(), 0});
      return;
    }
    // common case: advance to the next slot.
    ++top().i_;
  }
  // is the current position of the iterator a valid one?
  // otherwise, we have to continue advancing.
  bool valid() const {
    return top().i_ <
        int64_t(top().module_._ivalue()->type()->numAttributes()) &&
        Policy::valid(
            top().module_._ivalue()->type(),
            top().i_,
            top().module_._ivalue()->getSlot(top().i_));
  }
  void while_not_valid_next() {
    // advance iteration until we are either at the end (cursors_.empty())
    // or in a valid state. return_module() is a special case,
    // and is always considered valid, regardless of Policy, because it is
    // only true when we are iterating modules.
    while (!cursors_.empty() && !return_module() && !valid()) {
      next();
    }
  }
  void next_valid() {
    // avoid crashing if this is empty
    if (cursors_.empty()) {
      return;
    }
    // advance to next element, which is maybe not valid
    next();
    while_not_valid_next();
  }

  std::vector<SlotCursor> cursors_;
  bool recurse_;

  friend inline bool operator!=(
      const slot_iterator_impl<Policy>& a,
      const slot_iterator_impl<Policy>& b) {
    // we are finished iteration when we have no more iteration SlotCursors.
    // end is always an empty iterator with no cursors.
    return (a.cursors_.empty() != b.cursors_.empty());
  }
};
525
+
526
// This type represents lists of parameters, attributes, and
// submodules contained in the module. It is abstract because
// they are not stored directly in std::vectors but inside the
// module's IValue object itself.
template <typename Policy>
struct slot_list_impl {
  using iterator = slot_iterator_impl<Policy>;
  using const_iterator = slot_iterator_impl<Policy>;
  using value_type = typename iterator::value_type;
  slot_iterator_impl<Policy> begin() const {
    return slot_iterator_impl<Policy>(module_, recurse_, return_module_);
  }
  slot_iterator_impl<Policy> end() const {
    return slot_iterator_impl<Policy>();
  }
  // Number of elements; computed by a full traversal on first call, then
  // cached in size_.
  size_t size() const {
    if (!size_) {
      size_ = size_t(0);
      // NOLINTNEXTLINE(clang-diagnostic-unused-variable)
      for (const value_type& s : *(this)) {
        (void)s; // Suppress unused variable warning
        ++*size_;
      }
    }
    return *size_;
  }

  slot_list_impl(Module module, bool recurse, bool return_module)
      : module_(std::move(module)),
        recurse_(recurse),
        return_module_(return_module),
        size_(c10::nullopt) {
    // When the Policy returns every slot and we neither recurse nor include
    // the module itself, the size is simply the slot count — precompute it.
    if (!recurse && !return_module && Policy::all_slots) {
      size_ = module_.num_slots();
    }
  }

 private:
  Module module_;
  bool recurse_;
  bool return_module_;
  // size of this list, cached on first request
  // when we need to filter the slot list
  mutable c10::optional<size_t> size_;
  friend struct Module;
};
572
+
573
+ namespace detail {
574
+
575
// slot_iterator_impl always iterates over all the slots in a module;
// the Policy template argument determines which slots should be returned
// and their types
struct TORCH_API ModulePolicy {
  // the type of the value being returned
  using value_type = Module;

  // the logic for creating the type being returned, given the raw IValue
  // of that object.
  static value_type create(
      const std::vector<detail::SlotCursor>& cursors,
      IValue v) {
    return Module(std::move(v).toObject());
  }
  // is slot i in typ something that this iterator should return, otherwise,
  // we skip it.
  static bool valid(const ClassTypePtr& typ, size_t i, const IValue& v) {
    return typ->getAttribute(i)->is_module();
  }
  // are we going to return everything? If so, we can optimize the calculation
  // of the size of the list.
  static CONSTEXPR_EXCEPT_WIN_CUDA bool all_slots = false;
};
598
+
599
// Policy selecting parameter slots: tensor-valued slots marked as parameters.
struct TORCH_API ParameterPolicy {
  using value_type = at::Tensor;
  static value_type create(
      const std::vector<detail::SlotCursor>& cursors,
      IValue v) {
    return std::move(v).toTensor();
  }
  // A slot qualifies only when the type marks it as a parameter AND the value
  // is actually a tensor.
  static bool valid(const ClassTypePtr& typ, size_t i, const IValue& v) {
    return typ->is_parameter(i) && v.isTensor();
  }
  static CONSTEXPR_EXCEPT_WIN_CUDA bool all_slots = false;
};
611
+
612
// Policy selecting buffer slots: tensor-typed slots marked as buffers.
struct TORCH_API BufferPolicy {
  using value_type = at::Tensor;
  static value_type create(
      const std::vector<detail::SlotCursor>& cursors,
      IValue v) {
    return std::move(v).toTensor();
  }
  // A slot qualifies when its declared type is a subtype of Tensor and the
  // class type marks it as a buffer.
  static bool valid(const ClassTypePtr& typ, size_t i, const IValue& v) {
    return typ->getAttribute(i)->isSubtypeOf(*TensorType::get()) &&
        typ->is_buffer(i);
  }
  static CONSTEXPR_EXCEPT_WIN_CUDA bool all_slots = false;
};
625
+
626
// Policy selecting every slot, returned as a raw IValue.
struct TORCH_API AttributePolicy {
  using value_type = IValue;
  static value_type create(
      const std::vector<detail::SlotCursor>& cursors,
      IValue v) {
    return v;
  }
  // All slots are attributes, so everything is valid.
  static bool valid(const ClassTypePtr& typ, size_t i, const IValue& v) {
    return true;
  }
  // Every slot is returned, so slot_list_impl can precompute sizes.
  static CONSTEXPR_EXCEPT_WIN_CUDA bool all_slots = true;
};
638
+
639
+ // take a Policy object, and make a version of it that returns the slot.
640
+ // along with the fully qualified name of that slot. This is used for the named_
641
+ // variants like named_parameters().
642
+ template <typename Policy>
643
+ struct NamedPolicy {
644
+ using value_type = Named<typename Policy::value_type>;
645
+ static value_type create(
646
+ const std::vector<detail::SlotCursor>& cursors,
647
+ IValue v) {
648
+ std::string name;
649
+ if (cursors.size() == 1) {
650
+ name = (cursors.back().i_ == -1) ? "" : nameFragment(cursors.back());
651
+ } else {
652
+ std::ostringstream ss;
653
+ for (const auto i : c10::irange(cursors.size())) {
654
+ if (i > 0) {
655
+ ss << ".";
656
+ }
657
+ ss << nameFragment(cursors[i]);
658
+ }
659
+ name = ss.str();
660
+ }
661
+ return value_type{std::move(name), Policy::create(cursors, std::move(v))};
662
+ }
663
+ static bool valid(const ClassTypePtr& t, size_t i, const IValue& v) {
664
+ return Policy::valid(t, i, v);
665
+ }
666
+ static constexpr bool all_slots = Policy::all_slots;
667
+
668
+ private:
669
+ static std::string nameFragment(const detail::SlotCursor& f) {
670
+ return f.module_.type()->getAttributeName(f.i_);
671
+ }
672
+ };
673
+
674
+ } // namespace detail
675
+
676
// Mutable reference to a process-wide flag. NOTE(review): defined elsewhere;
// the name suggests it controls whether function calls are inlined during
// compilation — confirm at the definition site.
TORCH_API bool& getInlineEverythingMode();

namespace script {
// We once had a `script::` namespace that was deleted. This is for backcompat
// of the public API; new code should not use this type alias.
using Module = ::torch::jit::Module;
using ExtraFilesMap = ::torch::jit::ExtraFilesMap;
} // namespace script

} // namespace torch::jit
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/object.h ADDED
@@ -0,0 +1,200 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/functional.h>
4
+ #include <ATen/core/ivalue.h>
5
+ #include <c10/util/Optional.h>
6
+ #include <torch/csrc/jit/api/method.h>
7
+
8
+ #include <utility>
9
+
10
namespace torch::jit {

struct Resolver;
using ResolverPtr = std::shared_ptr<Resolver>;

// Intrusive pointer to the raw ivalue object wrapped by Object below.
using ObjectPtr = c10::intrusive_ptr<c10::ivalue::Object>;

// Throw this in C++ land if `attr` fails. This will be converted to a Python
// AttributeError by the Python binding code
class ObjectAttributeError : public std::runtime_error {
 public:
  ObjectAttributeError(const std::string& what) : std::runtime_error(what) {}
};
23
+
24
// C++ wrapper around a TorchScript object (an instance of a ClassType).
// Provides attribute get/set, method lookup/invocation, and property access
// on top of the underlying c10::ivalue::Object.
struct TORCH_API Object {
  Object() = default;
  Object(const Object&) = default;
  Object& operator=(const Object&) = default;
  Object(Object&&) noexcept = default;
  Object& operator=(Object&&) noexcept = default;
  Object(ObjectPtr _ivalue) : _ivalue_(std::move(_ivalue)) {}
  Object(std::shared_ptr<CompilationUnit> cu, const c10::ClassTypePtr& type);
  Object(
      c10::QualifiedName,
      std::shared_ptr<CompilationUnit> cu,
      bool shouldMangle = false);

  // Returns the underlying instance; asserts it has been initialized.
  ObjectPtr _ivalue() const {
    TORCH_INTERNAL_ASSERT(_ivalue_);
    return _ivalue_;
  }

  // The class type of this object.
  c10::ClassTypePtr type() const {
    return _ivalue()->type();
  }

  // A named property: a getter method plus an optional setter method.
  struct Property {
    std::string name;
    Method getter_func;
    c10::optional<Method> setter_func;
  };

  // Sets attribute `name` to `v`. Fails (TORCH_CHECK) if `name` is a
  // constant, if `v`'s type is not a subtype of the declared attribute type,
  // or if no such attribute exists.
  void setattr(const std::string& name, c10::IValue v) {
    if (_ivalue()->type()->hasConstant(name)) {
      TORCH_CHECK(
          false,
          "Can't set constant '",
          name,
          "' which has value:",
          _ivalue()->type()->getConstant(name));
    } else if (auto slot = _ivalue()->type()->findAttributeSlot(name)) {
      const c10::TypePtr& expected = _ivalue()->type()->getAttribute(*slot);
      TORCH_CHECK(
          v.type()->isSubtypeOf(*expected),
          "Expected a value of type '",
          expected->repr_str(),
          "' for field '",
          name,
          "', but found '",
          v.type()->repr_str(),
          "'");
      _ivalue()->setSlot(*slot, std::move(v));
    } else {
      TORCH_CHECK(false, "Module has no attribute '", name, "'");
    }
  }

  // Returns attribute or constant `name`; throws ObjectAttributeError when
  // neither exists (converted to AttributeError by the Python bindings).
  c10::IValue attr(const std::string& name) const {
    if (auto r = _ivalue()->type()->findAttributeSlot(name)) {
      return _ivalue()->getSlot(*r);
    }
    if (auto r = _ivalue()->type()->findConstantSlot(name)) {
      return _ivalue()->type()->getConstant(*r);
    }
    std::stringstream err;
    err << _ivalue()->type()->repr_str() << " does not have a field with name '"
        << name.c_str() << "'";
    throw ObjectAttributeError(err.str());
  }

  // Returns attribute or constant `name`, or `or_else` when neither exists.
  c10::IValue attr(const std::string& name, c10::IValue or_else) const {
    if (auto r = _ivalue()->type()->findAttributeSlot(name)) {
      return _ivalue()->getSlot(*r);
    }
    if (auto r = _ivalue()->type()->findConstantSlot(name)) {
      return _ivalue()->type()->getConstant(*r);
    }
    return or_else;
  }

  // True when `name` names either an attribute or a constant of this type.
  bool hasattr(const std::string& name) const {
    return _ivalue()->type()->hasAttribute(name) ||
        _ivalue()->type()->hasConstant(name);
  }

  // each object owns its methods. The reference returned here
  // is guaranteed to stay valid until this module has been destroyed
  Method get_method(const std::string& name) const {
    if (auto method = find_method(name)) {
      return *method;
    }
    AT_ERROR("Method '", name, "' is not defined.");
  }

  // All methods of this object's class, bound to this instance.
  const std::vector<Method> get_methods() const {
    return c10::fmap(type()->methods(), [&](Function* func) {
      return Method(_ivalue(), func);
    });
  }

  // True when the class declares a property called `name`.
  bool has_property(const std::string& name) const {
    for (const auto& prop : type()->properties()) {
      if (prop.name == name) {
        return true;
      }
    }
    return false;
  }

  // Returns property `name` bound to this instance; errors if undefined.
  const Property get_property(const std::string& name) const {
    for (const auto& prop : type()->properties()) {
      if (prop.name == name) {
        c10::optional<Method> setter = c10::nullopt;
        if (prop.setter) {
          setter = Method(_ivalue(), prop.setter);
        }
        return Property{
            prop.name, Method(_ivalue(), prop.getter), std::move(setter)};
      }
    }
    AT_ERROR("Property '", name, "' is not defined.");
  }

  // All properties of this object's class, bound to this instance.
  const std::vector<Property> get_properties() const {
    return c10::fmap(type()->properties(), [&](ClassType::Property prop) {
      c10::optional<Method> setter = c10::nullopt;
      if (prop.setter) {
        setter = Method(_ivalue(), prop.setter);
      }
      return Property{
          std::move(prop.name),
          Method(_ivalue(), prop.getter),
          std::move(setter)};
    });
  }

  c10::optional<Method> find_method(const std::string& basename) const;

  /// Run a method from this module.
  ///
  /// For example:
  /// @code
  ///   IValue output = module->run("relu_script", a, b);
  /// @endcode
  ///
  /// To get a compile a module from a source string, see torch::jit::compile
  ///
  /// @param method_name The name of the method to run
  /// @param args Arguments to be passed to the method
  /// @return An IValue containing the return value (or values if it is a tuple)
  /// from the method
  template <typename... Types>
  IValue run_method(const std::string& method_name, Types&&... args) {
    return get_method(method_name)({IValue(std::forward<Types>(args))...});
  }

  // so that C++ users can easily add methods
  void define(const std::string& src, const ResolverPtr& resolver = nullptr);

  // Number of attribute slots on the underlying object.
  size_t num_slots() const {
    return _ivalue()->slots().size();
  }

  // shallow copy the object
  Object copy() const;

  // Copies all the attributes of the object recursively without creating new
  // `ClassType`, including deepcopy of Tensors
  Object deepcopy() const;

 private:
  // mutable because we lazily initialize in module_object.
  mutable ObjectPtr _ivalue_;
};
194
+
195
namespace script {
// We once had a `script::` namespace that was deleted. This is for backcompat
// of the public API; new code should not use this type alias.
using Object = ::torch::jit::Object;
} // namespace script
} // namespace torch::jit
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend.h ADDED
@@ -0,0 +1,119 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/builtin_function.h>
4
+ #include <ATen/core/stack.h>
5
+ #include <torch/csrc/jit/backends/backend_interface.h>
6
+ #include <torch/custom_class.h>
7
+
8
+ namespace torch {
9
+ namespace jit {
10
+ namespace {
11
+ // NOLINTNEXTLINE(clang-diagnostic-unneeded-internal-declaration)
12
+ inline c10::FunctionSchema getIsAvailableSchema() {
13
+ c10::Argument self("self", c10::AnyType::get());
14
+ c10::Argument available("available", c10::BoolType::get());
15
+ c10::FunctionSchema preprocessor_schema(
16
+ "is_available",
17
+ /*overload_name=*/"",
18
+ /*arguments=*/{self},
19
+ /*returns=*/{available});
20
+ return preprocessor_schema;
21
+ }
22
+
23
+ constexpr static auto kBackendsNamespace = "__backends__";
24
+
25
+ // NOLINTNEXTLINE(clang-diagnostic-unneeded-internal-declaration)
26
+ inline c10::FunctionSchema getCompileSchema() {
27
+ c10::Argument self("self", c10::AnyType::get());
28
+ c10::Argument mod("processed", c10::AnyType::get());
29
+ auto any_dict_ty =
30
+ c10::DictType::create(c10::StringType::get(), c10::AnyType::get());
31
+ c10::Argument method_compile_spec("method_compile_spec", any_dict_ty);
32
+ c10::Argument handles("handles", any_dict_ty);
33
+
34
+ c10::FunctionSchema compile_schema(
35
+ "compile",
36
+ /*overload_name=*/"",
37
+ /*arguments=*/{self, mod, method_compile_spec},
38
+ /*returns=*/{handles});
39
+ return compile_schema;
40
+ }
41
+
42
+ // NOLINTNEXTLINE(clang-diagnostic-unneeded-internal-declaration)
43
+ inline c10::FunctionSchema getExecuteSchema() {
44
+ auto any_list_ty = c10::ListType::create(c10::AnyType::get());
45
+ c10::Argument self("self", c10::AnyType::get());
46
+ c10::Argument handle("handle", c10::AnyType::get());
47
+ c10::Argument input("input", any_list_ty);
48
+ c10::Argument output("output", any_list_ty);
49
+ return c10::FunctionSchema(
50
+ "execute",
51
+ /*overload_name=*/"",
52
+ /*arguments=*/{self, handle, input},
53
+ /*returns=*/{output});
54
+ }
55
+
56
+ template <typename TBackendInterface>
57
+ std::function<void(Stack&)> getIsAvailableFunc() {
58
+ return [](Stack& stack) {
59
+ auto self = pop(stack).toCustomClass<TBackendInterface>();
60
+ auto ret = self->is_available();
61
+ push(stack, ret);
62
+ };
63
+ }
64
+
65
+ template <typename TBackendInterface>
66
+ std::function<void(Stack&)> getCompileFunc() {
67
+ return [](Stack& stack) {
68
+ auto method_compile_spec = pop(stack).toGenericDict();
69
+ auto processed = pop(stack);
70
+ auto self = pop(stack).toCustomClass<TBackendInterface>();
71
+ auto ret = self->compile(processed, method_compile_spec);
72
+ push(stack, ret);
73
+ };
74
+ }
75
+
76
+ template <typename TBackendInterface>
77
+ std::function<void(Stack&)> getExecuteFunc() {
78
+ return [](Stack& stack) {
79
+ auto args = pop(stack);
80
+ auto handle = pop(stack);
81
+ auto self = pop(stack);
82
+ auto backend = self.toCustomClass<TBackendInterface>();
83
+ auto res = backend->execute(handle, args.toList());
84
+ push(stack, res);
85
+ };
86
+ }
87
+ } // namespace
88
+
89
// Static registration API for backends. Instantiating
// `backend<MyBackendImpl>("my_backend")` registers a torchbind class under
// the `__backends__` namespace exposing `is_available`, `compile`, and
// `execute`, each dispatching to the corresponding virtual on
// TBackendInterface.
template <class TBackendInterface>
class backend {
  static_assert(
      std::is_base_of<PyTorchBackendInterface, TBackendInterface>::value,
      "torch::jit::backend<T> requires T to inherit from PyTorchBackendInterface");
  // Name the backend was registered under; retained for reference.
  std::string backend_name_;

 public:
  // Registers a new backend with \p name. `cls` is a function-local static,
  // so the torchbind registration runs at most once per template
  // instantiation even if multiple `backend` objects are constructed.
  backend(const std::string& name) : backend_name_(name) {
    static auto cls = torch::class_<TBackendInterface>(kBackendsNamespace, name)
                          .def(torch::init<>())
                          ._def_unboxed(
                              "is_available",
                              getIsAvailableFunc<TBackendInterface>(),
                              getIsAvailableSchema())
                          ._def_unboxed(
                              "compile",
                              getCompileFunc<TBackendInterface>(),
                              getCompileSchema())
                          ._def_unboxed(
                              "execute",
                              getExecuteFunc<TBackendInterface>(),
                              getExecuteSchema());
  }
};
117
+
118
+ } // namespace jit
119
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_debug_info.h ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #ifndef BUILD_LITE_INTERPRETER
4
+ #include <torch/csrc/jit/backends/backend_debug_handler.h>
5
+ #endif
6
+ #include <torch/custom_class.h>
7
+
8
+ namespace torch {
9
+ namespace jit {
10
+
11
+ constexpr static auto kBackendUtilsNamespace = "backendutils";
12
+ constexpr static auto kBackendDebugInfoClass = "BackendDebugInfo";
13
+
14
+ #ifndef BUILD_LITE_INTERPRETER
15
+ /*
16
+ * Custom class for holding debug information in lowered modules, intended
17
+ * purely for keeping this information to be later serialized outside of the
18
+ * lowered module itself.
19
+ * Its usage pattern is:
20
+ * 1. LoweredModule declares an instance of this class in __backend_debug_info
21
+ * 2. During serialization, __backend_debug_info is used to obtain the debug
22
+ * information.
23
+ * 3. The contents of LoweredModule.__backend_debug_info are not serialized
24
+ * within the LoweredModule itself.
25
+ */
26
class TORCH_API PyTorchBackendDebugInfo : public torch::CustomClassHolder {
 public:
  PyTorchBackendDebugInfo() = default;

  // Returns a mutable reference to the debug-handle map; it is nullopt until
  // setDebugInfoMap() has been called.
  c10::optional<BackendDebugInfoMapType>& getDebugInfoMap() {
    return debug_info_map_;
  }

  // Takes ownership of \p debug_info_map, replacing any previously set map.
  void setDebugInfoMap(BackendDebugInfoMapType&& debug_info_map) {
    debug_info_map_ = std::move(debug_info_map);
  }

 private:
  c10::optional<BackendDebugInfoMapType> debug_info_map_;
};
41
+
42
+ #else
43
+
44
+ /*
45
+ * Dummy instance exists for the following reason:
46
+ * __backend_debug_info is of type BackendDebugInfo which is a torchbind'
47
+ * class backed by cpp class PyTorchBackendDebugInfo.
48
+ * PyTorchBackendDebugInfo, depends on ir.h., scope.h, source_range etc.
49
+ * We dont include this on lite interpreter side. Thus on lite interpreter side
50
+ * we cannot have valid definition of PyTorchBackendDebugInfo. However we do not
51
+ * need valid instance of __backend_debug_info in lite interpreter anyway as we
52
+ * don't serialize this info as part of LoweredModule as mentioned earlier.
53
+ * However since LoweredModule has registered attribute of __backend_debug_info
54
+ * we still need to make sure that BackendDebugInfo is registered with
55
+ * TorchScript. However in this instance it does not have to be backed by
56
+ * PyTorchBackendDebugInfo, so we create a dummy PyTorchBackendDebugInfoDummy
57
+ * just for this purpose.
58
+ */
59
// Stateless placeholder backing class for the BackendDebugInfo torchbind
// type in BUILD_LITE_INTERPRETER builds (see the comment above).
class PyTorchBackendDebugInfoDummy : public torch::CustomClassHolder {
 public:
  PyTorchBackendDebugInfoDummy() = default;
};
63
+ #endif
64
+ } // namespace jit
65
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_detail.h ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/api/module.h>
4
+
5
+ #include <ATen/core/jit_type.h>
6
+
7
+ #include <functional>
8
+
9
+ namespace torch {
10
+ namespace jit {
11
+
12
+ using DebugHandleType = int64_t;
13
+
14
+ using NodeToDebugHandle = std::unordered_map<Node*, DebugHandleType>;
15
+
16
+ using BackendDebugHandleGenerator =
17
+ std::function<NodeToDebugHandle(const std::shared_ptr<Graph>&)>;
18
+
19
+ namespace detail {
20
+
21
+ using BackendPreprocessFunction = std::function<c10::IValue(
22
+ const Module&,
23
+ const c10::Dict<IValue, IValue>&,
24
+ const BackendDebugHandleGenerator& generate_debug_handles)>;
25
+
26
+ TORCH_API void registerBackendPreprocessFunction(
27
+ const std::string& name,
28
+ const BackendPreprocessFunction& preprocess);
29
+
30
+ bool hasBackendPreprocessFunction(const std::string& name);
31
+
32
+ BackendPreprocessFunction getBackendPreprocessFunction(const std::string& name);
33
+
34
+ TORCH_API Module codegen_backend_module(
35
+ const std::string& backend_name,
36
+ const Module& orig_module,
37
+ const c10::Dict<IValue, IValue>& method_compile_spec,
38
+ const c10::DictTypePtr& any_dict_ty);
39
+ } // namespace detail
40
+ } // namespace jit
41
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_exception.h ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
#include <c10/util/Exception.h>

#include <string>
#include <utility>
#include <vector>
3
+
4
+ namespace c10 {
5
+ class TORCH_API BackendRuntimeException : public c10::Error {
6
+ public:
7
+ // Use debug_handle to throw exception
8
+ BackendRuntimeException(
9
+ SourceLocation loc,
10
+ std::string msg,
11
+ int64_t debug_handle)
12
+ : c10::Error(loc, msg) {
13
+ debug_handles.push_back(debug_handle);
14
+ }
15
+ // If rethrowing, can push another debug_handle
16
+ // This is useful in couple of scenarios.
17
+ // 1. A submodule is lowered and lite interperter has CallMethod
18
+ // to lowered module's method. In this case lowered module will throw with
19
+ // a handle, plus there will be another debug handle corresponding
20
+ // to the CallMethod node in lite interpreter. Both together give complete
21
+ // trace. This function allows lite interpreter to rethrow with debug
22
+ // handle it has for CallMethod.
23
+ // 2. Another scenarios is when lite interperter can make function calls or
24
+ // the lowered backend also has function call ability. Thus we have
25
+ // multiple function frames. Now we need a stack of handles to symbolicate
26
+ // entire stack trace.
27
+ void pushDebugHandle(int64_t debug_handle) {
28
+ debug_handles.push_back(debug_handle);
29
+ }
30
+ const std::vector<int64_t>& getDebugHandles() {
31
+ return debug_handles;
32
+ }
33
+
34
+ private:
35
+ // Stores stack of debug handles.
36
+ std::vector<int64_t> debug_handles;
37
+ };
38
+
39
+ } // namespace c10
40
+ #define TORCH_DELEGATED_BACKEND_THROW(cond, msg, debug_handle) \
41
+ if (C10_UNLIKELY_OR_CONST(!(cond))) { \
42
+ throw ::c10::BackendRuntimeException( \
43
+ {__func__, __FILE__, static_cast<uint32_t>(__LINE__)}, \
44
+ msg, \
45
+ debug_handle); \
46
+ }
47
+
48
+ #define TORCH_DELEGATED_BACKEND_RETHROW(e, debug_handle) \
49
+ do { \
50
+ e.pushDebugHandle(debug_handle); \
51
+ throw; \
52
+ } while (false)
53
+
54
+ #define DEBUG_HANDLE_UNKNOWN -1
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_init.h ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/python/pybind.h>
4
+ #include <torch/csrc/utils/pybind.h>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+ // Initialize Python bindings for JIT to_<backend> functions.
9
+ void initJitBackendBindings(PyObject* module);
10
+ } // namespace jit
11
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_interface.h ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/custom_class.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ // Interface for a JIT backend.
9
// Interface for a JIT backend. Concrete backends implement this and are
// registered via torch::jit::backend<T> (see backend.h), which exposes the
// three methods below to TorchScript.
class TORCH_API PyTorchBackendInterface : public torch::CustomClassHolder {
 public:
  PyTorchBackendInterface() noexcept;
  ~PyTorchBackendInterface() override;

  // Returns true if the backend is available to process delegation calls.
  virtual bool is_available() = 0;

  // Compile the module contained in \p processed using the details provided in
  // \p method_compile_spec for each module method that should be compiled for
  // the backend. \p method_compile_spec should be of type Dict<string, Any>.
  // \returns a dictionary of type Dict<string, Any> that contains a backend
  // handle for each method that can run on the backend (i.e. each key in \p
  // method_compile_spec).
  virtual c10::impl::GenericDict compile(
      c10::IValue processed,
      c10::impl::GenericDict method_compile_spec) = 0;

  // Execute the method specified by \p handle using \p inputs. \returns the
  // outputs as a generic list.
  virtual c10::impl::GenericList execute(
      c10::IValue handle,
      c10::impl::GenericList inputs) = 0;
};
33
+ } // namespace jit
34
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_preprocess.h ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/backends/backend_detail.h>
4
+ namespace torch {
5
+ namespace jit {
6
// Static registration helper: constructing an instance registers
// \p preprocess as the preprocessing function for the backend named \p name.
class backend_preprocess_register {
  // Name the preprocess function was registered under; retained for reference.
  std::string backend_name_;

 public:
  backend_preprocess_register(
      const std::string& name,
      const detail::BackendPreprocessFunction& preprocess)
      : backend_name_(name) {
    detail::registerBackendPreprocessFunction(name, preprocess);
  }
};
17
+ } // namespace jit
18
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_resolver.h ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/frontend/resolver.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+ // Create a Resolver for use in generating LoweredModules for specific backends.
8
+ TORCH_API std::shared_ptr<Resolver> loweredModuleResolver();
9
+ } // namespace jit
10
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/builtin_functions.h ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/Export.h>
4
+ #include <torch/csrc/jit/api/module.h>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+
9
+ TORCH_API const std::vector<Function*>& getAllBuiltinFunctionsFor(Symbol name);
10
+ } // namespace jit
11
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/canonicalize_modified_loop.h ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <memory>
3
+
4
+ #include <torch/csrc/Export.h>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+
9
+ struct Graph;
10
+
11
+ // Transforms loops so that they can be represented as python
12
+ // for or while loops
13
+ TORCH_API void CanonicalizeModifiedLoops(std::shared_ptr<Graph>& graph);
14
+
15
+ } // namespace jit
16
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/concrete_module_type.h ADDED
@@ -0,0 +1,241 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/ivalue.h>
4
+ #include <torch/csrc/jit/api/module.h>
5
+ #include <torch/csrc/jit/python/pybind_utils.h>
6
+ #include <memory>
7
+ #include <string>
8
+ #include <vector>
9
+
10
+ namespace torch {
11
+ namespace jit {
12
+
13
+ enum class IterableModuleKind { NONE, LIST, DICT, PARAMLIST, PARAMDICT };
14
+ class ConcreteModuleType;
15
+
16
+ // You can think of an nn.Module as a template that corresponds to a family of
17
+ // JIT types. The template "arguments" are things like the constant values.
18
+ // e.g.
19
+ // class M(nn.Module):
20
+ // __constants__ = ["const"]
21
+ // ...
22
+ //
23
+ // Is similar to writing the following in C++:
24
+ //
25
+ // template<TConst>
26
+ // class M {
27
+ // ...
28
+ // }
29
+ //
30
+ // We need to consider each different member of the type family a different JIT
31
+ // type because, e.g. different constant values lead to different versions of
32
+ // the same method.
33
+ //
34
+ // ConcreteModuleType corresponds to a single member of the type family, with
35
+ // all template arguments fully specified. Two Modules that share a
36
+ // ConcreteModuleType can share a JIT type, and vice versa.
37
+ //
38
+ // Why not just use a JIT type to represent concrete types? Because constants,
39
+ // function attributes, etc. are currently not representable in the type system,
40
+ // so this acts a non-first-class way of tracking concrete types.
41
+ //
42
+ // ConcreteModuleType is also the source of truth for servicing all
43
+ // ModuleValue::attr calls. This is so we can guarantee that if two Module's
44
+ // share a JIT type (and thus a ConcreteModuleType), then they behave the same
45
+ // way when you access attributes on them.
46
+
47
+ // ConcreteModuleType has two phases.
48
+ // 1. Creation: First we build it up, during the ScriptModule conversion
49
+ // process. This is represented by ConcreteModuleTypeBuilder.
50
+ // ...then the converter calls ConcreteModuleTypeBuilder::build(), producing
51
+ // a
52
+ // ConcreteModuleType ready for querying.
53
+ // 2. Querying: We use ConcreteModuleType as a source of truth for
54
+ // ModuleValue::attr calls during method compilation.
55
+
56
+ // Represents a concrete type during in the process for construction. We use
57
+ // this to decide whether we can share types between modules.
58
// Represents a concrete type during in the process for construction. We use
// this to decide whether we can share types between modules.
class VISIBILITY_HIDDEN ConcreteModuleTypeBuilder {
 public:
  explicit ConcreteModuleTypeBuilder(py::object pyClass) {
    TORCH_INTERNAL_ASSERT(pyClass);
    pyClass_ = std::move(pyClass);
  }

  // Record a module-level constant, either as a raw Python object or as an
  // already-converted IValue.
  void addConstant(std::string name, py::object value);
  void addConstant(std::string name, IValue value);
  // Record a data attribute with its TorchScript type; parameters and buffers
  // are flagged separately.
  void addAttribute(
      std::string name,
      const TypePtr& type,
      bool isParameter,
      bool isBuffer);
  // Record a function-valued attribute (functions are not first-class in the
  // type system, so these are tracked apart from ordinary attributes).
  void addFunctionAttribute(
      std::string name,
      const TypePtr& type,
      py::object pyFunction);

  // Record a submodule together with its (already-built) concrete type.
  void addModule(std::string name, std::shared_ptr<ConcreteModuleType> meta);

  void addForwardHook(py::object hook);
  void addForwardPreHook(py::object pre_hook);

  // Record an overload set, in the same format as `__overloads__` in Python.
  void addOverload(
      std::string methodName,
      std::vector<std::string> overloadedMethodNames);
  void addBuiltinFunction(std::string name, const std::string& symbol_name);
  void addFailedAttribute(std::string name, std::string failureReason);
  void addIgnoredAttribute(std::string name);
  void setIterableModuleKind(IterableModuleKind kind);

  // If a ConcreteModuleType is poisoned, it will never compare equal to any
  // other concrete type
  void setPoisoned();

  // Finalize this builder into an immutable ConcreteModuleType for querying.
  std::shared_ptr<ConcreteModuleType> build() const {
    return std::make_shared<ConcreteModuleType>(*this);
  }

  // This determines whether two modules can share a type. The container structs
  // used by ConcreteModuleType have been defined such that operator==
  // implements a meaningful comparison in that context.
  bool equals(const ConcreteModuleTypeBuilder& other) const;

  struct FunctionAttribute {
    FunctionTypePtr function_;
    py::object pyFunction_;

    friend bool operator==(
        const FunctionAttribute& lhs,
        const FunctionAttribute& rhs) {
      // Functions are not first class, so we can't do type comparison like a
      // regular attribute. So we do a pointer equality check on the actual
      // Python function object.
      return lhs.pyFunction_.is(rhs.pyFunction_);
    }
  };

  struct Attribute {
    Attribute(TypePtr type, bool isParam, bool isBuffer)
        : type_(std::move(type)), isParam_(isParam), isBuffer_(isBuffer) {}

    friend bool operator==(const Attribute& lhs, const Attribute& rhs) {
      // NOTE(review): isBuffer_ is excluded from equality — only the type and
      // param-ness participate. Presumably intentional; confirm before
      // relying on buffer-ness for type-sharing decisions.
      return *(lhs.type_) == *(rhs.type_) && lhs.isParam_ == rhs.isParam_;
    }
    TypePtr type_;
    bool isParam_;
    bool isBuffer_;
  };

  // Name and concrete type of a single submodule.
  struct ModuleInfo {
    ModuleInfo(std::string name, std::shared_ptr<ConcreteModuleType> meta)
        : name_(std::move(name)), meta_(std::move(meta)) {}

    friend bool operator==(const ModuleInfo& lhs, const ModuleInfo& rhs);

    std::string name_;
    std::shared_ptr<ConcreteModuleType> meta_;
  };

 private:
  ConcreteModuleTypeBuilder() = default;
  ClassTypePtr createTypeFromThis() const;

  // If true, this type will never compare equally to anything else. This is
  // used if we want to ensure that this type is not shared (for example, if it
  // came from a traced module)
  bool isPoisoned_ = false;

  // The value of any constants defined by the module.
  std::unordered_map<std::string, IValue> constants_;
  // The types of any attributes
  OrderedDict<std::string, Attribute> attributes_;
  // Overloads, in the same format as `__overloads__` in Python
  std::unordered_map<std::string, std::vector<std::string>> overloads_;
  // Any attributes we failed to convert to TorchScript, along with a hint as to
  // why
  std::unordered_map<std::string, std::string> failedAttributes_;
  // Any attributes that were marked as ignored. They cannot be used in
  // TorchScript but can still be used in ignored function in Python.
  std::unordered_set<std::string> ignoredAttributes_;
  // Any function attributes. These are special right now because functions are
  // not first-class in the type system.
  std::unordered_map<std::string, FunctionAttribute> functionAttributes_;
  // Function attributes that are calls to builtin functions. These get
  // de-sugared directly into the corresponding aten:: call. The map is
  // attribute name -> aten symbol name
  std::unordered_map<std::string, c10::Symbol> builtinFunctions_;
  // The concrete types of any submodules
  std::vector<ModuleInfo> modules_;
  // Hooks to be called before/after forward when the module
  // is called directly. Used to ensure modules have different types
  // when they have different python hooks
  // Actual hooks are added to ClassType directly during compilation
  std::vector<py::object> forwardHooks_;
  std::vector<py::object> forwardPreHooks_;

  // If something is a ModuleDict/ModuleList, it means:
  //   1. The order of the submodules matters for comparing the type
  //   2. The compiler is allowed to treat it like a dict/tuple
  IterableModuleKind iterableModuleKind_ = IterableModuleKind::NONE;

  // The original `nn.Module` class that we derived this ScriptModule from.
  py::object pyClass_;

  // NOTE: If you ever add any more state to this struct, you need to make sure
  // operator== still makes sense!
  friend ConcreteModuleType;
};
188
+
189
+ // Represents a finalized concrete type, used to service ModuleValue::attr calls
190
+ // during method compilation.
191
// Represents a finalized concrete type, used to service ModuleValue::attr calls
// during method compilation.
class VISIBILITY_HIDDEN ConcreteModuleType {
 public:
  explicit ConcreteModuleType(ConcreteModuleTypeBuilder data);

  // Wrap an existing JIT type directly (no Python-side builder data).
  static std::shared_ptr<ConcreteModuleType> fromJitType(TypePtr type);

  TypePtr getJitType() const;
  c10::optional<py::object> getPyClass() const;
  IterableModuleKind getIterableModuleKind() const;
  // Lookup helpers for the attribute categories recorded by the builder;
  // each signals a miss via an empty optional / null pointer.
  c10::optional<std::vector<std::string>> findOverloads(
      const std::string& name) const;
  c10::optional<Function*> findFunctionAttribute(const std::string& name) const;
  c10::optional<c10::Symbol> findBuiltinFunction(const std::string& name) const;
  std::shared_ptr<ConcreteModuleType> findSubmoduleConcreteType(
      const std::string& name) const;
  c10::optional<std::string> findFailedAttribute(const std::string& name) const;
  bool isIgnoredAttribute(const std::string& name) const;

  // These getters are only here to return things as types that can be
  // automatically converted by pybind.
  std::unordered_map<std::string, py::object> getConstantsPy() const;
  std::unordered_map<std::string, std::pair<TypePtr, bool>> getAttributesPy()
      const;
  std::vector<std::pair<std::string, std::shared_ptr<ConcreteModuleType>>>
  getModulesPy() const;

  bool equals(const ConcreteModuleType& other) const {
    if (jitType_ == other.jitType_) {
      // If the computed types are the same, these modules can (obviously) share
      // a type. (This is an identity comparison of the TypePtrs, not a
      // structural one; structural comparison falls through to data_.)
      return true;
    }

    return data_.equals(other.data_);
  }
  bool equals(const ConcreteModuleTypeBuilder& other) const {
    return data_.equals(other);
  }

  void dump() const;

 private:
  ConcreteModuleType() = default;

  // Builder data this type was finalized from; source of truth for the
  // find*/get* queries above.
  ConcreteModuleTypeBuilder data_;
  // The JIT type derived from this ConcreteModuleType.
  TypePtr jitType_;
};
239
+
240
+ } // namespace jit
241
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/convert_to_ssa.h ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <functional>
3
+ #include <memory>
4
+ #include <string>
5
+
6
+ #include <torch/csrc/Export.h>
7
+ #include <torch/csrc/jit/ir/ir.h>
8
+
9
+ namespace torch {
10
+ namespace jit {
11
+
12
+ // Convert a graph with Loads & Stores into SSA form
13
+ TORCH_API void ConvertToSSA(std::shared_ptr<Graph>& graph);
14
+
15
+ } // namespace jit
16
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/edit_distance.h ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/Export.h>
4
+ #include <cstddef>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+
9
+ TORCH_API size_t ComputeEditDistance(
10
+ const char* word1,
11
+ const char* word2,
12
+ size_t maxEditDistance);
13
+
14
+ } // namespace jit
15
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/error_report.h ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/util/Optional.h>
4
+ #include <torch/csrc/jit/frontend/tree.h>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+
9
// One frame of the user-function call stack recorded for error reporting:
// the callee's name and the source location of the call site.
struct Call {
  std::string fn_name;
  SourceRange caller_range;
};
13
+
14
// Exception type for errors raised while processing TorchScript source.
// Carries the source range of the offending code and a call stack of user
// functions (error_stack) so what() can produce an annotated message.
// Extra detail is appended with the streaming operator<< below.
struct TORCH_API ErrorReport : public std::exception {
  ErrorReport(const ErrorReport& e);

  explicit ErrorReport(SourceRange r);
  explicit ErrorReport(const TreeRef& tree) : ErrorReport(tree->range()) {}
  explicit ErrorReport(const Token& tok) : ErrorReport(tok.range) {}

  // Renders the full message; presumably cached in `the_message` (mutable)
  // so the returned pointer remains valid — confirm in the .cpp.
  const char* what() const noexcept override;

  struct TORCH_API CallStack {
    // These functions are used to report why a function was being compiled
    // (i.e. what was the call stack of user functions at compilation time that
    // led to this error)
    CallStack(const std::string& name, const SourceRange& range);
    ~CallStack();

    // Change the range that is relevant for the current function (i.e. after
    // each successful expression compilation, change it to the next expression)
    static void update_pending_range(const SourceRange& range);
  };

  static std::string current_call_stack();

 private:
  template <typename T>
  friend const ErrorReport& operator<<(const ErrorReport& e, const T& t);

  // `ss` and `the_message` are mutable because operator<< and what() operate
  // on const references/objects.
  mutable std::stringstream ss;
  OwnedSourceRange context;
  mutable std::string the_message;
  std::vector<Call> error_stack;
};
46
+
47
// Appends extra detail to the report's message buffer. Takes and returns a
// const reference so it can be chained on a temporary ErrorReport being
// thrown; writing through const works because `ss` is mutable.
template <typename T>
const ErrorReport& operator<<(const ErrorReport& e, const T& t) {
  e.ss << t;
  return e;
}
52
+
53
+ } // namespace jit
54
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/exit_transforms.h ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/Export.h>
4
+ #include <torch/csrc/jit/ir/ir.h>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+
9
+ TORCH_API void TransformExits(std::shared_ptr<Graph>& graph);
10
+
11
+ } // namespace jit
12
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/function_schema_parser.h ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/function_schema.h>
4
+ #include <c10/macros/Macros.h>
5
+ #include <string>
6
+ #include <variant>
7
+
8
+ namespace torch {
9
+ namespace jit {
10
+
11
+ TORCH_API std::variant<c10::OperatorName, c10::FunctionSchema> parseSchemaOrName(
12
+ const std::string& schemaOrName);
13
+ TORCH_API c10::FunctionSchema parseSchema(const std::string& schema);
14
+ TORCH_API c10::OperatorName parseName(const std::string& name);
15
+
16
+ } // namespace jit
17
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/inline_loop_condition.h ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <functional>
3
+ #include <memory>
4
+ #include <string>
5
+
6
+ #include <torch/csrc/Export.h>
7
+ #include <torch/csrc/jit/ir/ir.h>
8
+
9
+ namespace torch {
10
+ namespace jit {
11
+
12
+ TORCH_API void InlineLoopCondition(std::shared_ptr<Graph>& graph);
13
+ TORCH_API void InlineBlockBeforeNode(Node* before_node, Block* block);
14
+
15
+ } // namespace jit
16
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/ir_emitter.h ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <functional>
3
+ #include <memory>
4
+ #include <string>
5
+
6
+ #include <torch/csrc/jit/api/module.h>
7
+ #include <torch/csrc/jit/frontend/error_report.h>
8
+ #include <torch/csrc/jit/frontend/resolver.h>
9
+ #include <torch/csrc/jit/frontend/sugared_value.h>
10
+ #include <torch/csrc/jit/frontend/tree_views.h>
11
+ #include <torch/csrc/jit/ir/ir.h>
12
+
13
+ namespace torch {
14
+ namespace jit {
15
+
16
+ TORCH_API void runCleanupPasses(std::shared_ptr<Graph>& to_clean);
17
+
18
+ TORCH_API bool meaningfulName(const std::string& name);
19
+
20
+ } // namespace jit
21
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/lexer.h ADDED
@@ -0,0 +1,576 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <c10/macros/Macros.h>
3
+ #include <c10/util/C++17.h>
4
+ #include <c10/util/Exception.h>
5
+ #include <torch/csrc/Export.h>
6
+ #include <torch/csrc/jit/frontend/parser_constants.h>
7
+ #include <torch/csrc/jit/frontend/source_range.h>
8
+ #include <torch/csrc/jit/frontend/strtod.h>
9
+ #include <algorithm>
10
+ #include <clocale>
11
+ #include <cstdlib>
12
+ #include <memory>
13
+ #include <sstream>
14
+ #include <string>
15
+ #include <vector>
16
+
17
+ C10_CLANG_DIAGNOSTIC_PUSH()
18
+ #if C10_CLANG_HAS_WARNING("-Wshorten-64-to-32")
19
+ C10_CLANG_DIAGNOSTIC_IGNORE("-Wshorten-64-to-32")
20
+ #endif
21
+
22
+ namespace torch {
23
+ namespace jit {
24
+
25
+ // single character tokens are just the character itself '+'
26
+ // multi-character tokens need an entry here
27
+ // if the third entry is not the empty string, it is used
28
+ // in the lexer to match this token.
29
+
30
+ // These kinds are also used in Tree.h as the kind of the AST node.
31
+ // Some kinds TK_APPLY, TK_LIST are only used in the AST and are not seen in the
32
+ // lexer.
33
+
34
+ #define TC_FORALL_TOKEN_KINDS(_) \
35
+ _(TK_EOF, "eof", "") \
36
+ _(TK_WHITESPACE, "whitespace", "") \
37
+ _(TK_WHITESPACE_EOF, "whitespace_eof", "") \
38
+ _(TK_NUMBER, "number", "") \
39
+ _(TK_NEWLINE, "newline", "") \
40
+ _(TK_INDENT, "indent", "") \
41
+ _(TK_DEDENT, "dedent", "") \
42
+ _(TK_DEF, "def", "def") \
43
+ _(TK_EQUIVALENT, "equivalent", "<=>") \
44
+ _(TK_IDENT, "ident", "") \
45
+ _(TK_STRING, "string", "") \
46
+ _(TK_STRINGLITERAL, "string_literal", "") \
47
+ _(TK_CONST, "const", "") \
48
+ _(TK_LIST, "list", "") \
49
+ _(TK_DICT, "dict", "") \
50
+ _(TK_OPTION, "option", "") \
51
+ _(TK_APPLY, "apply", "") \
52
+ _(TK_COMPREHENSION, "comprehension", "") \
53
+ _(TK_RANGE_CONSTRAINT, "range_constraint", "") \
54
+ _(TK_PARAM, "param", "") \
55
+ _(TK_INFERRED, "inferred", "") \
56
+ _(TK_ACCESS, "access", "") \
57
+ _(TK_ASSIGN, "assign", "") \
58
+ _(TK_AUG_ASSIGN, "aug_assign", "") \
59
+ _(TK_ATTRIBUTE, "attribute", "") \
60
+ _(TK_IF, "if", "if") \
61
+ _(TK_ELSE, "else", "else") \
62
+ _(TK_ELIF, "elif", "elif") \
63
+ _(TK_WHILE, "while", "while") \
64
+ _(TK_EXPR_STMT, "expression statement", "") \
65
+ _(TK_RETURN, "return", "return") \
66
+ _(TK_IS, "is", "is") \
67
+ _(TK_ISNOT, "is not", "is not") \
68
+ _(TK_NE, "ne", "!=") \
69
+ _(TK_EQ, "eq", "==") \
70
+ _(TK_LE, "le", "<=") \
71
+ _(TK_GE, "ge", ">=") \
72
+ _(TK_FLOOR_DIV, "floordiv", "//") \
73
+ _(TK_IF_EXPR, "if", "") \
74
+ _(TK_TRUE, "True", "True") \
75
+ _(TK_FALSE, "False", "False") \
76
+ _(TK_NONE, "None", "None") \
77
+ _(TK_AND, "and", "and") \
78
+ _(TK_OR, "or", "or") \
79
+ _(TK_NOT, "not", "not") \
80
+ _(TK_LSHIFT, "<<", "<<") \
81
+ _(TK_RSHIFT, ">>", ">>") \
82
+ _(TK_CAST, "cast", "") \
83
+ _(TK_PLUS_EQ, "+=", "+=") \
84
+ _(TK_MINUS_EQ, "-=", "-=") \
85
+ _(TK_TIMES_EQ, "*=", "*=") \
86
+ _(TK_DIV_EQ, "/=", "/=") \
87
+ _(TK_MOD_EQ, "%=", "%=") \
88
+ _(TK_BIT_OR_EQ, "|=", "|=") \
89
+ _(TK_BIT_AND_EQ, "&=", "&=") \
90
+ _(TK_BIT_XOR_EQ, "^=", "^=") \
91
+ _(TK_LSHIFT_EQ, "<<=", "<<=") \
92
+ _(TK_RSHIFT_EQ, ">>=", ">>=") \
93
+ _(TK_POW_EQ, "**=", "**=") \
94
+ _(TK_GLOBAL, "global", "global") \
95
+ _(TK_BUILT_IN, "built-in", "") \
96
+ _(TK_SUBSCRIPT, "subscript", "") \
97
+ _(TK_VAR, "variable", "") \
98
+ _(TK_NOTHING, "nothing", "") \
99
+ _(TK_DICT_LITERAL, "dict-literal", "") \
100
+ _(TK_LIST_LITERAL, "list-literal", "") \
101
+ _(TK_TUPLE_LITERAL, "tuple-literal", "") \
102
+ _(TK_FOR, "for", "for") \
103
+ _(TK_IN, "in", "in") \
104
+ _(TK_NOTIN, "not in", "not in") \
105
+ _(TK_STARRED, "starred", "") \
106
+ _(TK_UNARY_MINUS, "unary minus", "") \
107
+ _(TK_POW, "pow operator", "**") \
108
+ _(TK_ARROW, "arrow", "->") \
109
+ _(TK_DECL, "decl", "") \
110
+ _(TK_SLICE_EXPR, "slice expr", "") \
111
+ _(TK_TYPE_COMMENT, "type comment", "# type:") \
112
+ _(TK_RAISE, "raise", "raise") \
113
+ _(TK_ASSERT, "assert", "assert") \
114
+ _(TK_DOTS, "dots", "...") \
115
+ _(TK_LIST_COMP, "list comprehension", "") \
116
+ _(TK_DICT_COMP, "dict comprehension", "") \
117
+ _(TK_BREAK, "break", "break") \
118
+ _(TK_CONTINUE, "continue", "continue") \
119
+ _(TK_DELETE, "del", "del") \
120
+ _(TK_PASS, "pass", "pass") \
121
+ _(TK_CLASS_DEF, "class", "class") \
122
+ _(TK_IMPORT, "import", "import") \
123
+ _(TK_WITH, "with", "with") \
124
+ _(TK_WITH_ITEM, "withitem", "") \
125
+ _(TK_AS, "as", "as") \
126
+ _(TK_PROP, "property", "") \
127
+ _(TK_ELLIPSIS, "Ellipsis", "Ellipsis") \
128
+ _(TK_NONE_TYPE, "NoneType", "NoneType")
129
+
130
// Token kinds used by both the lexer and the AST (Tree.h). Single characters
// represent themselves (their ASCII codes), so generated multi-character
// token values start above 255 via TK_DUMMY_START.
enum TokenKind {
  // we use characters to represent themselves so skip all valid characters
  // before
  // assigning enum values to multi-char tokens.
  TK_DUMMY_START = 256,
#define DEFINE_TOKEN(tok, _, _2) tok,
  TC_FORALL_TOKEN_KINDS(DEFINE_TOKEN)
#undef DEFINE_TOKEN
};
139
+
140
+ TORCH_API std::string kindToString(int kind);
141
+ TORCH_API int stringToKind(const std::string& str);
142
+
143
+ // nested hash tables that indicate char-by-char what is a valid token.
144
+ struct TokenTrie;
145
+ using TokenTrieRef = std::unique_ptr<TokenTrie>;
146
+ struct TokenTrie {
147
+ TokenTrie() : kind(0) {}
148
+ void insert(const char* str, int tok) {
149
+ if (*str == '\0') {
150
+ AT_ASSERT(kind == 0);
151
+ kind = tok;
152
+ return;
153
+ }
154
+
155
+ for (size_t i = 0, e = child_chars.size(); i < e; ++i) {
156
+ if (child_chars[i] == *str) {
157
+ child_tries[i]->insert(str + 1, tok);
158
+ return;
159
+ }
160
+ }
161
+
162
+ child_chars.emplace_back(*str);
163
+ child_tries.emplace_back(std::make_unique<TokenTrie>());
164
+ child_tries.back()->insert(str + 1, tok);
165
+ }
166
+ int kind; // 0 == invalid token
167
+
168
+ std::vector<char> child_chars;
169
+ std::vector<TokenTrieRef> child_tries;
170
+ };
171
+
172
// stuff that is shared against all TC lexers/parsers and is initialized only
// once.
struct TORCH_API SharedParserData {
  // Builds the token trie once: every single-character token first, then
  // every multi-character token whose spelling (third TC_FORALL_TOKEN_KINDS
  // column) is non-empty.
  SharedParserData() : head(new TokenTrie()) {
    std::stringstream ss; // NOTE(review): appears unused — candidate for removal
    for (const char* c = valid_single_char_tokens; *c; c++) {
      std::string str(1, *c);
      head->insert(str.c_str(), *c);
    }

#define ADD_CASE(tok, _, tokstring) \
  if (*(tokstring) != '\0') {       \
    head->insert((tokstring), (tok)); \
  }
    TC_FORALL_TOKEN_KINDS(ADD_CASE)
#undef ADD_CASE
  }

  // Scans the next token starting at `pos`. On success returns true and sets
  // *kind, *start (first character of the token) and *end (one past its last
  // character). Leading blanks and comments are skipped; newlines are only
  // significant when `continuation` is false.
  bool match(
      StringCordView::Iterator pos,
      bool continuation, // are we inside a scope where newlines don't count
                         // (e.g. inside parens)
      bool whitespace_token, // should we treat whitespace as a token
      int* kind,
      StringCordView::Iterator* start,
      StringCordView::Iterator* end) {
    *start = pos;
    // skip whitespace
    while (pos.has_next() && isblank(*pos)) {
      ++pos;
    }

    // special handling
    if (pos.has_next()) {
      if (*pos == '#' && !isTypeComment(pos)) {
        // skip comments (except "# type:" comments, which are real tokens)
        while (pos.has_next() && *pos != '\n')
          ++pos;
        // tail call, handle whitespace and more comments
        return match(pos, continuation, whitespace_token, kind, start, end);
      }
      if (*pos == '\\') {
        // Backslash immediately before a newline is a line continuation:
        // skip both characters and keep scanning.
        auto newiter = pos;
        ++newiter;
        if (newiter.has_next() && *newiter == '\n' && !whitespace_token) {
          ++newiter;
          return match(newiter, continuation, false, kind, start, end);
        }
      }
      if (*pos == '\n') {
        // After a newline the following whitespace becomes significant
        // (indent tracking) unless we are inside brackets (continuation).
        return match(++pos, continuation, !continuation, kind, start, end);
      }
    }
    // we handle white space before EOF because in the case we have something
    // like the following where we need to generate the dedent token if foo:
    // ...
    // else:
    //   pass
    if (whitespace_token) {
      *kind = !pos.has_next() ? TK_WHITESPACE_EOF : TK_WHITESPACE;
      *end = pos;
      return true;
    }
    if (!pos.has_next()) {
      *kind = TK_EOF;
      *start = pos;
      *end = *start;
      return true;
    }
    // invariant: the next token is not whitespace or newline
    *start = pos;
    // check for a valid number
    size_t len;
    if (isNumber(pos.rest_line(), 0, &len)) {
      *end = *start;
      *end += len;
      *kind = TK_NUMBER;
      return true;
    }
    // check for string
    if (isString(pos.rest_line(), 0, &len)) {
      *kind = TK_STRINGLITERAL;
      *end = *start;
      *end += len;
      return true;
    }

    // check for either an ident or a token
    // ident tracks whether what we have scanned so far could be an identifier
    // matched indicates if we have found any match.
    bool matched = false;
    bool ident = true;
    TokenTrie* cur = head.get();
    // for (size_t i = 0; pos + i < str.size() && (ident || cur != nullptr);
    // i++)
    for (size_t i = 0; pos.has_next() && (ident || cur != nullptr);
         ++pos, ++i) {
      ident = ident && validIdent(i, *pos);
      if (ident) {
        matched = true;
        *end = pos.next_iter();
        *kind = TK_IDENT;
      }
      // check for token second, so that e.g. 'max' matches the token TK_MAX
      // rather the
      // identifier 'max'
      if (cur) {
        const auto begin_it = cur->child_chars.begin();
        const auto end_it = cur->child_chars.end();
        const auto ch_it = std::find(begin_it, end_it, *pos);

        cur = (ch_it == end_it) ? nullptr
                                : cur->child_tries[ch_it - begin_it].get();

        if (cur && cur->kind != 0) {
          matched = true;
          *end = pos.next_iter();
          *kind = cur->kind;
        }
      }
    }
    return matched;
  }

  bool isUnary(int kind, int* prec);
  bool isBinary(int kind, int* prec);
  // Operators that associate right-to-left during expression parsing.
  bool isRightAssociative(int kind) {
    switch (kind) {
      case '?':
      case TK_POW:
      case TK_IF:
        return true;
      default:
        return false;
    }
  }

 private:
  // Identifiers start with a letter or '_'; digits are only allowed after
  // the first character (i > 0).
  bool validIdent(size_t i, char n) {
    return isalpha(n) || n == '_' || (i > 0 && isdigit(n));
  }

  // 1. skip whitespace
  // 2. handle comment or newline
  //
  bool isNumber(c10::string_view str, size_t start, size_t* len) {
    char first = str[start];
    // strtod allows numbers to start with + or - or nan or inf
    // http://en.cppreference.com/w/cpp/string/byte/strtof
    // but we want only the number part, otherwise 1+3 will turn into two
    // adjacent numbers in the lexer
    if (first == '-' || first == '+' || isalpha(first))
      return false;
    const char* startptr = str.data() + start;
    // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
    char* endptr;
    torch::jit::strtod_c(startptr, &endptr);
    *len = endptr - startptr;
    // check if the number is complex valued
    // access is safe because string is assumed to be null terminated
    if (endptr != nullptr && *endptr == 'j') {
      *len += 1;
    }
    return *len > 0;
  }

  // True when the `len` characters of `str` starting at `start` all equal `c`
  // and the window fits inside `str`.
  bool isCharCount(char c, c10::string_view str, size_t start, int len) {
    // count checks from [start, start + len)
    return start + len <= str.size() &&
        std::count(str.begin() + start, str.begin() + start + len, c) == len;
  }

  // python concatenates all adjacent strings "a" "b" == "ab"
  // strings can be enclosed with 1 or 3 single or double quotes
  // if enclosed with 3 quotes newlines are valid
  // as elsewhere, backslash and new line should be ignored
  bool isString(c10::string_view str, size_t start, size_t* len) {
    char quote = str[start];
    if (quote != '\"' && quote != '\'')
      return false;
    int quote_len = isCharCount(quote, str, start, 3) ? 3 : 1;

    // end is now set past the opening quotation marks
    size_t end = start + quote_len;
    while (end < str.size() && !isCharCount(quote, str, end, quote_len)) {
      if (str[end] == '\n' && quote_len != 3) {
        // bare newline terminates a single-quoted string: not a valid string
        return false;
      }
      // handle escaped characters. advances past escaped quotation marks,
      // escaped newlines and escaped backslashes
      // multi-char escapes like \x1A are handled fine here because the
      // remainder of the escape are valid string characters anyway
      if (str[end] == '\\') {
        end++;
      }
      end++;
    }
    // set length equal to the complete string including quotations
    *len = end - start + quote_len;
    // if end finished without going past the last character of the string than
    // there is a match
    return end < str.size();
  }

  // Whitespace other than newline; newlines are handled separately because
  // they drive indentation tracking.
  bool isblank(int n) {
    return isspace(n) && n != '\n';
  }

  // True when the text at `str_iter` begins with the "# type:" marker, which
  // must be lexed as a token rather than skipped as a comment.
  bool isTypeComment(StringCordView::Iterator str_iter) {
    c10::string_view rest_line = str_iter.rest_line();
    const std::string type_string = "# type:";
    if (rest_line.size() < type_string.length()) {
      return false;
    }
    auto match_string = rest_line.substr(0, type_string.size());
    return match_string == type_string;
  }

  // Make an exception ignoring comments for type annotation comments
  bool isTypeComment(StringCordView str, size_t pos) {
    const std::string type_string = "# type:";
    if (str.size() < pos + type_string.length()) {
      return false;
    }
    auto match_string = str.substr(pos, type_string.size());
    return match_string == type_string;
  }

  // Root of the token trie built once in the constructor.
  TokenTrieRef head;
};
402
+
403
+ TORCH_API SharedParserData& sharedParserData();
404
+
405
+ struct Token {
406
+ int kind;
407
+ SourceRange range;
408
+ Token(int kind, SourceRange range) : kind(kind), range(std::move(range)) {}
409
+ std::string text() {
410
+ return std::string(range.token_text());
411
+ }
412
+ std::string kindString() const {
413
+ return kindToString(kind);
414
+ }
415
+ };
416
+
417
// Streaming tokenizer over a Source. Produces Tokens one at a time and
// synthesizes TK_INDENT / TK_NEWLINE / TK_DEDENT tokens from leading
// whitespace, Python-style, tracking bracket nesting so newlines inside
// (), [] and {} are insignificant.
struct Lexer {
  explicit Lexer(std::shared_ptr<Source> source)
      : source(std::move(source)),
        pos(0),
        nesting(0),
        indent_stack(),
        next_tokens(),
        shared(sharedParserData()) {
    // The first (possibly empty) run of whitespace establishes the base
    // indentation level for the whole source.
    auto first_indent = lexRaw(true);
    indent_stack.push_back(first_indent.range.size());
    lex();
  }
  // Return the current token, and then move to the next one
  Token next() {
    if (next_tokens.empty())
      reportError("Lexer invariant violated: empty token queue");
    Token r = std::move(next_tokens.front());
    next_tokens.erase(next_tokens.begin());
    if (next_tokens.empty()) {
      // Refill the queue so cur() always has a token to return.
      lex();
    }
    return r;
  }
  // Skip the current token if it matches the given kind
  bool nextIf(int kind) {
    if (cur().kind != kind)
      return false;
    next();
    return true;
  }

  // Throws std::runtime_error with `what` and a highlighted source location.
  [[noreturn]] void reportError(const std::string& what) {
    reportError(what, cur());
  }
  [[noreturn]] void reportError(const std::string& what, const Token& t) {
    std::stringstream ss;
    ss << what << ":\n";
    t.range.highlight(ss);
    throw std::runtime_error(ss.str());
  }
  // Throws "expected X but found Y" with a highlighted source location.
  [[noreturn]] void expected(const std::string& what, const Token& t) {
    std::stringstream ss;
    ss << "expected " << what << " but found '" << t.kindString()
       << "' here:\n";
    t.range.highlight(ss);
    throw std::runtime_error(ss.str());
  }
  [[noreturn]] void expected(const std::string& what) {
    expected(what, cur());
  }
  // Check that the current token has a given kind, return the current token,
  // and advance to the next one.
  Token expect(int kind) {
    if (cur().kind != kind) {
      expected(kindToString(kind));
    }
    return next();
  }
  // Peek one token past the current one without consuming anything.
  Token& lookahead() {
    if (next_tokens.size() < 2) {
      lex();
    }
    return next_tokens[1];
  }
  // Peek at the current token without consuming it.
  Token& cur() {
    return next_tokens.front();
  }

 private:
  // Pulls one raw token and post-processes it: maintains bracket nesting and
  // converts whitespace tokens into INDENT / NEWLINE / DEDENT sequences.
  void lex() {
    auto r = lexRaw();
    switch (r.kind) {
      case '(':
      case '[':
      case '{':
        nesting++;
        break;
      case ')':
      case ']':
      case '}':
        nesting--;
        break;
      case TK_WHITESPACE:
      case TK_WHITESPACE_EOF: {
        const auto depth = static_cast<int64_t>(
            r.kind == TK_WHITESPACE_EOF ? indent_stack.front()
                                        : r.range.size());
        // note: TK_WHITESPACE_EOF is whitespace right before the EOF token
        // just like we allow the code to be indented to a particular initial
        // indent level, we allow the final indent to be anything and set
        // it back to the initial indent level. This allows the code to be
        // put into string literals inside code without worrying about final
        // whitespace
        if (depth > indent_stack.back()) {
          indent_stack.push_back(depth);
          r.kind = TK_INDENT;
        } else if (depth == indent_stack.back()) {
          r.kind = TK_NEWLINE;
        } else {
          // Dedent: queue one NEWLINE plus one DEDENT for every indentation
          // level popped until we land exactly on `depth`.
          next_tokens.emplace_back(TK_NEWLINE, r.range);
          while (indent_stack.back() != depth) {
            indent_stack.pop_back();
            next_tokens.emplace_back(TK_DEDENT, r.range);
            if (indent_stack.empty()) {
              reportError("invalid indent level " + std::to_string(depth), r);
            }
          }
          return; // We've already queued the tokens
        }
      } break;
      default:
        break;
    }
    next_tokens.push_back(std::move(r));
  }
  // Scans one token starting at the current iterator, with no indentation
  // post-processing. When `whitespace_token` is true, leading whitespace is
  // itself returned as a token. Advances `current`/`pos` past the token.
  Token lexRaw(bool whitespace_token = false) {
    // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
    int kind;
    AT_ASSERT(source);
    if (current == nullptr) {
      // Lazily begin iteration at the start of the source text.
      AT_ASSERT(pos == 0);
      current = std::make_unique<StringCordView::Iterator>(
          source->text_str().begin());
    }

    StringCordView::Iterator start_iter = *current;
    StringCordView::Iterator end_iter = *current;
    if (!shared.match(
            *current,
            nesting > 0,
            whitespace_token,
            &kind,
            &start_iter,
            &end_iter)) {
      expected(
          "a valid token",
          Token(
              **current,
              SourceRange(source, start_iter, start_iter.pos() + 1)));
    }

    auto t = Token(kind, SourceRange(source, start_iter, end_iter.pos()));
    pos = end_iter.pos();
    *current = end_iter;
    return t;
  }

  std::shared_ptr<Source> source;
  // Iterator over `source`'s text; created lazily on the first lexRaw call.
  std::unique_ptr<StringCordView::Iterator> current;
  size_t pos;
  size_t nesting; // depth of ( [ { nesting...
  std::vector<int> indent_stack; // stack of indentation level of blocks
  // Invariant: this should always contain at least a single element
  std::vector<Token> next_tokens;
  SharedParserData& shared;
};
573
+ } // namespace jit
574
+ } // namespace torch
575
+
576
+ C10_CLANG_DIAGNOSTIC_POP()
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/mini_environment.h ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/jit_type.h>
4
+ #include <torch/csrc/jit/ir/ir.h>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+
9
+ // Simple data structure for containing a type T in nested control blocks
10
+ // Should only be used after initial compilation where type checking and
11
+ // loads and stores are emitted
12
+
13
+ template <typename T>
14
+ struct MiniEnvironment {
15
+ MiniEnvironment(Block* b, std::shared_ptr<MiniEnvironment> next = nullptr)
16
+ : next(std::move(next)) {}
17
+
18
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
19
+ std::shared_ptr<MiniEnvironment<T>> next;
20
+
21
+ T findInThisFrame(const std::string& name) {
22
+ auto it = table.find(name);
23
+ if (it != table.end()) {
24
+ return it->second;
25
+ }
26
+ return nullptr;
27
+ }
28
+
29
+ T findInAnyFrame(const std::string& name) {
30
+ for (auto runner = this; runner; runner = runner->next.get()) {
31
+ if (auto r = runner->findInThisFrame(name)) {
32
+ return r;
33
+ }
34
+ }
35
+ return nullptr;
36
+ }
37
+
38
+ void setVar(const std::string& name, T value) {
39
+ table[name] = value;
40
+ }
41
+
42
+ std::vector<std::string> definedVariables() {
43
+ std::vector<std::string> result;
44
+ result.reserve(table.size());
45
+ for (auto& kv : table) {
46
+ result.push_back(kv.first);
47
+ }
48
+ std::sort(result.begin(), result.end());
49
+ return result;
50
+ }
51
+
52
+ private:
53
+ std::unordered_map<std::string, T> table;
54
+ };
55
+
56
+ } // namespace jit
57
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/name_mangler.h ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/qualified_name.h>
4
+ #include <torch/csrc/Export.h>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+
9
+ /**
10
+ * class NameMangler
11
+ *
12
+ * Utility to mangle qualified names in order to make them unique. We use this
13
+ * in various places where we to de-duplicate qualified names.
14
+ */
15
class TORCH_API NameMangler {
 public:
  // Given a qualified name, return a mangled version that is guaranteed to be
  // unique with respect to previous/future calls of `mangled()` on this name
  // mangler instance.
  c10::QualifiedName mangle(const c10::QualifiedName& name);

 private:
  // Counter bumped per mangle to make each result distinct — presumably
  // embedded in the mangled name; confirm in the implementation.
  size_t mangleIndex_ = 0;
};
25
+
26
+ } // namespace jit
27
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/parse_string_literal.h ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <c10/util/Optional.h>
3
+ #include <torch/csrc/jit/frontend/error_report.h>
4
+ #include <torch/csrc/jit/frontend/lexer.h>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+
9
// Returns true when the `len` characters of `str` beginning at `start` all
// equal `c` and the window [start, start + len) fits inside `str`.
inline bool isCharCount(char c, const std::string& str, size_t start, int len) {
  if (start + len > str.size()) {
    return false;
  }
  const auto first = str.begin() + start;
  return std::count(first, first + len, c) == len;
}
14
+
15
+ inline c10::optional<char> parseOctal(const std::string& str, size_t pos) {
16
+ //\xxx where x are 0-7
17
+ if (pos + 3 >= str.size())
18
+ return c10::nullopt;
19
+ size_t c = 0;
20
+ for (size_t i = 1, b = 64; i < 4; ++i, b /= 8) {
21
+ // NOLINTNEXTLINE(bugprone-signed-char-misuse)
22
+ int d = str[pos + i];
23
+ if (d < '0' || d > '7')
24
+ return c10::nullopt;
25
+ c += b * (d - '0');
26
+ }
27
+ if (c >= 256)
28
+ return c10::nullopt;
29
+ return c;
30
+ }
31
+
32
// Converts a quoted source literal `str` (including its surrounding 1 or 3
// quote characters) into the actual string value, decoding backslash
// escapes. Throws ErrorReport (at `range`) for hex/unicode escapes, which
// are unsupported, and for malformed octal escapes.
inline std::string parseStringLiteral(
    const SourceRange& range,
    const std::string& str) {
  // Strip the surrounding quotes (1 or 3 of them).
  int quote_len = isCharCount(str[0], str, 0, 3) ? 3 : 1;
  auto ret_str = str.substr(quote_len, str.size() - quote_len * 2);
  size_t pos = ret_str.find('\\');
  while (pos != std::string::npos) {
    // invariant: pos has to escape a character because it is a valid string
    char c = ret_str[pos + 1];
    size_t to_erase = 2;
    switch (ret_str[pos + 1]) {
      case '\\':
      case '\'':
      case '\"':
      case '\n':
        // These stand for themselves: the backslash is dropped and the
        // character kept. NOTE(review): '\<newline>' therefore yields a
        // literal newline rather than acting as a line continuation —
        // confirm this is intended.
        break;
      case 'a':
        c = '\a';
        break;
      case 'b':
        c = '\b';
        break;
      case 'f':
        c = '\f';
        break;
      case 'n':
        c = '\n';
        break;
      case 'v':
        c = '\v';
        break;
      case 't':
        c = '\t';
        break;
      case 'x':
        throw ErrorReport(range) << "unsupported hex specifier";
      case 'u':
      case 'U':
        throw ErrorReport(range) << "unsupported unicode specifier";
      default:
        // octal value in format \nnn, n is [0-7]
        if (auto v = parseOctal(ret_str, pos)) {
          to_erase = 4;
          c = *v;
        } else {
          throw ErrorReport(range) << " ill formed octal specifier";
        }
    }
    // Replace the whole escape sequence with its single decoded character.
    ret_str.replace(pos, to_erase, /* num copies */ 1, c);
    pos = ret_str.find('\\', pos + 1);
  }
  return ret_str;
}
85
+
86
+ } // namespace jit
87
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/parser.h ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <torch/csrc/Export.h>
3
+ #include <torch/csrc/jit/frontend/tree.h>
4
+ #include <torch/csrc/jit/frontend/tree_views.h>
5
+ #include <memory>
6
+
7
+ namespace torch {
8
+ namespace jit {
9
+
10
+ struct Decl;
11
+ struct ParserImpl;
12
+ struct Lexer;
13
+
14
+ TORCH_API Decl mergeTypesFromTypeComment(
15
+ const Decl& decl,
16
+ const Decl& type_annotation_decl,
17
+ bool is_method);
18
+
19
// Parser for TorchScript source; the implementation is hidden behind
// ParserImpl (pImpl) to keep this header light.
struct TORCH_API Parser {
  // `src` is the source buffer to parse; it is shared, not copied.
  explicit Parser(const std::shared_ptr<Source>& src);
  // Parse a single function definition. `is_method` — presumably changes how
  // the signature/self parameter is treated; confirm in ParserImpl.
  TreeRef parseFunction(bool is_method);
  // Parse a class definition.
  TreeRef parseClass();
  // Parse a type comment into a Decl (see mergeTypesFromTypeComment above).
  Decl parseTypeComment();
  // Parse a single expression.
  Expr parseExp();
  // Access the underlying lexer (e.g. for lookahead or error locations).
  Lexer& lexer();
  ~Parser();

 private:
  std::unique_ptr<ParserImpl> pImpl;
};
31
+
32
+ } // namespace jit
33
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/parser_constants.h ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ namespace torch {
4
+ namespace jit {
5
// Characters that form single-character tokens on their own; every other
// token spelling is registered via TC_FORALL_TOKEN_KINDS in lexer.h.
// constexpr makes the pointer a compile-time constant; `static` keeps the
// same internal linkage (one copy per translation unit) as before.
static constexpr const char* valid_single_char_tokens = "+-*/%@()[]:,={}><.?!&^|~";
6
+ } // namespace jit
7
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/resolver.h ADDED
@@ -0,0 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/jit_type.h>
4
+ #include <ATen/core/qualified_name.h>
5
+ #include <torch/csrc/jit/frontend/sugared_value.h>
6
+
7
+ namespace torch {
8
+ namespace jit {
9
+
10
+ struct Resolver;
11
+ using ResolverPtr = std::shared_ptr<Resolver>;
12
+
13
+ /**
14
+ * class Resolver
15
+ *
16
+ * Represents an "outer environment" in which we an look up names and return
17
+ * a corresponding SugaredValue. This is used during compilation to resolve
18
+ * references to names which are not defined internal to the graph.
19
+ *
20
+ * Example: PythonResolver looks at the enclosing Python scope for `name`.
21
+ *
22
+ * NOTE: When adding methods, keep this an abstract class (i.e. all new methods
23
+ * should be purely virtual). Resist the urge to provide a default
24
+ * implementation; you should explicitly think about how each resolver would
25
+ * handle the method.
26
+ */
27
struct Resolver {
  virtual ~Resolver() = default;

  // Resolve a given name to a SugaredValue. This takes the method `m` that the
  // caller is currently constructing, since we may need to insert nodes into
  // the graph to create a value.
  // The default implementation resolves nothing (returns nullptr).
  virtual std::shared_ptr<SugaredValue> resolveValue(
      const std::string& name,
      GraphFunction& m,
      const SourceRange& loc) {
    return nullptr;
  }

  // Resolve `name` to a TypePtr.
  // The default implementation resolves nothing (returns nullptr).
  virtual TypePtr resolveType(const std::string& name, const SourceRange& loc) {
    return nullptr;
  }
};
45
+
46
// A resolver that only understands "torch.foo()" lookups.
struct NativeResolver : public Resolver {
  // Maps the single name "torch" to the builtin "aten" module; every other
  // name is left unresolved (nullptr).
  std::shared_ptr<SugaredValue> resolveValue(
      const std::string& name,
      GraphFunction& m,
      const SourceRange& loc) override {
    if (name == "torch") {
      return std::make_shared<BuiltinModule>("aten");
    }
    return nullptr;
  }

  // Never resolves type names.
  TypePtr resolveType(const std::string& name, const SourceRange& loc)
      override {
    return nullptr;
  }
};

// Convenience factory returning a fresh NativeResolver.
inline std::shared_ptr<NativeResolver> nativeResolver() {
  return std::make_shared<NativeResolver>();
}
67
+ } // namespace jit
68
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/schema_matching.h ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <torch/csrc/Export.h>
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+ #include <torch/csrc/jit/ir/named_value.h>
5
+
6
+ #include <ATen/core/function_schema.h>
7
+
8
+ namespace torch {
9
+ namespace jit {
10
+
11
+ // Try to match a list of inputs and keyword 'attributes' to this
12
+ // schema. Return the flat list of positional inputs to the call or
13
+ // `c10::nullopt` on failure (`failure_messages` contains a good error
14
+ // report in this case)
15
+
16
// Result of successfully matching a call site against a FunctionSchema
// (see matchSchema below).
struct MatchedSchema {
  // Flat list of positional inputs to the call, in schema order.
  std::vector<Value*> inputs;
  // Types of the values the call returns.
  std::vector<TypePtr> return_types;
  // Optional names for the returned fields — presumably used for named-tuple
  // returns; confirm against callers.
  c10::OptNameList return_field_names;
  // Name of the schema that was matched.
  std::string schema_name;
};
22
+
23
+ TORCH_API bool isBlockListedSchema(const FunctionSchema& schema);
24
+
25
+ TORCH_API MatchedSchema matchSchema(
26
+ const ::c10::FunctionSchema& schema,
27
+ const SourceRange& loc,
28
+ Graph& graph,
29
+ at::ArrayRef<NamedValue> args,
30
+ at::ArrayRef<NamedValue> kwargs,
31
+ const c10::optional<NamedValue>& self = c10::nullopt);
32
+
33
+ TORCH_API std::pair<size_t, MatchedSchema> matchSchemas(
34
+ const std::vector<const ::c10::FunctionSchema*>& schemas,
35
+ const SourceRange& loc,
36
+ Graph& graph,
37
+ at::ArrayRef<NamedValue> args,
38
+ at::ArrayRef<NamedValue> kwargs,
39
+ const c10::optional<NamedValue>& self = c10::nullopt,
40
+ bool render_errors = false);
41
+
42
+ TORCH_API bool convertibleToList(
43
+ const TypePtr& type,
44
+ const TypePtr& list_type_);
45
+
46
+ TORCH_API std::string getFullSchemaName(const ::c10::FunctionSchema& schema);
47
+
48
+ TORCH_API Value* emitBuiltinCall(
49
+ const SourceRange& loc,
50
+ Graph& graph,
51
+ Symbol name,
52
+ at::ArrayRef<NamedValue> args,
53
+ at::ArrayRef<NamedValue> kwargs,
54
+ const c10::optional<NamedValue>& self = c10::nullopt);
55
+
56
+ TORCH_API c10::optional<size_t> findInputWithName(
57
+ const std::string& name,
58
+ at::ArrayRef<NamedValue> kwargs,
59
+ bool is_aten = false);
60
+
61
+ // applies implicit conversion from value trying to turn it into type
62
+ // concrete_type it succeeds if the return_value->isSubtypeOf(concrete_type)
63
+ TORCH_API Value* tryConvertToType(
64
+ const SourceRange& loc,
65
+ Graph& graph,
66
+ const TypePtr& concrete_type,
67
+ Value* value,
68
+ bool allow_conversions);
69
+ } // namespace jit
70
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/schema_type_parser.h ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/alias_info.h>
4
+ #include <ATen/core/jit_type.h>
5
+ #include <c10/macros/Macros.h>
6
+ #include <c10/util/FunctionRef.h>
7
+ #include <torch/csrc/jit/frontend/lexer.h>
8
+
9
+ namespace torch {
10
+ namespace jit {
11
+
12
+ using TypePtr = c10::TypePtr;
13
+
14
// Parses type expressions as they appear in operator schema strings: base
// types, alias annotations, and (optionally complete) tensor types.
struct TORCH_API SchemaTypeParser {
  TypePtr parseBaseType();
  // Parses an alias annotation if present; nullopt otherwise.
  c10::optional<c10::AliasInfo> parseAliasAnnotation();
  std::pair<TypePtr, c10::optional<c10::AliasInfo>> parseType();
  // Parses a type, returning both its "fake" and "real" variants along with
  // any alias annotation.
  std::tuple</*fake*/ TypePtr, /*real*/ TypePtr, c10::optional<c10::AliasInfo>>
  parseFakeAndRealType();
  // Maps a dtype name to a ScalarType; nullopt for unknown names.
  c10::optional<at::ScalarType> parseTensorDType(const std::string& dtype);
  TypePtr parseRefinedTensor();

  // `L` is borrowed and must outlive this parser; tokens are consumed from
  // it in place.
  SchemaTypeParser(Lexer& L, bool parse_complete_tensor_types)
      : complete_tensor_types(parse_complete_tensor_types), L(L) {}

 private:
  c10::optional<bool> tryToParseRequiresGrad();
  c10::optional<c10::Device> tryToParseDeviceType();
  // Parses `begin item (sep item)* end`, invoking `callback` once per item.
  void parseList(
      int begin,
      int sep,
      int end,
      c10::function_ref<void()> callback);

  bool complete_tensor_types;
  Lexer& L;
  size_t next_id = 0;
};
39
+ } // namespace jit
40
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/script_type_parser.h ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once
#include <ATen/core/jit_type.h>
#include <torch/csrc/Export.h>
#include <torch/csrc/jit/frontend/resolver.h>
#include <torch/csrc/jit/frontend/tree_views.h>

namespace torch {
namespace jit {

/**
 * class ScriptTypeParser
 *
 * Parses expressions in our typed AST format (TreeView) into types and
 * typenames.
 */
class TORCH_API ScriptTypeParser {
 public:
  explicit ScriptTypeParser() = default;
  // `resolver` is consulted for names that are not builtin type names.
  explicit ScriptTypeParser(ResolverPtr resolver)
      : resolver_(std::move(resolver)) {}

  // Parse a type-annotation expression (e.g. `List[int]`) into a TypePtr.
  c10::TypePtr parseTypeFromExpr(const Expr& expr) const;

  // Recognize broadcast-list style annotations; returns the parsed type and
  // an int (presumably the list length — confirm in the implementation), or
  // nullopt when `expr` is not one.
  c10::optional<std::pair<c10::TypePtr, int32_t>> parseBroadcastList(
      const Expr& expr) const;

  // Parse a type from its source-string form.
  c10::TypePtr parseType(const std::string& str);

  // Build a FunctionSchema from a function definition; `skip_self` drops the
  // leading `self` parameter (for methods).
  FunctionSchema parseSchemaFromDef(const Def& def, bool skip_self);

  // Evaluate a class-level constant assignment to a concrete IValue.
  c10::IValue parseClassConstant(const Assign& assign);

 private:
  c10::TypePtr parseTypeFromExprImpl(const Expr& expr) const;

  c10::optional<std::string> parseBaseTypeName(const Expr& expr) const;
  at::TypePtr subscriptToType(
      const std::string& typeName,
      const Subscript& subscript) const;
  // Evaluate default-argument expressions into concrete IValues.
  std::vector<IValue> evaluateDefaults(
      const SourceRange& r,
      const std::vector<Expr>& default_types,
      const std::vector<Expr>& default_exprs);
  std::vector<Argument> parseArgsFromDecl(const Decl& decl, bool skip_self);

  std::vector<Argument> parseReturnFromDecl(const Decl& decl);

  ResolverPtr resolver_ = nullptr;

  // Need to use `evaluateDefaults` in serialization
  friend struct ConstantTableValue;
  friend struct SourceImporterImpl;
};
} // namespace jit
} // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/source_range.h ADDED
@@ -0,0 +1,459 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <c10/util/Exception.h>
3
+ #include <c10/util/Optional.h>
4
+
5
+ #include <algorithm>
6
+ #include <iterator>
7
+ #include <memory>
8
+ #include <numeric>
9
+ #include <ostream>
10
+ #include <regex>
11
+ #include <sstream>
12
+ #include <unordered_map>
13
+
14
+ namespace torch {
15
+ namespace jit {
16
+
17
+ class SourceRangeUnpickler;
18
+ struct SourceRange;
19
+
20
// A string-like class backed by a vector of string_view.
// The string represented is logically the concatenation of the string_views.
// This has the advantage of not needing contiguous memory.
struct TORCH_API StringCordView {
  StringCordView();
  StringCordView(const StringCordView&) = default;
  StringCordView(StringCordView&&) noexcept = default;
  // `ownerships` keeps alive the std::strings the `inputs` views may point
  // into; views over externally-owned storage need no corresponding entry.
  StringCordView(
      std::vector<c10::string_view> inputs,
      std::vector<std::shared_ptr<std::string>> ownerships);

  StringCordView& operator=(const StringCordView&) = default;
  StringCordView& operator=(StringCordView&&) noexcept = default;

  // Total character count over all pieces. Relies on accumulated_sizes_
  // always having at least one entry (a leading 0), so back() is safe.
  size_t size() const {
    return accumulated_sizes_.back();
  }

  // Offset of `tok` at or after `start` (implementation-defined "not found"
  // value — presumably std::string::npos; confirm in source_range.cpp).
  size_t find(const std::string& tok, size_t start) const;
  // Like find(), but `tok` is interpreted as a regular expression.
  size_t find_regex(const std::string& tok, size_t start) const;
  // View of `size` characters starting at absolute offset `start`.
  StringCordView substr(size_t start, size_t size) const;

  // Random access by absolute offset (goes through iter_for_pos).
  char at(size_t index) const {
    return *iter_for_pos(index);
  }
  char operator[](size_t index) const {
    return at(index);
  }

  // Materialize the full concatenation as an owned std::string.
  std::string str() const {
    std::stringstream ss;
    for (auto s : pieces_) {
      ss << std::string(s);
    }
    return ss.str();
  }

  bool operator==(const std::string& rhs) const;

  bool operator==(const StringCordView& rhs) const;

  // Direct access to the index-th underlying piece.
  c10::string_view piece(size_t index) const {
    return pieces_[index];
  }

  // Forward iterator over the characters of the cord. `line_` indexes a
  // piece in pieces_; `pos_` is the offset within that piece.
  struct Iterator {
    Iterator(
        const StringCordView* str,
        size_t start_line,
        size_t start_pos,
        size_t size)
        : line_(start_line), pos_(start_pos), str_(str), size_(size) {}
    explicit Iterator(const StringCordView* str)
        : Iterator(str, 0, 0, str->size()) {}

    Iterator() : Iterator(nullptr, 0, 0, 0) {}

    Iterator(const Iterator&) = default;
    Iterator(Iterator&&) = default;
    Iterator& operator=(const Iterator&) = default;
    Iterator& operator=(Iterator&&) = default;

    // Pre-increment: advance one character, hopping to the next piece when
    // the current piece is exhausted. No-op on an empty iterator.
    Iterator operator++() {
      if (size_ == 0) {
        return *this;
      }
      if ((pos_ + 1) < str_->pieces_[line_].size()) {
        pos_++;
      } else {
        line_++;
        pos_ = 0;
      }
      return *this;
    }

    Iterator operator++(int) {
      Iterator prev(*this);
      ++(*this);
      return prev;
    }

    Iterator next_iter() const {
      Iterator next(*this);
      ++next;
      return next;
    }

    // Advance by `num` characters. The first branch is a same-piece fast
    // path; otherwise we recompute from the absolute position.
    // NOTE(review): the fast-path condition compares the piece-relative
    // `pos_ + num` against absolute offsets in accumulated_sizes_, which
    // looks inconsistent; correctness appears to rest on the absolute-
    // position fallback below. Worth confirming against upstream tests.
    Iterator& operator+=(size_t num) {
      if (!has_next()) {
        return *this;
      }
      size_t target_pos = pos_ + num;
      if (target_pos >= str_->accumulated_sizes_[line_] &&
          (line_ + 1) < str_->accumulated_sizes_.size() &&
          target_pos < str_->accumulated_sizes_[line_ + 1]) {
        pos_ = target_pos;
        return *this;
      }

      size_t target_abs_pos = pos() + num;
      *this = str_->iter_for_pos(target_abs_pos);
      return *this;
    }

    // Any two exhausted iterators compare equal, regardless of which cord
    // they refer to (so end() comparisons work across copies).
    bool operator==(const Iterator& rhs) const {
      if (!has_next() && !rhs.has_next()) {
        return true;
      }
      return (str_ == rhs.str_) && (line_ == rhs.line_) && (pos_ == rhs.pos_);
    }
    bool operator!=(const Iterator& rhs) {
      return !((*this) == rhs);
    }
    // True while there is at least one character left to read.
    bool has_next() const {
      return size_ > 0 && (line_ < str_->pieces_.size());
    }

    char operator*() const {
      TORCH_INTERNAL_ASSERT(line_ < str_->pieces_.size());
      TORCH_INTERNAL_ASSERT(pos_ < str_->pieces_[line_].size());
      return str_->pieces_[line_].at(pos_);
    }

    // returns rest of the line (i.e. the remainder of the current piece)
    // of the current iterator
    c10::string_view rest_line() const {
      if (line_ >= str_->pieces_.size()) {
        return "";
      }

      c10::string_view cur_line = str_->pieces_[line_];
      return cur_line.substr(pos_, std::string::npos);
    }

    // Absolute character offset of this iterator within the whole cord.
    size_t pos() const {
      if (size_ == 0) {
        return 0;
      }
      return str_->accumulated_sizes_[line_] + pos_;
    }

   private:
    size_t line_;
    size_t pos_;
    const StringCordView* str_;  // non-owning
    size_t size_;
    friend struct StringCordView;
  };

  Iterator begin() const {
    return Iterator(this, 0, 0, size());
  }
  Iterator end() const {
    return Iterator(this, pieces_.size(), 0, 0);
  }
  // Iterator positioned at absolute offset `pos`.
  Iterator iter_for_pos(size_t pos) const;

 private:
  std::vector<c10::string_view> pieces_;
  // accumulated_sizes_[i] is the absolute offset at which pieces_[i] begins
  // (used by Iterator::pos()); size() reads the last entry.
  std::vector<size_t> accumulated_sizes_;
  // Keep-alive storage for pieces_ that view into heap strings.
  std::vector<std::shared_ptr<std::string>> owned_strings_;
};
181
+
182
// Source represents a code segment. It keeps track of:
//  - text_view : the view into text of the code segment
//  - filename (optional) : if present, represents the name of the file from
//                          which the code segment originated.
//  - starting_line_no : represents the line in the original file where the
//                       code segment started.
struct TORCH_API Source {
  // Whether or not Source should copy the string passed in the constructor.
  enum CopiesString { COPIES_STRING, DONT_COPY };

  // With DONT_COPY, the caller must keep the memory behind `text_view` alive
  // for the lifetime of this Source (no ownership entry is recorded).
  explicit Source(
      c10::string_view text_view,
      c10::optional<std::string> filename = c10::nullopt,
      size_t starting_line_no = 0,
      std::shared_ptr<SourceRangeUnpickler> gen_ranges = nullptr,
      CopiesString copies_str = COPIES_STRING)
      : filename_(std::move(filename)),
        starting_line_no_(starting_line_no),
        gen_ranges_(std::move(gen_ranges)) {
    if (copies_str == COPIES_STRING) {
      std::shared_ptr<std::string> allocated_str =
          std::make_shared<std::string>(text_view.data(), text_view.size());
      text_view_ = StringCordView({*allocated_str}, {allocated_str});
    } else {
      text_view_ = StringCordView({text_view}, {});
    }

    calc_line_start_offsets();
  }

  // Takes ownership of an existing cord; no copy is made.
  explicit Source(
      StringCordView str,
      c10::optional<std::string> filename = c10::nullopt,
      size_t starting_line_no = 0,
      std::shared_ptr<SourceRangeUnpickler> gen_ranges = nullptr)
      : text_view_(std::move(str)),
        filename_(std::move(filename)),
        starting_line_no_(starting_line_no),
        gen_ranges_(std::move(gen_ranges)) {
    calc_line_start_offsets();
  }
  // Given a line number (within source_), return the byte offset of the
  // beginning of that line.
  size_t offset_for_line(size_t line) const {
    return line_starting_offsets_.at(line);
  }

  // Returns number of lines present.
  size_t num_lines() const {
    return line_starting_offsets_.size();
  }

  // Calculate the line (within the code segment) on which `offset` resides.
  // upper_bound yields the first line starting strictly after `offset`; the
  // containing line is the one before it (hence the -1).
  size_t lineno_for_offset(size_t offset) const {
    auto iter = std::upper_bound(
        line_starting_offsets_.begin(), line_starting_offsets_.end(), offset);
    return iter - line_starting_offsets_.begin() - 1;
  }

  // Calculate the line (within the original source file, if present) on which
  // `lineno` resides.
  size_t lineno_to_source_lineno(size_t lineno) const {
    if (filename_) {
      return lineno + starting_line_no_;
    } else {
      return lineno;
    }
  }

  // View of line `lineno`; for all but the last line this spans up to the
  // start of the next line (so it includes the trailing newline).
  StringCordView get_line(size_t lineno) const {
    auto start = offset_for_line(lineno);
    auto size = (lineno + 1) < num_lines() ? offset_for_line(lineno + 1) - start
                                           : text_view_.size() - start;
    return text_view_.substr(start, size);
  }

  const StringCordView& text_str() const {
    return text_view_;
  }

  char char_at(size_t index) const {
    return text_view_.at(index);
  }

  size_t size() const {
    return text_view_.size();
  }

  c10::optional<std::string>& filename() {
    return filename_;
  }

  size_t starting_line_no() const {
    return starting_line_no_;
  }

  // Map `range` back to the source range that generated it, via gen_ranges_
  // when one was provided (see implementation for the nullopt cases).
  c10::optional<SourceRange> findSourceRangeThatGenerated(
      const SourceRange& range);

  ~Source() = default;

 private:
  // Record the starting offset of every line. Line 0 always starts at 0;
  // each '\n' begins a new line at the following offset.
  void calc_line_start_offsets() {
    line_starting_offsets_.clear();
    line_starting_offsets_.push_back(0);
    size_t pos = 0;
    while ((pos = text_view_.find("\n", pos)) != std::string::npos) {
      line_starting_offsets_.push_back(++pos);
    }
  }

  StringCordView text_view_;

  c10::optional<std::string> filename_;
  // If filename_ is not present, starting_line_no_ is don't care
  size_t starting_line_no_;
  // Starting offsets for lines into the source. e.g. line 0 starts at
  // line_starting_offsets_[0], etc.
  std::vector<size_t> line_starting_offsets_;

  std::shared_ptr<SourceRangeUnpickler> gen_ranges_;
};
304
+
305
// A SourceRange is a reference to subset of a Source, specified by `start` and
// `end` byte offsets into the source text.
struct TORCH_API SourceRange {
  SourceRange(std::shared_ptr<Source> source_view, size_t start_, size_t end_)
      : source_view_(std::move(source_view)), start_(start_), end_(end_) {
    if (source_view_) {
      // Cache an iterator at `start_` so token_text() can avoid re-seeking.
      start_iter_ = source_view_->text_str().iter_for_pos(start_);
    }
  }

  // An empty range with no backing Source.
  SourceRange() : source_view_(nullptr), start_(0), end_(0) {}

  // Construct from an iterator already positioned at the start offset.
  SourceRange(
      std::shared_ptr<Source> source_view_,
      StringCordView::Iterator start_iter,
      size_t end_)
      : source_view_(std::move(source_view_)),
        start_(start_iter.pos()),
        end_(end_),
        start_iter_(start_iter) {}

  // Text of the range, read from the cord piece containing `start`.
  // NOTE(review): rest_line() only covers the current piece, so a range that
  // crosses a piece boundary would be truncated here; presumably callers only
  // use this for single-token ranges — confirm before relying on more.
  const c10::string_view token_text() const {
    size_t size = end() - start();
    return start_iter_.rest_line().substr(0, size);
  }

  // Full text of the range (substr handles piece boundaries).
  const StringCordView text() const {
    return source_view_->text_str().substr(start(), end() - start());
  }
  size_t size() const {
    return end() - start();
  }
  // Context size constant used by the highlighting code (see implementation).
  static const size_t CONTEXT = 3;
  // Write this range with surrounding context and highlighting to `out`.
  void highlight(std::ostream& out) const;

  // Customizable version of 'highlight' method.
  void print_with_context(
      std::ostream& out,
      size_t context,
      bool highlight,
      const std::string& funcname) const;

  const std::shared_ptr<Source>& source() const {
    return source_view_;
  }
  size_t start() const {
    return start_;
  }
  size_t end() const {
    return end_;
  }
  // Highlighted rendering as a string.
  std::string str() const {
    std::stringstream ss;
    highlight(ss);
    return ss.str();
  }

  // (filename, source line, column) of the start of this range; nullopt when
  // there is no backing Source or it has no filename.
  c10::optional<std::tuple<std::string, size_t, size_t>> file_line_col() const {
    if (!source_view_ || !source()->filename()) {
      return c10::nullopt;
    }

    auto lineno = source_view_->lineno_for_offset(start_);
    auto col_offset = (int)start_ - (int)source_view_->offset_for_line(lineno);
    // TODO: c10::optional<>::value returns an rvalue ref so can't use it here??
    return std::make_tuple<std::string, size_t, size_t>(
        source_view_->filename().value_or(""),
        source_view_->lineno_to_source_lineno(lineno),
        (size_t)col_offset);
  }

  // Equality is by offsets plus identity of the backing Source pointer.
  bool operator==(const SourceRange& rhs) const {
    return start() == rhs.start() && end() == rhs.end() &&
        source() == rhs.source();
  }

  bool operator!=(const SourceRange& rhs) const {
    return !(*this == rhs);
  }

  // Delegates to Source::findSourceRangeThatGenerated; nullopt when there is
  // no backing Source.
  c10::optional<SourceRange> findSourceRangeThatGenerated() const {
    if (!source_view_) {
      return c10::nullopt;
    }
    return source_view_->findSourceRangeThatGenerated(*this);
  }

 protected:
  // protected (not private) so OwnedSourceRange below can replace it.
  std::shared_ptr<Source> source_view_;

 private:
  size_t start_;
  size_t end_;
  StringCordView::Iterator start_iter_;
};
400
+
401
// OwnedSourceRange is just like a SourceRange except that it owns its own
// copy of the underlying `Source` (and therefore of the source text) instead
// of sharing one with other ranges.
struct OwnedSourceRange : public SourceRange {
  explicit OwnedSourceRange(const SourceRange& source_range)
      : SourceRange(source_range) {
    const auto& source = source_range.source();
    if (source) {
      // Deep-copy the text via str() into a fresh Source. Note the original's
      // gen_ranges unpickler is not carried over.
      source_view_ = std::make_shared<Source>(
          source->text_str().str(),
          source->filename(),
          source->starting_line_no());
    }
  }
};
415
+
416
// Hash functor so SourceRange can key unordered containers
// (see SourceRangeTagMap below).
struct TORCH_API SourceRangeHasher {
 public:
  size_t operator()(const torch::jit::SourceRange& key) const;
};

// One frame of a stack trace: the file it came from and the range within it.
struct StackEntry {
  std::string filename;
  SourceRange range;
};

// Write a human-readable rendering of `entries` to `out`.
TORCH_API void format_stack_trace(
    std::ostream& out,
    const std::vector<StackEntry>& entries);

// Streaming a SourceRange prints its highlighted form.
inline std::ostream& operator<<(std::ostream& out, const SourceRange& range) {
  range.highlight(out);
  return out;
}

// A pair of (byte offset, SourceRange) describing a specific segment
// of the output stream
struct TaggedRange {
  TaggedRange(size_t bytes, SourceRange range)
      : bytes(bytes), range(std::move(range)) {}
  size_t bytes;
  SourceRange range;
};
using SourceRangeRecords = std::vector<TaggedRange>;
using SourceRangeTagMap =
    std::unordered_map<SourceRange, int64_t, SourceRangeHasher>;

} // namespace jit
} // namespace torch

namespace std {
// Make StringCordView::Iterator usable with standard algorithms that
// require iterator_traits (it models a forward iterator).
template <>
struct iterator_traits<torch::jit::StringCordView::Iterator> {
  using value_type = char;
  using difference_type = ptrdiff_t;
  using pointer = char*;
  using reference = char&;
  using iterator_category = std::forward_iterator_tag;
};
} // namespace std
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/source_ref.h ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once

#include <functional>
#include <memory>

#include <ATen/core/ivalue.h>
#include <c10/macros/Export.h>
#include <torch/csrc/jit/frontend/source_range.h>

namespace torch {
namespace jit {

/**
 * SourceRef does two things:
 *  1. Owns a Source object.
 *  2. Serves as lookup key to the owned Source in associative containers, for
 *     runtime data aggregation.
 * We don't want to use std::shared_ptr<Source> directly because we want to
 * support heterogeneous lookup, and also shared_ptr is an implementation
 * detail which should be encapsulated.
 */
class TORCH_API SourceRef : public CustomClassHolder {
 public:
  explicit SourceRef(std::shared_ptr<Source> source_view)
      : source_view_(std::move(source_view)) {}
  // Identity comparison: equal iff both refs own the same Source object.
  bool operator==(const SourceRef& other) const {
    return source_view_ == other.source_view_;
  }
  // Heterogeneous ordering against a bare Source, by object address.
  bool operator<(const Source& other) const {
    return source_view_.get() < &other;
  }
  friend bool operator<(const Source& other, const SourceRef& self) {
    return &other < self.source_view_.get();
  }
  bool operator<(const SourceRef& other) const {
    return *this < *other.source_view_.get();
  }
  const Source* operator->() const {
    return source_view_.get();
  }

 private:
  std::shared_ptr<Source> source_view_;
};

} // namespace jit
} // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/strtod.h ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once

#include <c10/macros/Macros.h>

namespace torch {
namespace jit {

// Drop-in replacements for the C library's strtod/strtof: parse a
// floating-point number from `nptr`, storing a pointer past the consumed
// characters in *endptr (same contract as strtod/strtof).
// NOTE(review): the `_c` suffix suggests C-locale (locale-independent)
// parsing — confirm against the implementation in strtod.cpp.
TORCH_API double strtod_c(const char* nptr, char** endptr);
TORCH_API float strtof_c(const char* nptr, char** endptr);

} // namespace jit
} // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/sugared_value.h ADDED
@@ -0,0 +1,857 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <c10/util/Optional.h>
3
+ #include <functional>
4
+ #include <memory>
5
+ #include <string>
6
+ #include <utility>
7
+
8
+ #include <ATen/core/symbol.h>
9
+ #include <caffe2/serialize/versions.h>
10
+ #include <torch/csrc/jit/api/module.h>
11
+ #include <torch/csrc/jit/frontend/error_report.h>
12
+ #include <torch/csrc/jit/frontend/schema_matching.h>
13
+ #include <torch/csrc/jit/frontend/versioned_symbols.h>
14
+ #include <torch/csrc/jit/ir/ir.h>
15
+
16
+ namespace torch {
17
+ namespace jit {
18
+
19
using SugaredValuePtr = std::shared_ptr<SugaredValue>;

// The AST can contain nodes like `self`, `self.b` or `python_fn` that
// are not first-class values in the graph representation, but instead
// will be desugared based on how they are used in the AST.

// SugaredValue is used to temporarily represent these values in a way
// that separates their behavior from the AST -> IR converter itself.
// This allows us to keep dependencies on python minimal.

// Base class: every operation's default implementation throws a user-facing
// ErrorReport; subclasses override only the operations they support.
struct TORCH_API SugaredValue
    : public std::enable_shared_from_this<SugaredValue> {
  // what is this node? for error reporting (e.g. Module, python function)
  virtual std::string kind() const = 0;

  // what can we do with this thing?
  // use it as a value e.g. `this + 4`
  virtual Value* asValue(const SourceRange& loc, GraphFunction& m) {
    throw ErrorReport(loc) << kind() << " cannot be used as a value";
  }

  // select an attribute on it, e.g. `this.field`
  virtual std::shared_ptr<SugaredValue> attr(
      const SourceRange& loc,
      GraphFunction& m,
      const std::string& field) {
    throw ErrorReport(loc) << "attribute lookup is not defined on " << kind();
  }

  virtual bool hasAttr(
      const SourceRange& loc,
      GraphFunction& m,
      const std::string& field) {
    throw ErrorReport(loc) << "attribute lookup is not defined on " << kind();
  }

  // assign an attribute on it, e.g. `this.field = newValue`
  virtual void setAttr(
      const SourceRange& loc,
      GraphFunction& m,
      const std::string& field,
      Value* newValue) {
    throw ErrorReport(loc) << "attribute assignment is not defined on "
                           << kind();
  }

  // use it as a vector of values, e.g. a tuple of values as return value from
  // a method invocation
  virtual std::vector<std::shared_ptr<SugaredValue>> asTuple(
      const SourceRange& loc,
      GraphFunction& m,
      const c10::optional<size_t>& size_hint = {}) {
    throw ErrorReport(loc) << kind() << " cannot be used as a tuple";
  }

  // TODO @wconstab refactor to use ModuleValue::asTuple instead of new API
  virtual SugaredValuePtr asTupleValue(
      const SourceRange& loc,
      GraphFunction& m) {
    throw ErrorReport(loc) << kind() << " cannot be used as a tuplevalue";
  }

  virtual std::vector<std::shared_ptr<SugaredValue>> asType(
      const SourceRange& loc,
      Method& m) {
    throw ErrorReport(loc) << kind() << " cannot be used as a type";
  }

  // call it like a function, e.g. `outputs = this(inputs)`
  virtual std::shared_ptr<SugaredValue> call(
      const SourceRange& loc,
      GraphFunction& m,
      // note: names for args will be 'argument 0', 'argument 1', etc..
      at::ArrayRef<NamedValue> args,
      at::ArrayRef<NamedValue> kwargs,
      size_t n_binders) {
    // n_binders is always set to the number of variables an expression is
    // syntactically bound to:
    //   a = foo()      # 1 binder (note in this case the single binder might
    //                  # be a tuple)
    //   a, * b = foo() # 1 binder
    //   a, b = foo()   # 2 binders
    //   foo()          # 0 binders
    //
    // In subexpressions, like bar() in foo(bar()), n_binders is always set to
    // 1. n_binders is used as a hint to subexpressions to determine how many
    // values they should return when that number is ambiguous statically. In
    // particular it is currently used to decide how many tensors a call to a
    // python function will return. It is only a hint, functions do not have to
    // check that n_binders match the number of things they are returning, the
    // assignment logic will do that anyway.

    throw ErrorReport(loc) << "cannot call a " << kind();
  }

  // This function is called when to convert a SugaredValue to its iterator.
  // For example, when iterating through a Dict we iterate over its keys
  virtual std::shared_ptr<SugaredValue> iter(
      const SourceRange& loc,
      GraphFunction& m) {
    throw ErrorReport(loc) << kind() << " cannot be used as an iterable";
  }

  // If we are iterating over a Sugared Value and it returns a value from this
  // function, then we emit an unrolled loop over the variable. This allows us
  // to support containers of heterogeneous types, like Module Containers &
  // Tuples
  virtual c10::optional<int64_t> staticLen() {
    return c10::nullopt;
  }

  // When iterating over this SugaredValue, should we emit the for loop as an
  // unrolled loop.
  bool shouldEmitUnrolled() {
    return staticLen() != c10::nullopt;
  }

  // return length of this thing, if not then it can't be iterated.
  // If it does not have a statically-determinable length, then it cannot
  // be iterated over with a modulelist. If it does it must return a constant
  // Value *
  virtual Value* len(const SourceRange& loc, GraphFunction& m) {
    throw ErrorReport(loc) << "'" << kind() << "'"
                           << " object is not iterable";
  }

  // expression for ith element for iterable value
  virtual std::shared_ptr<SugaredValue> getitem(
      const SourceRange& loc,
      GraphFunction& m,
      Value* idx,
      TypePtr type_hint = nullptr) {
    throw ErrorReport(loc) << "'" << kind() << "'"
                           << " object is not subscriptable";
  }

  virtual ~SugaredValue() = default;
};
+ };
155
+
156
+ // most things in the environment are just simple value types
157
+ // and not special python syntax sugar types
158
+ struct TORCH_API SimpleValue : public SugaredValue {
159
+ SimpleValue(Value* value) : value_(value) {}
160
+ std::string kind() const override {
161
+ std::stringstream ss;
162
+ // NOLINTNEXTLINE(clang-analyzer-core.CallAndMessage)
163
+ ss << "value of type '" << value_->type()->annotation_str() << "'";
164
+ return ss.str();
165
+ }
166
+ Value* asValue(const SourceRange& range, GraphFunction& m) override {
167
+ return value_;
168
+ }
169
+ std::vector<std::shared_ptr<SugaredValue>> asTuple(
170
+ const SourceRange& loc,
171
+ GraphFunction& m,
172
+ const c10::optional<size_t>& size_hint = {}) override;
173
+ std::shared_ptr<SugaredValue> attr(
174
+ const SourceRange& loc,
175
+ GraphFunction& m,
176
+ const std::string& field) override;
177
+
178
+ bool hasAttr(
179
+ const SourceRange& loc,
180
+ GraphFunction& m,
181
+ const std::string& field) override;
182
+
183
+ void setAttr(
184
+ const SourceRange& loc,
185
+ GraphFunction& m,
186
+ const std::string& field,
187
+ Value* newValue) override;
188
+
189
+ std::shared_ptr<SugaredValue> call(
190
+ const SourceRange& loc,
191
+ GraphFunction& m,
192
+ // note: names for args will be 'argument 0', 'argument 1', etc..
193
+ at::ArrayRef<NamedValue> args,
194
+ at::ArrayRef<NamedValue> kwargs,
195
+ size_t n_binders) override;
196
+
197
+ std::shared_ptr<SugaredValue> iter(const SourceRange& loc, GraphFunction& m)
198
+ override;
199
+
200
+ Value* getValue() const {
201
+ return value_;
202
+ }
203
+
204
+ Value* len(const SourceRange& loc, GraphFunction& m) override;
205
+ SugaredValuePtr getitem(
206
+ const SourceRange& loc,
207
+ GraphFunction& m,
208
+ Value* idx,
209
+ TypePtr type_hint = nullptr) override;
210
+
211
+ private:
212
+ Value* value_;
213
+ };
214
+
215
// A callable that resolves to a builtin operator identified by a Symbol.
struct TORCH_API BuiltinFunction : public SugaredValue {
  BuiltinFunction(Symbol symbol, c10::optional<NamedValue> self)
      : symbol(symbol), self(std::move(self)) {}

  // The symbol of the function (e.g. `aten::relu`).
  Symbol symbol;

  // if this is method, then this is the self argument.
  c10::optional<NamedValue> self;
  std::string kind() const override {
    return "builtin";
  }
  std::shared_ptr<SugaredValue> call(
      const SourceRange& loc,
      GraphFunction& m,
      at::ArrayRef<NamedValue> args,
      at::ArrayRef<NamedValue> kwargs,
      size_t n_binders) override;

  // try to create this builtin but if it doesn't exist or the self argument
  // cannot possibly match, then return nullptr. Use in situations where it is
  // not clear if it is a valid builtin
  static std::shared_ptr<BuiltinFunction> tryCreate(
      Symbol symbol,
      c10::optional<NamedValue> self);
};
+ };
241
+
242
+ struct TORCH_API SugaredTupleValue : public SugaredValue {
243
+ explicit SugaredTupleValue(std::vector<std::shared_ptr<SugaredValue>> tup)
244
+ : tup_(std::move(tup)){};
245
+
246
+ std::vector<std::shared_ptr<SugaredValue>> asTuple(
247
+ const SourceRange& loc,
248
+ GraphFunction& m,
249
+ const c10::optional<size_t>& size_hint = {}) override {
250
+ return tup_;
251
+ };
252
+
253
+ Value* asValue(const SourceRange& loc, GraphFunction& m) override {
254
+ std::vector<Value*> vec;
255
+ vec.reserve(tup_.size());
256
+ for (const auto& sv : tup_) {
257
+ vec.push_back(sv->asValue(loc, m));
258
+ }
259
+ Graph& g = *m.graph();
260
+ return g.insertNode(g.createTuple(vec))->output();
261
+ }
262
+
263
+ std::string kind() const override {
264
+ return "Tuple";
265
+ }
266
+
267
+ SugaredValuePtr getitem(
268
+ const SourceRange& loc,
269
+ GraphFunction& m,
270
+ Value* idx,
271
+ TypePtr type_hint = nullptr) override {
272
+ if (!(idx->type()->cast<IntType>() && toIValue(idx))) {
273
+ throw ErrorReport(loc)
274
+ << "Expected integer literal for index but got a variable or non-integer. "
275
+ << "ModuleList/Sequential indexing is only supported with integer literals. "
276
+ << "For example, 'i = 4; self.layers[i](x)' will fail because i is not a literal. "
277
+ << "Enumeration is supported, e.g. 'for index, v in enumerate(self): out = v(inp)'";
278
+ }
279
+ auto index = toIValue(idx)->toInt();
280
+ int64_t adj_index =
281
+ (index < 0) ? index + static_cast<int64_t>(tup_.size()) : index;
282
+ if (!(adj_index >= 0 && adj_index < static_cast<int64_t>(tup_.size()))) {
283
+ throw ErrorReport(loc)
284
+ << "Index " << index << " out of range of length " << tup_.size();
285
+ }
286
+ return tup_.at(adj_index);
287
+ }
288
+
289
+ // This function is called when a SugaredValue is used to convert a
290
+ // SugaredValue to its iterator. For example, when iterating through a Dict we
291
+ // iterate over its keys
292
+ std::shared_ptr<SugaredValue> iter(const SourceRange& loc, GraphFunction& m)
293
+ override {
294
+ return shared_from_this();
295
+ };
296
+
297
+ // Because this is used to contain SugaredValues of Heterogenous types,
298
+ // we define staticLen() so that when this is iterated over it is emitted
299
+ // as an unrolled loop.
300
+ c10::optional<int64_t> staticLen() override {
301
+ return static_cast<int64_t>(tup_.size());
302
+ }
303
+
304
+ std::vector<std::shared_ptr<SugaredValue>> tup_;
305
+ };
306
+
307
// Represents a builtin operator namespace (e.g. "aten", "prim"): attribute
// lookup resolves `name::field` into a BuiltinFunction with no self argument.
struct TORCH_API BuiltinModule : public SugaredValue {
  BuiltinModule(std::string name, c10::optional<int64_t> version = at::nullopt)
      : name(std::move(name)), version(version) {}

  std::string kind() const override {
    return "builtin module";
  }
  std::shared_ptr<SugaredValue> attr(
      const SourceRange& loc,
      GraphFunction& m,
      const std::string& field) override {
    if (field == "autograd") {
      // When referring to torch.autograd, it is also considered to be a
      // BuiltinModule and we will dispatch to the aten operators for the
      // methods under its module.
      return std::make_shared<BuiltinModule>("aten", version);
    }

    auto sym = Symbol::fromQualString(name + "::" + field);
    return std::make_shared<BuiltinFunction>(sym, c10::nullopt);
  }

 private:
  std::string name;
  // when we add operator versioning, emit this op as it existed at 'version'
  // if not set, use the latest version
  c10::optional<int64_t> version;
};
335
+
336
// Represents a class, analogous to `int` or `dict`. Instances of classes,
// like `1` or `{"foo": 5}`, are represented as SimpleValues
struct TORCH_API ClassValue : public SugaredValue {
  explicit ClassValue(ClassTypePtr type) : type_(std::move(type)) {}

  // Call the type's constructor, as in:
  //    n = Foo(constructor_arg)
  std::shared_ptr<SugaredValue> call(
      const SourceRange& loc,
      GraphFunction& m,
      at::ArrayRef<NamedValue> args,
      at::ArrayRef<NamedValue> kwargs,
      size_t n_binders) override;

  // Attribute lookup on the class object itself (e.g. `Foo.static_member`);
  // defined out of line.
  std::shared_ptr<SugaredValue> attr(
      const SourceRange& loc,
      GraphFunction& m,
      const std::string& field) override;

  std::string kind() const override {
    return type_->str();
  }

  ClassTypePtr type_;
};
361
+
362
// Sugared value for a NamedTuple type; calling it constructs an instance of
// `type_` (emission handled in the out-of-line call() definition).
struct TORCH_API NamedTupleConstructor : public SugaredValue {
  explicit NamedTupleConstructor(TupleTypePtr type) : type_(std::move(type)) {}

  std::shared_ptr<SugaredValue> call(
      const SourceRange& loc,
      GraphFunction& m,
      at::ArrayRef<NamedValue> args,
      at::ArrayRef<NamedValue> kwargs,
      size_t n_binders) override;

  std::string kind() const override {
    return type_->str();
  }

  TupleTypePtr type_;
};
378
+
379
// A sugared value backed by one or more script Function overloads; calling it
// performs schema matching across the overload set and inserts a function
// call node for the winning overload.
struct FunctionValue : public SugaredValue {
  FunctionValue(Function* callee) : callees_({callee}) {}
  FunctionValue(const StrongFunctionPtr& p)
      : callees_({p.function_}), cu_(p.cu_) {}
  FunctionValue(const std::vector<StrongFunctionPtr>& callees) {
    for (const StrongFunctionPtr& callee : callees) {
      // all overloads must come from the same CompilationUnit
      cu_ = cu_ ? cu_ : callee.cu_;
      TORCH_INTERNAL_ASSERT(callee.cu_ == cu_);
      callees_.push_back(callee.function_);
    }
  }

  std::string kind() const override {
    return "function";
  }

  std::shared_ptr<SugaredValue> call(
      const SourceRange& loc,
      GraphFunction& f,
      at::ArrayRef<NamedValue> args,
      at::ArrayRef<NamedValue> kwargs,
      size_t n_binders) override {
    std::vector<const FunctionSchema*> schemas;
    for (Function* callee : callees_) {
      try {
        // force lazy compilation; recursion surfaces as an error here
        callee->ensure_defined();
      } catch (const RecursiveMethodCallError&) {
        throw ErrorReport(loc)
            << " function '" << callee->name() << "' is called recursively. "
            << "Recursive calls are not supported";
      }
      schemas.push_back(&callee->getSchema());
    }
    auto match = matchSchemas(schemas, loc, *f.graph(), args, kwargs);
    Value* output =
        f.graph()->insertFunctionCall(callees_[match.first], match.second);
    output->node()->setSourceRange(loc);
    return std::make_shared<SimpleValue>(output);
  }

  const std::vector<Function*>& callees() {
    return callees_;
  }

 private:
  std::vector<Function*> callees_;
  // TODO holding this thing is creepy
  // (keeps the owning CompilationUnit alive so the raw Function* stay valid)
  std::shared_ptr<CompilationUnit> cu_;
};
428
+
429
// Wraps the output Value of a prim::Closure node so the closure can flow
// through the compiler as a sugared value.
struct TORCH_API ClosureValue : public SugaredValue {
  ClosureValue(Value* value) : value_(value) {
    TORCH_INTERNAL_ASSERT(value_->node()->kind() == prim::Closure);
  }
  std::string kind() const override {
    return "closure";
  }
  Value* asValue(const SourceRange& range, GraphFunction& m) override {
    return value_;
  }
  Value* value_;
};
441
+
442
// defines how a method obtained from a module/class/interface behaves in script
struct MethodValue : public SugaredValue {
  MethodValue(Value* self, std::vector<std::string> method_names)
      : self_(self), method_names_(std::move(method_names)) {}
  MethodValue(Value* self, std::string method_name)
      : MethodValue(self, std::vector<std::string>({std::move(method_name)})) {}

  std::string kind() const override {
    return "method";
  }

  // Prepends `self_` to the argument list, schema-matches across the
  // candidate method names, and emits a method call node.
  std::shared_ptr<SugaredValue> call(
      const SourceRange& loc,
      GraphFunction& f,
      at::ArrayRef<NamedValue> args,
      at::ArrayRef<NamedValue> kwargs,
      size_t n_binders) override {
    std::vector<NamedValue> argsWithSelf = {self_};
    argsWithSelf.insert(argsWithSelf.end(), args.begin(), args.end());
    std::vector<const FunctionSchema*> schemas;
    for (const std::string& method_name : method_names_) {
      if (auto class_type = self_->type()->cast<ClassType>()) {
        Function& method = class_type->getMethod(method_name);
        try {
          method.ensure_defined();
        } catch (const RecursiveMethodCallError&) {
          throw ErrorReport(loc)
              << " method '" << method.name() << "' is called recursively. "
              << "Recursive calls are not supported";
        }
        schemas.push_back(&method.getSchema());
      } else if (auto interface_type = self_->type()->cast<InterfaceType>()) {
        schemas.push_back(interface_type->getMethod(method_name));
      } else {
        TORCH_INTERNAL_ASSERT(
            false, "method constructed that is not a class or interface");
      }
    }
    auto match = matchSchemas(schemas, loc, *f.graph(), argsWithSelf, kwargs);
    Value* output =
        f.graph()->insertMethodCall(method_names_[match.first], match.second);
    output->node()->setSourceRange(loc);
    return std::make_shared<SimpleValue>(output);
  }

 private:
  Value* self_;
  std::vector<std::string> method_names_;
};
491
+
492
// Sugared value for the builtin `print`; emission of the print node is
// handled in the out-of-line call() definition.
struct TORCH_API PrintValue : public SugaredValue {
  std::string kind() const override {
    return "print";
  }
  std::shared_ptr<SugaredValue> call(
      const SourceRange& loc,
      GraphFunction& m,
      at::ArrayRef<NamedValue> args,
      at::ArrayRef<NamedValue> kwargs,
      size_t n_binders) override;
};
503
+
504
// expressions like int(x)
// these are the same as call prim::Int or equivalent except it
// is a noop when the input is a subtype of 'type'
struct TORCH_API CastValue : public BuiltinFunction {
  CastValue(TypePtr type, c10::Symbol method)
      : BuiltinFunction(method, c10::nullopt), type_(std::move(type)) {}
  std::shared_ptr<SugaredValue> call(
      const SourceRange& loc,
      GraphFunction& m,
      at::ArrayRef<NamedValue> args,
      at::ArrayRef<NamedValue> kwargs,
      size_t n_binders) override {
    if (args.size() == 1 && kwargs.empty()) {
      auto len_op = std::make_shared<BuiltinFunction>(aten::len, at::nullopt);
      auto gt_op = std::make_shared<BuiltinFunction>(aten::gt, at::nullopt);
      auto zero = m.graph()->insertConstant(0);

      auto v = args[0].value(*m.graph());
      if (v->type()->isSubtypeOf(*type_)) {
        // already the target type: cast is a no-op
        return std::make_shared<SimpleValue>(v);
      } else if (
          *type_ == *BoolType::get() &&
          (v->type()->isSubtypeOf(*AnyListType::get()) ||
           v->type()->isSubtypeOf(*StringType::get()) ||
           v->type()->cast<DictType>())) {
        // bool(container/string) desugars to len(x) > 0
        auto len = len_op->call(loc, m, {v}, {}, 1);
        return gt_op->call(loc, m, {len->asValue(loc, m), zero}, {}, 1);
      }
    }
    // fall back to the underlying builtin (e.g. prim::Int)
    return BuiltinFunction::call(loc, m, args, kwargs, n_binders);
  }

 private:
  TypePtr type_;
};
539
+
540
// Dtype cast on a tensor, e.g. `x.float()`: calling it (with no arguments)
// emits `aten::to(self, dtype=dtype_)`.
struct TORCH_API TensorCastValue : public SugaredValue {
  TensorCastValue(at::ScalarType type, NamedValue self)
      : dtype_(type), self_(std::move(self)) {}

  std::string kind() const override {
    return "Cast";
  }

  std::shared_ptr<SugaredValue> call(
      const SourceRange& loc,
      GraphFunction& m,
      at::ArrayRef<NamedValue> args,
      at::ArrayRef<NamedValue> kwargs,
      size_t n_binders) override {
    // the cast takes no user arguments; self/dtype are supplied as kwargs
    TORCH_INTERNAL_ASSERT(args.empty() && kwargs.empty());
    Value* dtype_const = m.graph()->insertConstant(dtype_, loc);
    std::vector<NamedValue> kwargs_{
        self_, NamedValue(loc, "dtype", dtype_const)};
    Value* casted_val = m.graph()->insert(
        /*opname=*/Symbol::fromQualString("aten::to"),
        /*args=*/args,
        /*kwargs=*/kwargs_,
        /*range=*/loc);
    return std::make_shared<SimpleValue>(casted_val);
  }

  at::ScalarType dtype_;
  NamedValue self_;
};
569
+
570
// builtins operators and functions that call a method if it exists
// on a class type, like 'len(x)' and 'x + y'
struct TORCH_API MagicMethod : public SugaredValue {
  // desugared_name: the method to try on class types (e.g. "__len__");
  // base: the builtin implementation used when no such method exists.
  MagicMethod(std::string desugared_name, SugaredValuePtr base)
      : base_value_(std::move(base)),
        desugared_name_(std::move(desugared_name)) {}

  std::string kind() const override {
    return desugared_name_;
  }

  std::shared_ptr<SugaredValue> call(
      const SourceRange& loc,
      GraphFunction& m,
      at::ArrayRef<NamedValue> args,
      at::ArrayRef<NamedValue> kwargs,
      size_t n_binders) override;

 private:
  SugaredValuePtr base_value_;
  std::string desugared_name_;
};
592
+
593
// things that look like function applications, but
// perform non-standard evaluation are represented
// with SpecialFormValues, e.g.
//   isinstance(x, int)
//   fork(fn)
//   annotate(int, 3)
// The implementation of each value is handled by a case inside emitApplyExpr
struct TORCH_API SpecialFormValue : public SugaredValue {
  SpecialFormValue(Symbol form) : form_(form) {}
  std::string kind() const override {
    return form_.toUnqualString();
  }
  Symbol form() const {
    return form_;
  }
  static std::shared_ptr<SpecialFormValue> create(Symbol form) {
    return std::make_shared<SpecialFormValue>(form);
  }

 private:
  Symbol form_;
};
615
+
616
// Special form for legacy tensor constructors (e.g. torch.LongTensor) that
// carry an implied dtype and device alongside the special-form symbol.
struct TORCH_API LegacyTensorConstructor : public SpecialFormValue {
  LegacyTensorConstructor(Symbol form, at::ScalarType dtype, at::Device device)
      : SpecialFormValue(form), device_(device), dtype_(dtype) {}

  static std::shared_ptr<LegacyTensorConstructor> create(
      Symbol form,
      at::ScalarType dtype,
      at::Device device) {
    return std::make_shared<LegacyTensorConstructor>(form, dtype, device);
  }
  at::ScalarType dtype() const {
    return dtype_;
  }

 private:
  at::Device device_;
  at::ScalarType dtype_;
};
634
+
635
// matched against for special handling of range expressions
struct TORCH_API RangeValue : SugaredValue {
  RangeValue(
      const SourceRange& loc,
      GraphFunction& m,
      std::vector<Value*> input,
      c10::optional<int64_t> static_len = c10::nullopt);

  std::string kind() const override {
    return "range";
  }
  Value* len(const SourceRange& loc, GraphFunction& m) override;
  SugaredValuePtr getitem(
      const SourceRange& loc,
      GraphFunction& m,
      Value* idx,
      TypePtr type_hint = nullptr) override;
  std::shared_ptr<SugaredValue> iter(const SourceRange& loc, GraphFunction& m)
      override;

  // When Range is instantiated via enumerate(iterable_with_static_len),
  // then it takes the static length of the iterable
  c10::optional<int64_t> staticLen() override {
    return static_len_;
  }

 private:
  Value* start_{};
  Value* end_{};
  Value* step_{};
  // a flag to determine if it's a simple range() call with only end_ from
  // arguments If true, we will not insert length calculation and index
  // derivation nodes to simplify the graph and enable more possible
  // optimizations
  bool has_only_end_{};
  c10::optional<int64_t> static_len_;
};
672
+
673
// Specialized Tree structure to matched against for special handling
// of builtin functions iterables expressions like zip(), enumerate(), etc.
// zip and enumerate can be modeled as a tree of SimpleValue/RangeValue:
//    zip(x, y) ->  (x, y) with tuple assignment to each loop target
//    enumerate(x) -> (range(0, math.inf, 1), x)
// So a complicated expression like zip(a, enumerate(b), range(0, 100)) will be:
// (a, (range(0, math.inf, 1), b), range(0, 100))
// We use those base iterables to fill in the loop information like
// max_trip_count and set the value table for loop targets
// Iterables can contain lists of SugaredValues like ModuleLists. If it
// does, then we emit it unrolled and require that all values it contains
// have a statically-determinable length.
struct TORCH_API IterableTree : SugaredValue {
  IterableTree() = default;
  IterableTree(
      const SourceRange& range,
      GraphFunction& m,
      at::ArrayRef<SugaredValuePtr> children) {
    for (const auto& child : children) {
      addChild(range, m, child);
    }
  }
  std::string kind() const override {
    return "iterabletree";
  }

  // An IterableTree is its own iterator.
  std::shared_ptr<SugaredValue> iter(const SourceRange& loc, GraphFunction& m)
      override {
    return shared_from_this();
  }

  void addChild(
      const SourceRange& range,
      GraphFunction& m,
      const SugaredValuePtr& iter_value);

  std::vector<SugaredValuePtr> get_children() {
    return children_;
  }

  // If this iterable contains a ModuleList or Tuple, then it will have a
  // static length, and we will emit it as an unrolled for loop.
  c10::optional<int64_t> staticLen() override {
    return unroll_length_;
  }

  // given a IterableTree node, get all the base iterables/leaves under the
  // IterableTree node. This enables
  // us to get all the basic SugaredValues that contains valid loop information
  // with len() and getitem()
  std::vector<SugaredValuePtr> get_base_iterables();

  Value* len(const SourceRange& loc, GraphFunction& m) override;
  SugaredValuePtr getitem(
      const SourceRange& loc,
      GraphFunction& m,
      Value* idx,
      TypePtr type_hint = nullptr) override;

 private:
  c10::optional<int64_t> unroll_length_ = c10::nullopt;
  std::vector<SugaredValuePtr> children_;
};
736
+
737
+ static inline std::vector<Value*> toValues(
738
+ Graph& g,
739
+ at::ArrayRef<NamedValue> nvs) {
740
+ return fmap(nvs, [&](const NamedValue& v) { return v.value(g); });
741
+ }
742
+
743
// `self` binding for class methods: tags the first argument Value with the
// class type and wraps it as a SimpleValue.
struct SimpleSelf : public Self {
  explicit SimpleSelf(ClassTypePtr classType)
      : Self(), classType_(std::move(classType)) {}
  std::shared_ptr<SugaredValue> makeSugared(Value* v) const override {
    v->setType(classType_);
    return std::make_shared<SimpleValue>(v);
  }
  ClassTypePtr getClassType() const override {
    return classType_;
  }

 private:
  ClassTypePtr classType_;
};
757
+
758
// This is not a SimpleValue so it can not pass through the code paths that
// expect a SimpleValue as a sugared value.
// Holds a formatted exception message (and optionally the qualified Python
// class name of the exception) for raise statements.
struct TORCH_API ExceptionMessageValue : public SugaredValue {
  explicit ExceptionMessageValue(
      Value* value,
      Value* qualified_class_name = nullptr)
      : value_(value), qualified_class_name_(qualified_class_name) {}

  std::string kind() const override {
    return "exception message";
  }

  Value* getValue() {
    return value_;
  }

  // qualified python class name
  Value* getQualifiedClassName() {
    return qualified_class_name_;
  }

 private:
  Value* value_;
  Value* qualified_class_name_;
};
783
+
784
// Sugared value for an exception type (e.g. `RuntimeError`); calling it
// string-concatenates the arguments onto `message_` and yields an
// ExceptionMessageValue.
struct TORCH_API ExceptionValue : public SugaredValue {
  explicit ExceptionValue(std::string message) : message_(std::move(message)) {}

  std::string kind() const override {
    return "exception";
  }

  std::shared_ptr<SugaredValue> call(
      const SourceRange& loc,
      GraphFunction& m,
      at::ArrayRef<NamedValue> args,
      at::ArrayRef<NamedValue> /*attributes*/,
      size_t /*n_binders*/) override {
    auto exception_message = insertConstant(*m.graph(), message_ + ": ", loc);
    for (auto& input : args) {
      auto input_str = input.value(*m.graph());
      if (!input_str->type()->isSubtypeOf(*StringType::get())) {
        // stringify non-string arguments before concatenation
        input_str =
            emitBuiltinCall(loc, *m.graph(), aten::str, {input_str}, {});
      }
      exception_message = emitBuiltinCall(
          loc, *m.graph(), aten::add, {exception_message, input_str}, {});
    }
    return std::make_shared<ExceptionMessageValue>(exception_message);
  }

  std::string message_;
};
812
+
813
// Sugared value for an enum class: supports attribute access to its members
// and iteration over all members (both defined out of line).
struct TORCH_API SugaredEnumClass : public SugaredValue {
  explicit SugaredEnumClass(EnumTypePtr enum_type)
      : enum_type_(std::move(enum_type)) {}

  std::string kind() const override {
    return "EnumClass";
  }

  SugaredValuePtr attr(
      const SourceRange& loc,
      GraphFunction& m,
      const std::string& field) override;

  SugaredValuePtr iter(const SourceRange& loc, GraphFunction& m) override;

 private:
  EnumTypePtr enum_type_;
};
831
+
832
// Represents a Python slice expression (start:stop:step) during compilation;
// simple accessor container for the three component Values.
struct TORCH_API SliceValue : public SugaredValue {
  explicit SliceValue(Value* start, Value* stop, Value* step)
      : start_(start), stop_(stop), step_(step) {}

  std::string kind() const override {
    return "Python slice value";
  }

  Value* start() {
    return start_;
  };
  Value* stop() {
    return stop_;
  };
  Value* step() {
    return step_;
  };

 private:
  Value* start_;
  Value* stop_;
  Value* step_;
};
855
+
856
+ } // namespace jit
857
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/tracer.h ADDED
@@ -0,0 +1,414 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/Dimname.h>
4
+ #include <ATen/core/class_type.h>
5
+ #include <ATen/core/jit_type.h>
6
+ #include <ATen/core/stack.h>
7
+ #include <ATen/core/symbol.h>
8
+ #include <c10/util/Exception.h>
9
+ #include <torch/csrc/Export.h>
10
+
11
+ #include <torch/csrc/jit/frontend/source_range.h>
12
+ #include <torch/csrc/utils/variadic.h>
13
+
14
+ #include <cstdint>
15
+ #include <memory>
16
+ #include <mutex>
17
+ #include <unordered_map>
18
+ #include <vector>
19
+
20
+ namespace torch {
21
+ namespace jit {
22
+ struct Node;
23
+ struct Value;
24
+ struct Graph;
25
+ struct Module;
26
+
27
+ namespace tracer {
28
+
29
+ using ::c10::ivalue::Shared;
30
+
31
+ using ::c10::IValue;
32
+ using ::c10::ivalue::Future;
33
+
34
+ using ::c10::ArrayRef;
35
+ using ::c10::TupleType;
36
+ using ::c10::TupleTypePtr;
37
+ using ::c10::ivalue::ConstantString;
38
+
39
+ using torch::autograd::Variable;
40
+ using variable_list = std::vector<Variable>;
41
+
42
+ TORCH_API std::atomic<bool>& getTracerStateWarnMode();
43
+
44
// Per-trace state: the Graph under construction plus a stack of frames that
// map concrete IValues to the graph Values that produced them.
struct TORCH_API TracingState
    : public std::enable_shared_from_this<TracingState> {
  TracingState();
  ~TracingState();

  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  std::shared_ptr<Graph> graph;
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  bool warn = getTracerStateWarnMode();
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  bool strict = true;
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  bool force_outplace = false;
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  std::function<std::string(const Variable& var)> lookup_var_name_fn =
      [](const Variable& var) { return ""; };

  // Push a fresh IValue->Value lookup frame (e.g. for a nested traced call).
  void enterFrame() {
    env_stack.emplace_back();
  }

  void leaveFrame() {
    env_stack.pop_back();
  }

  void setValue(const IValue& v, Value* value);
  void delValue(const IValue& var);
  Value* getValue(const IValue& var);
  Value* getOutput(const IValue& var, size_t i);
  bool hasValue(const IValue& var) const;

  Node* createNode(c10::Symbol op_name, size_t num_outputs);
  void insertNode(Node* node);

 private:
  using WeakIValue = at::WeakIValue;

  struct WeakIValueHasher {
    size_t operator()(const WeakIValue& t) const {
      return t.hash();
    }
  };

  struct WeakIValueEq {
    bool operator()(const WeakIValue& t1, const WeakIValue& t2) const {
      return t1.isSameIdentity(t2);
    }
  };

  // Frames are keyed on weak IValues so the trace does not extend the
  // lifetime of traced tensors/objects.
  using Frame =
      std::unordered_map<WeakIValue, Value*, WeakIValueHasher, WeakIValueEq>;
  std::vector<Frame> env_stack;
};
97
+
98
// This is meant to be used as a thread local place, where we can store extra
// info that gets lost when we call into ATen from Python bindings. One example
// for when this happens is when we get an IntArrayRef argument with e.g. sizes
// for view. When tracing, those might be tensors, which let us encode extra
// data dependencies, but once they get to the ATen call where we actually have
// the tracing logic, they get converted into a raw IntArrayRef, and we lose
// all information. To prevent this, we temporarily stash it in here.
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
struct ArgumentStash {
  // Per-element trace of an IntArrayRef; slots left nullptr were not traced.
  struct IntArrayRefTrace : std::vector<Value*> {
    IntArrayRefTrace(int size) : std::vector<Value*>(size, nullptr) {}
  };

  static bool empty() {
    return stash.intlists.empty();
  }

  TORCH_API static void stashIntArrayRefElem(
      const std::string& arg_name,
      size_t size,
      size_t idx,
      const Variable& var);

  static bool hasIntArrayRef(const std::string& arg_name) {
    return stash.intlists.count(arg_name) > 0;
  }

  // Removes and returns the stashed trace; throws if arg_name is absent.
  static IntArrayRefTrace popIntArrayRef(const std::string& arg_name) {
    auto info = std::move(stash.intlists.at(arg_name));
    stash.intlists.erase(arg_name);
    return info;
  }

  // Value stashing: Use these methods to stash arguments which correspond
  // to regular Value*'s in the graph. i.e. they don't require special
  // handling like in the case of IntArrayRefs
  TORCH_API static void stashValue(
      const std::string& arg_name,
      size_t idx,
      const Variable& var,
      const c10::TypePtr& type = nullptr);

  static bool hasValue(const std::string& arg_name) {
    return stash.values.count(arg_name) > 0;
  }

  static Value* popValue(const std::string& arg_name) {
    auto info = stash.values.at(arg_name);
    stash.values.erase(arg_name);
    return info;
  }

 private:
  static thread_local ArgumentStash stash;
  std::unordered_map<std::string, IntArrayRefTrace> intlists;
  std::unordered_map<std::string, Value*> values;
};
155
+
156
+ // Retrieve or set the current tracing state. Returns a nullptr if tracing is
157
+ // disabled.
158
+ TORCH_API const std::shared_ptr<TracingState>& getTracingState();
159
+ TORCH_API void setTracingState(std::shared_ptr<TracingState> state);
160
+
161
+ inline bool isTracing() {
162
+ return static_cast<bool>(getTracingState());
163
+ }
164
+
165
+ using warn_fn_type = void (*)(const std::string& msg);
166
+ TORCH_API extern const char* WARN_PYTHON_DATAFLOW;
167
+ TORCH_API extern const char* WARN_CONSTRUCTOR;
168
+ TORCH_API extern const char* WARN_RESIZE;
169
+ TORCH_API extern const char* STRICT_TRACER_MSG;
170
+ TORCH_API void _do_warn(const char* _reason, const char* _kind);
171
+ inline void warn(const char* _reason, const char* _kind = nullptr) {
172
+ if (const auto& state = getTracingState()) {
173
+ if (!state->warn)
174
+ return;
175
+ _do_warn(_reason, _kind);
176
+ }
177
+ }
178
+ TORCH_API void setWarn(warn_fn_type fn);
179
+
180
// RAII guard that suppresses tracer warnings for its lifetime and restores
// the previous warn flag on destruction. No-op when tracing is inactive.
struct TORCH_API NoWarn {
  NoWarn() : state(getTracingState()) {
    if (state) {
      prev = state->warn;
      state->warn = false;
    }
  }
  ~NoWarn() {
    if (state) {
      state->warn = prev;
    }
  }
  std::shared_ptr<TracingState> state;
  bool prev{false};
};
195
+
196
// RAII guard that pushes/pops a value-lookup frame on the active trace.
// NOTE(review): dereferences getTracingState() unconditionally — callers
// appear to be responsible for ensuring tracing is active; confirm.
struct WithNestedTracingFrame {
  WithNestedTracingFrame() {
    getTracingState()->enterFrame();
  }

  ~WithNestedTracingFrame() {
    getTracingState()->leaveFrame();
  }
};
205
+ TORCH_API void recordSourceLocation(Node* n);
206
+ TORCH_API void setRecordSourceLocation(void (*v)(Node*));
207
+
208
+ TORCH_API std::vector<StackEntry> pythonCallstack();
209
+ TORCH_API void setPythonCallstack(std::vector<StackEntry> (*v)());
210
+
211
+ // Having finished adding a new 'node' to the graph IR 'setValueTrace'
212
+ // associates this node with an output variable, so that further operations
213
+ // involving this variable know which node in the IR to reference.
214
+ TORCH_API void setValueTrace(const IValue& v, Value* value);
215
+
216
+ TORCH_API void delValueTrace(const IValue& var);
217
+
218
+ TORCH_API std::function<void()> pauseTracing();
219
+
220
+ TORCH_API Value* getValueTrace(const IValue& var);
221
+
222
+ TORCH_API std::pair<std::shared_ptr<TracingState>, Stack> trace(
223
+ Stack inputs,
224
+ const std::function<Stack(Stack)>& traced_fn,
225
+ std::function<std::string(const Variable&)> var_name_lookup_fn,
226
+ bool strict = true,
227
+ bool force_outplace = false,
228
+ Module* self = nullptr,
229
+ const std::vector<std::string>& argument_names = {});
230
+
231
+ TORCH_API void abandon();
232
+
233
+ // NB: those serve both as an intermediate steps in addInputs below,
234
+ // as well as the overloads that terminate template recursion
235
+ TORCH_API void addInputs(Node* n, const char* name, int64_t value);
236
+ TORCH_API void addInputs(Node* n, const char* name, c10::SymInt value);
237
+ TORCH_API void addInputs(
238
+ Node* n,
239
+ const char* name,
240
+ c10::optional<int64_t> value);
241
+ TORCH_API void addInputs(Node* n, const char* name, bool value);
242
+ TORCH_API void addInputs(
243
+ Node* n,
244
+ const char* name,
245
+ const c10::optional<bool>& value);
246
+ TORCH_API void addInputs(Node* n, const char* name, double value);
247
+ TORCH_API void addInputs(
248
+ Node* n,
249
+ const char* name,
250
+ const c10::optional<double>& value);
251
+ TORCH_API void addInputs(Node* n, const char* name, const at::Scalar& value);
252
+ TORCH_API void addInputs(
253
+ Node* n,
254
+ const char* name,
255
+ const c10::optional<at::Scalar>& value);
256
+ TORCH_API void addInputs(Node* n, const char* name, const at::Tensor& value);
257
+ TORCH_API void addInputs(
258
+ Node* n,
259
+ const char* name,
260
+ const c10::optional<at::Tensor>& value);
261
+ TORCH_API void addInputs(Node* n, const char* name, ArrayRef<int64_t> value);
262
+ TORCH_API void addInputs(Node* n, const char* name, c10::SymIntArrayRef value);
263
+ TORCH_API void addInputs(
264
+ Node* n,
265
+ const char* name,
266
+ c10::optional<c10::SymInt> value);
267
+ TORCH_API void addInputs(
268
+ Node* n,
269
+ const char* name,
270
+ const c10::optional<ArrayRef<int64_t>>& value);
271
+ TORCH_API void addInputs(
272
+ Node* n,
273
+ const char* name,
274
+ const at::OptionalIntArrayRef& opt_value);
275
+ TORCH_API void addInputs(
276
+ Node* n,
277
+ const char* name,
278
+ const at::OptionalSymIntArrayRef& opt_value);
279
+ TORCH_API void addInputs(
280
+ Node* n,
281
+ const char* name,
282
+ ArrayRef<at::Tensor> value,
283
+ bool allow_undefined = false);
284
+ TORCH_API void addInputs(
285
+ Node* n,
286
+ const char* name,
287
+ std::vector<at::Tensor> value,
288
+ bool allow_undefined = false);
289
+ TORCH_API void addInputs(
290
+ Node* n,
291
+ const char* name,
292
+ at::ITensorListRef value,
293
+ bool allow_undefined = false);
294
+ TORCH_API void addInputs(
295
+ Node* n,
296
+ const char* name,
297
+ const List<c10::optional<at::Tensor>>& value);
298
+ TORCH_API void addInputs(
299
+ Node* n,
300
+ const char* name,
301
+ ArrayRef<c10::intrusive_ptr<c10::ivalue::Object>> value,
302
+ const c10::ClassTypePtr& class_type);
303
+ TORCH_API void addInputs(Node* n, const char* name, ArrayRef<double> value);
304
+ TORCH_API void addInputs(
305
+ Node* n,
306
+ const char* name,
307
+ const c10::optional<ArrayRef<double>>& value);
308
+ TORCH_API void addInputs(
309
+ Node* n,
310
+ const char* name,
311
+ const c10::string_view value);
312
+ TORCH_API void addInputs(
313
+ Node* n,
314
+ const char* name,
315
+ const c10::optional<c10::string_view>& value);
316
+ TORCH_API void addInputs(Node* n, const char* name, at::Device value);
317
+ TORCH_API void addInputs(Node* n, const char* name, c10::Stream stream);
318
+ TORCH_API void addInputs(Node* n, const char* name, at::Layout value);
319
+ TORCH_API void addInputs(Node* n, const char* name, at::ScalarType value);
320
+ TORCH_API void addInputs(
321
+ Node* n,
322
+ const char* name,
323
+ const c10::optional<at::ScalarType>& value);
324
+ TORCH_API void addInputs(
325
+ Node* n,
326
+ const char* name,
327
+ const c10::optional<at::Device>& value);
328
+ TORCH_API void addInputs(
329
+ Node* n,
330
+ const char* name,
331
+ const c10::optional<at::Layout>& value);
332
+ TORCH_API void addInputs(Node* n, const char* name, at::MemoryFormat value);
333
+ TORCH_API void addInputs(
334
+ Node* n,
335
+ const char* name,
336
+ c10::optional<at::DimnameList> value);
337
+ TORCH_API void addInputs(
338
+ Node* n,
339
+ const char* name,
340
+ const c10::optional<at::MemoryFormat>& value);
341
+ TORCH_API void addInputs(
342
+ Node* n,
343
+ const char* name,
344
+ const c10::optional<at::Generator>& value);
345
+
346
// Fallback addInputs overloads for argument types the tracer cannot record;
// each fails loudly instead of silently dropping the argument.
inline void addInputs(
    Node* n,
    const char* name,
    const std::vector<bool>& value) {
  AT_ERROR("Tracing a list of bool type is currently not supported!");
}

template <typename T>
void addInputs(Node* n, const char* name, ArrayRef<T> value) {
  AT_ERROR("Tracing a list of arbitrary type is currently not supported!");
}
template <typename K, typename V>
void addInputs(
    Node* n,
    const char* name,
    const std::unordered_map<K, V>& value) {
  AT_ERROR("Tracing a dict of arbitrary types is currently not supported!");
}

template <size_t N>
void addInputs(Node* n, const char* name, std::array<bool, N> value) {
  throw std::runtime_error(
      "Found an unsupported argument type in the JIT tracer. File a bug report.");
}
370
+
371
+ TORCH_API void addInputs(
372
+ Node* n,
373
+ const char* name,
374
+ const c10::intrusive_ptr<c10::ivalue::Object>& obj);
375
+
376
+ TORCH_API void ensureUniqueIfOutOfPlaced(
377
+ const char* name,
378
+ const at::Tensor& tensor);
379
+ TORCH_API void ensureUniqueIfOutOfPlaced(
380
+ const char* name,
381
+ const c10::optional<at::Tensor>& tensor);
382
+
383
// Catch-all addOutput: rejects output types the tracer cannot represent.
// The enable_if excludes the supported types (Tensor, TensorList,
// c10::List<Tensor>, ivalue::Object), which have dedicated overloads.
template <
    typename T,
    typename = torch::enable_if_t<(
        !std::is_convertible<torch::decay_t<T>, at::TensorList>::value &&
        !std::is_convertible<torch::decay_t<T>, c10::List<at::Tensor>>::value &&
        !std::is_convertible<torch::decay_t<T>, at::Tensor>::value &&
        !std::is_convertible<
            torch::decay_t<T>,
            c10::intrusive_ptr<c10::ivalue::Object>>::value)>>
void addOutput(Node* node, T&&) {
  AT_ERROR(
      "Found an unsupported argument type ",
      c10::demangle_type<T>(),
      " in the JIT tracer. File a bug report.");
}
398
+ TORCH_API void addOutput(Node* node, const at::Tensor& tensor);
399
+ TORCH_API void setOutput(Value* value, const at::Tensor& output);
400
+ TORCH_API void addOutput(Node* node, const std::vector<at::Tensor>& list);
401
+ TORCH_API void addOutput(Node* node, const c10::List<at::Tensor>& list);
402
+ TORCH_API void addOutput(
403
+ Node* node,
404
+ const c10::intrusive_ptr<c10::ivalue::Object>& output);
405
+
406
+ TORCH_API autograd::Variable getSizeOf(
407
+ const autograd::Variable& var,
408
+ int64_t dim);
409
+
410
+ TORCH_API autograd::Variable getNumelOf(const autograd::Variable& var);
411
+
412
+ } // namespace tracer
413
+ } // namespace jit
414
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/tree.h ADDED
@@ -0,0 +1,220 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
#include <algorithm>
#include <functional>
#include <memory>
#include <ostream>
#include <sstream>
#include <stdexcept>
#include <string>
#include <unordered_map>
#include <vector>

#include <c10/util/SmallVector.h>
#include <c10/util/intrusive_ptr.h>
#include <torch/csrc/jit/frontend/lexer.h>
11
+
12
+ namespace torch {
13
+ namespace jit {
14
+
15
+ // Trees are used to represent all forms of TC IR, pre- and post-typechecking.
16
+ // Rather than have a full class hierarchy for all TC statements, trees are a
17
+ // slight variation of Lisp s-expressions. For instance, the expression a*b+1
18
+ // is represented as:
19
+ // (+ (* (ident a) (ident b)) (const 1))
20
+ // Atoms like 'a', 'b', and '1' are represented by subclasses of Tree which
21
+ // define stringValue(). Everything else is a Compound object, which has a
22
+ // 'kind' that is a token from lexer.h's TokenKind enum. Single-character
23
+ // operators like '+' are represented using the character itself (so, add.kind()
24
+ // would be '+'). Each Compound object also contains a list of subtrees and is
25
+ // associated with a SourceRange for error reporting.
26
+ // Memory management of trees is done using intrusive_ptr.
27
+
28
+ struct Tree;
29
+ using TreeRef = c10::intrusive_ptr<Tree>;
30
+ using TreeList = at::SmallVector<TreeRef, 4>;
31
+
32
+ struct Tree : c10::intrusive_ptr_target {
33
+ Tree(int kind_) : kind_(kind_) {}
34
+ int kind() const {
35
+ return kind_;
36
+ }
37
+ virtual bool isAtom() const {
38
+ return true;
39
+ }
40
+ virtual const SourceRange& range() const {
41
+ throw std::runtime_error("is an Atom");
42
+ }
43
+ virtual const std::string& stringValue() const {
44
+ throw std::runtime_error("stringValue can only be called on TK_STRING");
45
+ }
46
+ virtual const TreeList& trees() const {
47
+ static const TreeList empty_trees = {};
48
+ return empty_trees;
49
+ }
50
+ const TreeRef& tree(size_t i) const {
51
+ return trees().at(i);
52
+ }
53
+ virtual TreeRef map(const std::function<TreeRef(TreeRef)>& fn) {
54
+ (void)fn;
55
+ c10::raw::intrusive_ptr::incref(this); // we are creating a new pointer
56
+ // from a raw `this` pointer
57
+ // so we need to bump the refcount
58
+ // to account for this ownership
59
+ return TreeRef::reclaim(this);
60
+ }
61
+ template <typename... Args>
62
+ void match(int k, Args&... args) const {
63
+ matchD(k, "unknown", 0, args...);
64
+ }
65
+ template <typename... Args>
66
+ void matchD(int k, const char* filename, int lineno, Args&... args) const {
67
+ std::initializer_list<TreeRef*> vars = {args...};
68
+ matchNumSubtreesD(k, filename, lineno, vars.size(), true);
69
+ size_t i = 0;
70
+ for (TreeRef* v : vars) {
71
+ *v = trees()[i++];
72
+ }
73
+ }
74
+ void matchNumSubtrees(int k, size_t expected_subtrees) {
75
+ return matchNumSubtreesD(k, "unknown", 0, expected_subtrees, false);
76
+ }
77
+ void matchNumSubtreesD(
78
+ int k,
79
+ const char* filename,
80
+ int lineno,
81
+ size_t expected_subtrees,
82
+ bool allow_more) const {
83
+ if (kind() != k) {
84
+ std::stringstream ss;
85
+ ss << filename << ":" << lineno << ": expecting kind '" << kindToString(k)
86
+ << "' but found '" << kindToString(kind()) << "'\n";
87
+ range().highlight(ss);
88
+ throw std::runtime_error(ss.str());
89
+ }
90
+ if (trees().size() < expected_subtrees ||
91
+ (!allow_more && trees().size() != expected_subtrees)) {
92
+ std::stringstream ss;
93
+ ss << filename << ":" << lineno << ": expected at least "
94
+ << expected_subtrees << " subtrees, but found only " << trees().size()
95
+ << "\n";
96
+ range().highlight(ss);
97
+ throw std::runtime_error(ss.str());
98
+ }
99
+ }
100
+ ~Tree() override = default;
101
+
102
+ private:
103
+ int kind_;
104
+ };
105
+
106
+ struct String : public Tree {
107
+ String(std::string value) : Tree(TK_STRING), value_(std::move(value)) {}
108
+ const std::string& stringValue() const override {
109
+ return value_;
110
+ }
111
+ template <typename... Args>
112
+ static TreeRef create(Args&&... args) {
113
+ return c10::make_intrusive<String>(std::forward<Args>(args)...);
114
+ }
115
+
116
+ private:
117
+ std::string value_;
118
+ };
119
+
120
+ static SourceRange mergeRanges(SourceRange c, const TreeList& others) {
121
+ for (const auto& t : others) {
122
+ if (t->isAtom())
123
+ continue;
124
+ size_t s = std::min(c.start(), t->range().start());
125
+ size_t e = std::max(c.end(), t->range().end());
126
+ c = SourceRange(c.source(), s, e);
127
+ }
128
+ return c;
129
+ }
130
+
131
+ struct Compound : public Tree {
132
+ Compound(int kind, SourceRange range)
133
+ : Tree(kind), range_(std::move(range)) {}
134
+ Compound(int kind, const SourceRange& range_, TreeList&& trees_)
135
+ : Tree(kind),
136
+ range_(mergeRanges(range_, trees_)),
137
+ trees_(std::move(trees_)) {}
138
+ const TreeList& trees() const override {
139
+ return trees_;
140
+ }
141
+ static TreeRef create(
142
+ int kind,
143
+ const SourceRange& range_,
144
+ TreeList&& trees_) {
145
+ return c10::make_intrusive<Compound>(kind, range_, std::move(trees_));
146
+ }
147
+ bool isAtom() const override {
148
+ return false;
149
+ }
150
+ TreeRef map(const std::function<TreeRef(TreeRef)>& fn) override {
151
+ TreeList ret;
152
+ for (auto& t : trees()) {
153
+ ret.push_back(fn(t));
154
+ }
155
+ return Compound::create(kind(), range(), std::move(ret));
156
+ }
157
+
158
+ const SourceRange& range() const override {
159
+ return range_;
160
+ }
161
+
162
+ private:
163
+ SourceRange range_;
164
+ TreeList trees_;
165
+ };
166
+
167
+ // tree pretty printer
168
+ struct pretty_tree {
169
+ pretty_tree(const TreeRef& tree, size_t col = 40) : tree(tree), col(col) {}
170
+ const TreeRef& tree;
171
+ size_t col;
172
+ std::unordered_map<TreeRef, std::string> flat_strings;
173
+ const std::string& get_flat(const TreeRef& t) {
174
+ auto it = flat_strings.find(t);
175
+ if (it != flat_strings.end())
176
+ return it->second;
177
+
178
+ std::stringstream out;
179
+ switch (t->kind()) {
180
+ case TK_STRING:
181
+ out << t->stringValue();
182
+ break;
183
+ default:
184
+ out << "(" << kindToString(t->kind());
185
+ for (const auto& e : t->trees()) {
186
+ out << " " << get_flat(e);
187
+ }
188
+ out << ")";
189
+ break;
190
+ }
191
+ auto it_ = flat_strings.emplace(t, out.str());
192
+ return it_.first->second;
193
+ }
194
+ void print(std::ostream& out, const TreeRef& t, int indent) {
195
+ const std::string& s = get_flat(t);
196
+ if (indent + s.size() < col || t->isAtom()) {
197
+ out << s;
198
+ return;
199
+ }
200
+ std::string k = kindToString(t->kind());
201
+ out << "(" << k;
202
+ for (const auto& e : t->trees()) {
203
+ out << "\n" << std::string(indent + 2, ' ');
204
+ print(out, e, indent + 2);
205
+ }
206
+ out << ")";
207
+ }
208
+ };
209
+
210
// Stream a pretty_tree. Taken by value on purpose: printing fills the
// printer's memo cache, so each call gets a fresh one. Flushes via endl.
static inline std::ostream& operator<<(std::ostream& out, pretty_tree t_) {
  t_.print(out, t_.tree, 0);
  return out << std::endl;
}
214
+
215
// Convenience overload: printing a TreeRef pretty-prints it with the default
// column width.
static inline std::ostream& operator<<(std::ostream& out, const TreeRef& t) {
  return out << pretty_tree(t);
}
218
+
219
+ } // namespace jit
220
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/tree_views.h ADDED
@@ -0,0 +1,1275 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <c10/util/string_utils.h>
3
+ #include <torch/csrc/jit/frontend/error_report.h>
4
+ #include <torch/csrc/jit/frontend/strtod.h>
5
+ #include <torch/csrc/jit/frontend/tree.h>
6
+
7
+ #include <c10/util/complex.h>
8
+ #include <functional>
9
+ #include <iostream>
10
+ #include <string>
11
+ #include <utility>
12
+
13
+ namespace torch {
14
+ namespace jit {
15
+
16
+ // clang-format off
17
+ // TreeView provides a statically-typed way to traverse the tree, which should
18
+ // be formed according to the grammar below.
19
+ //
20
+ // A few notes on types and their aliases:
21
+ // - List<T> is really a Tree with kind TK_LIST and elements as subtrees
22
+ // - Maybe<T> is really a Tree with kind TK_OPTION that has 0 or 1 subtree of type T
23
+ // - Builtin types are: Ident (TK_IDENT), String (TK_STRING)
24
+ //
25
+ // Param = Param(Maybe<Expr> type, Ident name) TK_PARAM
26
+ //
27
+ // Decl = Decl(List<Param> params, Maybe<Expr> return_type) TK_DECL
28
+ // Def = Def(Ident name, Decl decl, List<Stmt> body) TK_DEF
29
+ // ClassDef = ClassDef(Ident name, TK_CLASS_DEF
30
+ // Maybe<Expr> superclass,
31
+ // List<Stmt> body)
32
+ //
33
+ // Stmt = If(Expr cond, List<Stmt> true_body, List<Stmt> false_body) TK_IF
34
+ // | For(List<Expr> targets, List<Expr> iters, List<Stmt> body) TK_FOR
35
+ // | While(Expr cond, List<Stmt> body) TK_WHILE
36
+ // | Global(List<Ident> idents) TK_GLOBAL
37
+ // -- NB: the only type of Expr's allowed on lhs are Var
38
+ // Or a tuple containing Var with an optional terminating Starred
39
+ // | Assign(Expr lhs, Maybe<Expr> rhs, Maybe<Expr> type) TK_ASSIGN
40
+ // | AugAssign(Expr lhs, AugAssignKind aug_op, Expr rhs) TK_AUG_ASSIGN
41
+ // | Return(List<Expr> values) TK_RETURN
42
+ // | ExprStmt(List<Expr> expr) TK_EXPR_STMT
43
+ // | Raise(Expr expr) TK_RAISE
44
+ // | Def TK_DEF
45
+ // | With(List<WithItem> targets, List<Stmt> body) TK_WITH
46
+ //
47
+ // Expr = TernaryIf(Expr cond, Expr true_expr, Expr false_expr) TK_IF_EXPR
48
+ // | BinOp(Expr lhs, Expr rhs)
49
+ // | And TK_AND
50
+ // | Or TK_OR
51
+ // | Lt '<'
52
+ // | Gt '>'
53
+ // | Eq TK_EQ
54
+ // | Le TK_LE
55
+ // | Ge TK_GE
56
+ // | Ne TK_NE
57
+ // | Is TK_IS
58
+ // | IsNot TK_ISNOT
59
+ // | Add '+'
60
+ // | Sub '-'
61
+ // | Mul '*'
62
+ // | Div '/'
63
+ // | Mod '%'
64
+ // | MatMult '@'
65
+ // | Pow TK_POW
66
+ // | UnaryOp(Expr expr)
67
+ // | Not TK_NOT
68
+ // | USub '-'
69
+ // | Const(String value) TK_CONST
70
+ // -- NB: x.name(y) is desugared into name(x, y)
71
+ // | Apply(Ident name, List<Expr> args, List<Attribute> kwargs) TK_APPLY
72
+ // | Select(Expr value, Ident selector) '.'
73
+ // | Subscript(Expr value, List<Expr> subscript_exprs) TK_SUBSCRIPT
74
+ // | SliceExpr(Maybe<Expr> start, Maybe<Expr> end) TK_SLICE_EXPR
75
+ // | Var(Ident name) TK_VAR
76
+ // | ListLiteral(List<Expr> inputs) TK_LIST_LITERAL
77
+ // | TupleLiteral(List<Expr> inputs) TK_TUPLE_LITERAL
78
+ // | Starred(Expr expr) TK_STARRED
79
+ // | WithItem(Expr target, Maybe<Var> var) TK_WITH_ITEM
80
+ // -- NB: only allowed expressions are Const or List(Const)
81
+ // (List as a value, not type constructor)
82
+ // Attribute = Attribute(Ident name, Expr value) TK_ATTRIBUTE
83
+ //
84
+ // AugAssignKind =
85
+ // | Add() TK_PLUS_EQ
86
+ // | Sub() TK_MINUS_EQ
87
+ // | Mul() TK_TIMES_EQ
88
+ // | Div() TK_DIV_EQ
89
+ // | Mod() TK_MOD_EQ
90
+ //
91
+
92
+ // Each subclass of TreeView should provide:
93
+ // 1. Constructor that takes a TreeRef, and checks that it's of the right type.
94
+ // 2. Accessors that get underlying information out of the object. If they
95
+ // return subtrees, they should wrap them in appropriate views too.
96
+ // 3. Static method 'create' that creates the underlying TreeRef object
97
+ // for every TreeRef kind that has a TreeView, the parser always uses
98
+ // (e.g.) Ident::create rather than Compound::Create, this means that
99
+ // changes to the structure of Ident are always made right here rather
100
+ // than both in the parser and in this code.
101
+ // XXX: these structs should have no fields to prevent slicing when passing by value
102
+ // clang-format on
103
+ struct TreeView {
104
+ explicit TreeView(TreeRef tree) : tree_(std::move(tree)) {}
105
+ TreeRef tree() const {
106
+ return tree_;
107
+ }
108
+ const SourceRange& range() const {
109
+ return tree_->range();
110
+ }
111
+ operator TreeRef() const {
112
+ return tree_;
113
+ }
114
+ const TreeRef& get() const {
115
+ return tree_;
116
+ }
117
+ int kind() const {
118
+ return tree_->kind();
119
+ }
120
+ void dump() const {
121
+ std::cout << tree_;
122
+ }
123
+
124
+ protected:
125
+ const TreeRef& subtree(size_t i) const {
126
+ return tree_->trees().at(i);
127
+ }
128
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
129
+ TreeRef tree_;
130
+ };
131
+
132
+ template <typename T>
133
+ struct ListIterator {
134
+ ListIterator(TreeList::const_iterator it) : it(it) {}
135
+ bool operator!=(const ListIterator& rhs) const {
136
+ return it != rhs.it;
137
+ }
138
+ bool operator==(const ListIterator& rhs) const {
139
+ return it == rhs.it;
140
+ }
141
+ T operator*() const {
142
+ return T(*it);
143
+ }
144
+ ListIterator& operator+=(std::ptrdiff_t n) {
145
+ it += n;
146
+ return *this;
147
+ }
148
+ ListIterator& operator++() {
149
+ ++it;
150
+ return *this;
151
+ }
152
+ ListIterator& operator--() {
153
+ --it;
154
+ return *this;
155
+ }
156
+
157
+ private:
158
+ TreeList::const_iterator it;
159
+ };
160
+
161
+ template <typename T>
162
+ struct List : public TreeView {
163
+ using iterator = ListIterator<T>;
164
+ using const_iterator = ListIterator<T>;
165
+
166
+ List(const TreeRef& tree) : TreeView(tree) {
167
+ tree->match(TK_LIST);
168
+ // Iterate over list to temporarily instantiate Ts that will check the type
169
+ for (const T& elem : *this) {
170
+ (void)elem; // silence unused warning
171
+ }
172
+ }
173
+ iterator begin() const {
174
+ return iterator(tree_->trees().begin());
175
+ }
176
+ iterator end() const {
177
+ return iterator(tree_->trees().end());
178
+ }
179
+ bool empty() const {
180
+ return tree_->trees().begin() == tree_->trees().end();
181
+ }
182
+ T operator[](size_t i) const {
183
+ return T(subtree(i));
184
+ }
185
+ TreeRef map(const std::function<TreeRef(const T&)>& fn) {
186
+ return tree_->map([&](TreeRef v) { return fn(T(v)); });
187
+ }
188
+ static List create(const SourceRange& range, const std::vector<T>& subtrees) {
189
+ TreeList type_erased_sub{subtrees.begin(), subtrees.end()};
190
+ return List(Compound::create(TK_LIST, range, std::move(type_erased_sub)));
191
+ }
192
+ static List unsafeCreate(const SourceRange& range, TreeList&& subtrees) {
193
+ return List(Compound::create(TK_LIST, range, std::move(subtrees)));
194
+ }
195
+ size_t size() const {
196
+ return tree_->trees().size();
197
+ }
198
+ };
199
+
200
+ template <typename T>
201
+ struct Maybe : public TreeView {
202
+ explicit Maybe(const TreeRef& tree) : TreeView(tree) {
203
+ tree_->match(TK_OPTION);
204
+ if (tree_->trees().size() > 1)
205
+ throw ErrorReport(tree) << "Maybe trees can have at most one subtree";
206
+ }
207
+ /* implicit */ Maybe(const T& tree) : TreeView(tree) {}
208
+ bool present() const {
209
+ return tree_->trees().size() > 0;
210
+ }
211
+ T get() const {
212
+ return T(tree_->trees().at(0));
213
+ }
214
+ TreeRef map(const std::function<TreeRef(const T&)>& fn) {
215
+ return tree_->map([&](TreeRef v) { return fn(T(v)); });
216
+ }
217
+ static Maybe<T> create(const SourceRange& range) {
218
+ return Maybe<T>(Compound::create(TK_OPTION, range, {}));
219
+ }
220
+ static Maybe<T> create(const SourceRange& range, const T& value) {
221
+ return Maybe<T>(Compound::create(TK_OPTION, range, {value}));
222
+ }
223
+ };
224
+
225
+ struct Ident : public TreeView {
226
+ explicit Ident(const TreeRef& tree) : TreeView(tree) {
227
+ tree_->match(TK_IDENT);
228
+ }
229
+ const std::string& name() const {
230
+ return subtree(0)->stringValue();
231
+ }
232
+ static Ident create(const SourceRange& range, std::string name) {
233
+ return Ident(
234
+ Compound::create(TK_IDENT, range, {String::create(std::move(name))}));
235
+ }
236
+ };
237
+
238
+ ////////////////////////////////////////////////////////////////////////////////
239
+ // Base types (production LHS)
240
+ ////////////////////////////////////////////////////////////////////////////////
241
+
242
// Validating view for statement nodes. The constructor is a whitelist: any
// kind not listed below is rejected with an ErrorReport pointing at the node.
struct Stmt : public TreeView {
  explicit Stmt(const TreeRef& tree) : TreeView(tree) {
    switch (tree->kind()) {
      case TK_IF:
      case TK_FOR:
      case TK_WHILE:
      case TK_GLOBAL:
      case TK_ASSIGN:
      case TK_AUG_ASSIGN:
      case TK_RETURN:
      case TK_EXPR_STMT:
      case TK_RAISE:
      case TK_ASSERT:
      case TK_PASS:
      case TK_BREAK:
      case TK_DELETE:
      case TK_CONTINUE:
      case TK_DEF:
      case TK_WITH:
        return;
      default:
        throw ErrorReport(tree)
            << kindToString(tree->kind()) << " is not a valid Stmt";
    }
  }
};
268
+
269
// Validating view for expression nodes. Single-character operators use the
// character itself as the kind; everything else is a TK_* token. Any kind
// not listed is rejected with an ErrorReport.
struct Expr : public TreeView {
  explicit Expr(const TreeRef& tree) : TreeView(tree) {
    switch (tree->kind()) {
      case TK_IF_EXPR:
      case TK_AND:
      case TK_OR:
      case '<':
      case '>':
      case TK_IS:
      case TK_ISNOT:
      case TK_EQ:
      case TK_LE:
      case TK_GE:
      case TK_NE:
      case '+':
      case '-':
      case TK_UNARY_MINUS:
      case '~':
      case '*':
      case TK_STARRED:
      case '/':
      case '%':
      case TK_NOT:
      case TK_CONST:
      case TK_STRINGLITERAL:
      case TK_TRUE:
      case TK_FALSE:
      case TK_NONE:
      case TK_NONE_TYPE:
      case TK_CAST:
      case TK_APPLY:
      case '.':
      case TK_SUBSCRIPT:
      case TK_SLICE_EXPR:
      case TK_VAR:
      case TK_LIST_LITERAL:
      case TK_TUPLE_LITERAL:
      case TK_DICT_LITERAL:
      case '@':
      case TK_POW:
      case TK_LSHIFT:
      case TK_RSHIFT:
      case TK_FLOOR_DIV:
      case '&':
      case '^':
      case '|':
      case TK_LIST_COMP:
      case TK_DICT_COMP:
      case TK_DOTS:
      case TK_IN:
      case TK_WITH_ITEM:
        return;
      default:
        throw ErrorReport(tree)
            << kindToString(tree->kind()) << " is not a valid Expr";
    }
  }
};
327
+
328
+ ////////////////////////////////////////////////////////////////////////////////
329
+ // Helper nodes (mostly for function arguments)
330
+ ////////////////////////////////////////////////////////////////////////////////
331
+
332
+ struct Attribute : public TreeView {
333
+ explicit Attribute(const TreeRef& tree) : TreeView(tree) {
334
+ tree_->match(TK_ATTRIBUTE);
335
+ }
336
+ Ident name() const {
337
+ return Ident(subtree(0));
338
+ }
339
+ Expr value() const {
340
+ return Expr(subtree(1));
341
+ }
342
+ static Attribute create(
343
+ const SourceRange& range,
344
+ const Ident& name,
345
+ const TreeRef& value) {
346
+ return Attribute(Compound::create(TK_ATTRIBUTE, range, {name, value}));
347
+ }
348
+ };
349
+
350
+ struct Param : public TreeView {
351
+ explicit Param(const TreeRef& tree) : TreeView(tree) {
352
+ tree_->match(TK_PARAM);
353
+ }
354
+ static Param create(
355
+ const SourceRange& range,
356
+ const Ident& ident,
357
+ const Maybe<Expr>& type,
358
+ const Maybe<Expr>& def,
359
+ bool kwarg_only) {
360
+ TreeRef kwarg_only_tree =
361
+ Compound::create(kwarg_only ? TK_TRUE : TK_FALSE, range, {});
362
+ return Param(Compound::create(
363
+ TK_PARAM, range, {ident, type, def, std::move(kwarg_only_tree)}));
364
+ }
365
+ Ident ident() const {
366
+ return Ident(subtree(0));
367
+ }
368
+ Maybe<Expr> type() const {
369
+ return Maybe<Expr>(subtree(1));
370
+ }
371
+ Maybe<Expr> defaultValue() const {
372
+ return Maybe<Expr>(subtree(2));
373
+ }
374
+ bool kwarg_only() const {
375
+ return TK_TRUE == subtree(3)->kind();
376
+ }
377
+ Param withType(const Maybe<Expr>& typ) const {
378
+ return Param::create(range(), ident(), typ, defaultValue(), kwarg_only());
379
+ }
380
+ };
381
+
382
+ ////////////////////////////////////////////////////////////////////////////////
383
+ // Top level definitions
384
+ ////////////////////////////////////////////////////////////////////////////////
385
+
386
+ struct Decl : public TreeView {
387
+ explicit Decl(const TreeRef& tree) : TreeView(tree) {
388
+ tree->match(TK_DECL);
389
+ }
390
+ List<Param> params() const {
391
+ return List<Param>(subtree(0));
392
+ }
393
+ Maybe<Expr> return_type() const {
394
+ return Maybe<Expr>(subtree(1));
395
+ }
396
+ static Decl create(
397
+ const SourceRange& range,
398
+ const List<Param>& params,
399
+ const Maybe<Expr>& return_type) {
400
+ return Decl(Compound::create(TK_DECL, range, {params, return_type}));
401
+ }
402
+ };
403
+
404
+ struct Def : public TreeView {
405
+ explicit Def(const TreeRef& tree) : TreeView(tree) {
406
+ tree->match(TK_DEF);
407
+ }
408
+ Def withName(std::string new_name) const {
409
+ auto new_ident = Ident::create(name().range(), std::move(new_name));
410
+ return create(range(), new_ident, decl(), statements());
411
+ }
412
+ Def withDecl(const Decl& decl) const {
413
+ return create(range(), name(), decl, statements());
414
+ }
415
+ Ident name() const {
416
+ return Ident(subtree(0));
417
+ }
418
+ Decl decl() const {
419
+ return Decl(subtree(1));
420
+ }
421
+ List<Stmt> statements() const {
422
+ return List<Stmt>(subtree(2));
423
+ }
424
+ static Def create(
425
+ const SourceRange& range,
426
+ const Ident& name,
427
+ const Decl& decl,
428
+ const List<Stmt>& stmts) {
429
+ return Def(Compound::create(TK_DEF, range, {name, decl, stmts}));
430
+ }
431
+ };
432
+
433
+ // Property represents a named attribute combined with a getter and setter
434
+ // method to access and mutate that attribute.
435
+ struct Property : public TreeView {
436
+ explicit Property(const TreeRef& tree) : TreeView(tree) {
437
+ tree->match(TK_PROP);
438
+ }
439
+ Ident name() const {
440
+ return Ident(subtree(0));
441
+ }
442
+ Def getter() const {
443
+ return Def(subtree(1));
444
+ }
445
+ Maybe<Def> setter() const {
446
+ return Maybe<Def>(subtree(2));
447
+ }
448
+ static Property create(
449
+ const SourceRange& range,
450
+ const Ident& name,
451
+ const Def& getter,
452
+ const Maybe<Def>& setter) {
453
+ return Property(Compound::create(TK_PROP, range, {name, getter, setter}));
454
+ }
455
+ };
456
+
457
+ struct Assign;
458
+
459
+ struct ClassDef : public TreeView {
460
+ explicit ClassDef(const TreeRef& tree) : TreeView(tree) {
461
+ tree->match(TK_CLASS_DEF);
462
+ }
463
+ explicit ClassDef(TreeRef&& tree) : TreeView(std::move(tree)) {
464
+ tree_->match(TK_CLASS_DEF);
465
+ }
466
+ ClassDef withName(std::string new_name) const {
467
+ auto new_ident = Ident::create(name().range(), std::move(new_name));
468
+ return create(range(), new_ident, superclass(), body());
469
+ }
470
+ Ident name() const {
471
+ return Ident(subtree(0));
472
+ }
473
+ Maybe<Expr> superclass() const {
474
+ return Maybe<Expr>(subtree(1));
475
+ }
476
+ List<Stmt> body() const {
477
+ return List<Stmt>(subtree(2));
478
+ }
479
+ Maybe<List<Property>> properties() const {
480
+ return Maybe<List<Property>>(subtree(3));
481
+ }
482
+ Maybe<List<Assign>> assigns() const {
483
+ return Maybe<List<Assign>>(subtree(4));
484
+ }
485
+ static ClassDef create(
486
+ const SourceRange& range,
487
+ const Ident& name,
488
+ const Maybe<Expr>& superclass,
489
+ const List<Stmt>& body) {
490
+ return ClassDef(Compound::create(
491
+ TK_CLASS_DEF,
492
+ range,
493
+ {name,
494
+ superclass,
495
+ body,
496
+ Maybe<List<Property>>::create(range),
497
+ Maybe<List<Assign>>::create(range)}));
498
+ }
499
+ static ClassDef create(
500
+ const SourceRange& range,
501
+ const Ident& name,
502
+ const Maybe<Expr>& superclass,
503
+ const List<Stmt>& body,
504
+ const List<Property>& properties,
505
+ const List<Assign>& assigns);
506
+ };
507
+
508
+ TORCH_API std::vector<std::string> getUnresolvedClassAttributes(
509
+ const ClassDef& def);
510
+
511
+ ////////////////////////////////////////////////////////////////////////////////
512
+ // Statements
513
+ ////////////////////////////////////////////////////////////////////////////////
514
+
515
+ struct If : public Stmt {
516
+ explicit If(const TreeRef& tree) : Stmt(tree) {
517
+ tree_->match(TK_IF);
518
+ }
519
+ Expr cond() const {
520
+ return Expr(subtree(0));
521
+ }
522
+ List<Stmt> trueBranch() const {
523
+ return List<Stmt>(subtree(1));
524
+ }
525
+ List<Stmt> falseBranch() const {
526
+ return List<Stmt>(subtree(2));
527
+ }
528
+ If withNewBranches(
529
+ const List<Stmt>& true_branch,
530
+ const List<Stmt>& false_branch) const {
531
+ return create(range(), cond(), true_branch, false_branch);
532
+ }
533
+ static If create(
534
+ const SourceRange& range,
535
+ const Expr& cond,
536
+ const List<Stmt>& true_branch,
537
+ const List<Stmt>& false_branch) {
538
+ return If(
539
+ Compound::create(TK_IF, range, {cond, true_branch, false_branch}));
540
+ }
541
+ };
542
+
543
+ struct While : public Stmt {
544
+ explicit While(const TreeRef& tree) : Stmt(tree) {
545
+ tree_->match(TK_WHILE);
546
+ }
547
+ Expr cond() const {
548
+ return Expr(subtree(0));
549
+ }
550
+ List<Stmt> body() const {
551
+ return List<Stmt>(subtree(1));
552
+ }
553
+ static While create(
554
+ const SourceRange& range,
555
+ const Expr& cond,
556
+ const List<Stmt>& body) {
557
+ return While(Compound::create(TK_WHILE, range, {cond, body}));
558
+ }
559
+ };
560
+
561
+ struct For : public Stmt {
562
+ explicit For(const TreeRef& tree) : Stmt(tree) {
563
+ tree->match(TK_FOR);
564
+ }
565
+ List<Expr> targets() const {
566
+ return List<Expr>(subtree(0));
567
+ }
568
+ List<Expr> itrs() const {
569
+ return List<Expr>(subtree(1));
570
+ }
571
+ List<Stmt> body() const {
572
+ return List<Stmt>(subtree(2));
573
+ }
574
+ static For create(
575
+ const SourceRange& range,
576
+ const List<Expr>& targets,
577
+ const List<Expr>& itrs,
578
+ const List<Stmt>& body) {
579
+ return For(Compound::create(TK_FOR, range, {targets, itrs, body}));
580
+ }
581
+ };
582
+
583
+ // TODO: supports only single comprehension for now
584
+ struct ListComp : public Expr {
585
+ explicit ListComp(const TreeRef& tree) : Expr(tree) {
586
+ tree->match(TK_LIST_COMP);
587
+ }
588
+ Expr elt() const {
589
+ return Expr(subtree(0));
590
+ }
591
+ Expr target() const {
592
+ return Expr(subtree(1));
593
+ }
594
+ Expr iter() const {
595
+ return Expr(subtree(2));
596
+ }
597
+ // TODO: no ifs for now
598
+ static ListComp create(
599
+ const SourceRange& range,
600
+ const Expr& elt,
601
+ const Expr& target,
602
+ const Expr& iter) {
603
+ return ListComp(Compound::create(TK_LIST_COMP, range, {elt, target, iter}));
604
+ }
605
+ };
606
+
607
+ // TODO: supports only single comprehension for now
608
+ struct DictComp : public Expr {
609
+ explicit DictComp(const TreeRef& tree) : Expr(tree) {
610
+ tree->match(TK_DICT_COMP);
611
+ }
612
+ Expr key() const {
613
+ return Expr(subtree(0));
614
+ }
615
+ Expr value() const {
616
+ return Expr(subtree(1));
617
+ }
618
+ Expr target() const {
619
+ return Expr(subtree(2));
620
+ }
621
+ Expr iter() const {
622
+ return Expr(subtree(3));
623
+ }
624
+ // TODO: no ifs for now
625
+ static DictComp create(
626
+ const SourceRange& range,
627
+ const Expr& key,
628
+ const Expr& value,
629
+ const Expr& target,
630
+ const Expr& iter) {
631
+ return DictComp(
632
+ Compound::create(TK_DICT_COMP, range, {key, value, target, iter}));
633
+ }
634
+ };
635
+
636
+ struct Global : public Stmt {
637
+ explicit Global(const TreeRef& tree) : Stmt(tree) {
638
+ tree_->match(TK_GLOBAL);
639
+ }
640
+ List<Ident> names() {
641
+ return List<Ident>(subtree(0));
642
+ }
643
+ static Global create(const SourceRange& range, const List<Ident>& names) {
644
+ return Global(Compound::create(TK_GLOBAL, range, {names}));
645
+ }
646
+ };
647
+
648
+ struct AugAssignKind : public TreeView {
649
+ explicit AugAssignKind(const TreeRef& tree) : TreeView(tree) {
650
+ switch (tree->kind()) {
651
+ case '+':
652
+ case '-':
653
+ case '*':
654
+ case '/':
655
+ case '%':
656
+ case '|':
657
+ case '&':
658
+ case '^':
659
+ case TK_POW:
660
+ case TK_LSHIFT:
661
+ case TK_RSHIFT:
662
+ return;
663
+ default:
664
+ throw ErrorReport(tree) << "is not a valid AugAssignKind";
665
+ }
666
+ }
667
+ };
668
+
669
+ // Augmented assignment, like "foo += bar"
670
+ struct AugAssign : public Stmt {
671
+ explicit AugAssign(const TreeRef& tree) : Stmt(tree) {
672
+ tree_->match(TK_AUG_ASSIGN);
673
+ }
674
+ static AugAssign create(
675
+ const SourceRange& range,
676
+ const Expr& lhs,
677
+ const AugAssignKind& aug_op,
678
+ const Expr& rhs) {
679
+ return AugAssign(
680
+ Compound::create(TK_AUG_ASSIGN, range, {lhs, aug_op, rhs}));
681
+ }
682
+ Expr lhs() const {
683
+ return Expr(subtree(0));
684
+ }
685
+ int aug_op() const {
686
+ return subtree(1)->kind();
687
+ }
688
+ Expr rhs() const {
689
+ return Expr(subtree(2));
690
+ }
691
+ };
692
+
693
+ struct Assign : public Stmt {
694
+ explicit Assign(const TreeRef& tree) : Stmt(tree) {
695
+ tree_->match(TK_ASSIGN);
696
+ }
697
+ static Assign create(
698
+ const SourceRange& range,
699
+ const List<Expr>& lhs,
700
+ const Maybe<Expr>& rhs,
701
+ const Maybe<Expr>& type) {
702
+ return Assign(Compound::create(TK_ASSIGN, range, {lhs, rhs, type}));
703
+ }
704
+
705
+ List<Expr> lhs_list() const {
706
+ return List<Expr>(subtree(0));
707
+ }
708
+
709
+ Expr lhs() const {
710
+ const auto& li = lhs_list();
711
+ TORCH_INTERNAL_ASSERT(li.size() == 1);
712
+ return *li.begin();
713
+ }
714
+
715
+ Maybe<Expr> rhs() const {
716
+ return Maybe<Expr>(subtree(1));
717
+ }
718
+
719
+ Maybe<Expr> type() const {
720
+ return Maybe<Expr>(subtree(2));
721
+ }
722
+ };
723
+
724
+ struct Return : public Stmt {
725
+ explicit Return(const TreeRef& tree) : Stmt(tree) {
726
+ tree_->match(TK_RETURN);
727
+ }
728
+ Expr expr() const {
729
+ return Expr(subtree(0));
730
+ }
731
+ static Return create(const SourceRange& range, const Expr& value) {
732
+ return Return(Compound::create(TK_RETURN, range, {value}));
733
+ }
734
+ };
735
+
736
+ struct Raise : public Stmt {
737
+ explicit Raise(const TreeRef& tree) : Stmt(tree) {
738
+ tree_->match(TK_RAISE);
739
+ }
740
+ Expr expr() const {
741
+ return Expr(subtree(0));
742
+ }
743
+ static Raise create(const SourceRange& range, const Expr& expr) {
744
+ return Raise(Compound::create(TK_RAISE, range, {expr}));
745
+ }
746
+ };
747
+
748
+ struct Assert : public Stmt {
749
+ explicit Assert(const TreeRef& tree) : Stmt(tree) {
750
+ tree_->match(TK_ASSERT);
751
+ }
752
+ Expr test() const {
753
+ return Expr(subtree(0));
754
+ }
755
+ Maybe<Expr> msg() const {
756
+ return Maybe<Expr>(subtree(1));
757
+ }
758
+ static Assert create(
759
+ const SourceRange& range,
760
+ const Expr& test,
761
+ const Maybe<Expr>& msg) {
762
+ return Assert(Compound::create(TK_ASSERT, range, {test, msg}));
763
+ }
764
+ };
765
+
766
+ struct Pass : public Stmt {
767
+ explicit Pass(const TreeRef& tree) : Stmt(tree) {
768
+ tree_->match(TK_PASS);
769
+ }
770
+ static Pass create(const SourceRange& range) {
771
+ return Pass(Compound::create(TK_PASS, range, {}));
772
+ }
773
+ };
774
+
775
+ struct Dots : public Expr {
776
+ explicit Dots(const TreeRef& tree) : Expr(tree) {
777
+ tree_->match(TK_DOTS);
778
+ }
779
+ static Dots create(const SourceRange& range) {
780
+ return Dots(Compound::create(TK_DOTS, range, {}));
781
+ }
782
+ };
783
+
784
+ struct Break : public Stmt {
785
+ explicit Break(const TreeRef& tree) : Stmt(tree) {
786
+ tree_->match(TK_BREAK);
787
+ }
788
+ static Break create(const SourceRange& range) {
789
+ return Break(Compound::create(TK_BREAK, range, {}));
790
+ }
791
+ };
792
+
793
+ struct Continue : public Stmt {
794
+ explicit Continue(const TreeRef& tree) : Stmt(tree) {
795
+ tree_->match(TK_CONTINUE);
796
+ }
797
+ static Continue create(const SourceRange& range) {
798
+ return Continue(Compound::create(TK_CONTINUE, range, {}));
799
+ }
800
+ };
801
+
802
+ struct ExprStmt : public Stmt {
803
+ explicit ExprStmt(const TreeRef& tree) : Stmt(tree) {
804
+ tree_->match(TK_EXPR_STMT);
805
+ }
806
+ Expr expr() {
807
+ return Expr(subtree(0));
808
+ }
809
+ static ExprStmt create(const SourceRange& range, const Expr& list) {
810
+ return ExprStmt(Compound::create(TK_EXPR_STMT, range, {list}));
811
+ }
812
+ };
813
+
814
+ ////////////////////////////////////////////////////////////////////////////////
815
+ // Expressions
816
+ ////////////////////////////////////////////////////////////////////////////////
817
+
818
+ struct BinOp : public Expr {
819
+ explicit BinOp(const TreeRef& tree) : Expr(tree) {
820
+ switch (tree->kind()) {
821
+ case TK_AND:
822
+ case TK_OR:
823
+ case '<':
824
+ case '>':
825
+ case TK_IS:
826
+ case TK_ISNOT:
827
+ case TK_EQ:
828
+ case TK_LE:
829
+ case TK_GE:
830
+ case TK_NE:
831
+ case '+':
832
+ case '*':
833
+ case '/':
834
+ case '-':
835
+ case '@':
836
+ case TK_POW:
837
+ case TK_LSHIFT:
838
+ case TK_RSHIFT:
839
+ case '%':
840
+ case '&':
841
+ case '^':
842
+ case '|':
843
+ case TK_FLOOR_DIV:
844
+ case TK_IN:
845
+ if (tree->trees().size() != 2)
846
+ throw ErrorReport(tree)
847
+ << "BinOp expected 2 subtrees, found " << tree->trees().size();
848
+ return;
849
+ default:
850
+ throw ErrorReport(tree)
851
+ << kindToString(tree->kind()) << " is not a valid BinOp";
852
+ }
853
+ }
854
+ Expr lhs() const {
855
+ return Expr(subtree(0));
856
+ }
857
+ Expr rhs() const {
858
+ return Expr(subtree(1));
859
+ }
860
+ static BinOp create(
861
+ const SourceRange& range,
862
+ int kind,
863
+ const Expr& lhs,
864
+ const Expr& rhs) {
865
+ return BinOp(Compound::create(kind, range, {lhs, rhs}));
866
+ }
867
+ };
868
+
869
+ struct UnaryOp : public Expr {
870
+ explicit UnaryOp(const TreeRef& tree) : Expr(tree) {
871
+ switch (tree->kind()) {
872
+ case TK_UNARY_MINUS:
873
+ case '~':
874
+ case TK_NOT:
875
+ if (tree->trees().size() != 1)
876
+ throw ErrorReport(tree)
877
+ << "UnaryOp expected 1 subtree, found " << tree->trees().size();
878
+ return;
879
+ default:
880
+ throw ErrorReport(tree)
881
+ << kindToString(tree->kind()) << " is not a valid UnaryOp";
882
+ }
883
+ }
884
+ static UnaryOp create(const SourceRange& range, int kind, const Expr& expr) {
885
+ return UnaryOp(Compound::create(kind, range, {expr}));
886
+ }
887
+ };
888
+
889
+ struct Const : public Expr {
890
+ explicit Const(const TreeRef& tree) : Expr(tree) {
891
+ tree_->matchNumSubtrees(TK_CONST, 1);
892
+ }
893
+ bool isFloatingPoint() const {
894
+ if (isComplex())
895
+ return false;
896
+
897
+ bool is_inf = subtree(0)->stringValue() == "inf";
898
+ return is_inf ||
899
+ subtree(0)->stringValue().find_first_of(".eE") != std::string::npos;
900
+ }
901
+ bool isIntegral() const {
902
+ return !isFloatingPoint() && !isComplex();
903
+ }
904
+ bool isComplex() const {
905
+ return subtree(0)->stringValue().find_first_of('j') != std::string::npos;
906
+ }
907
+ int64_t asIntegral() const {
908
+ try {
909
+ // NOLINTNEXTLINE(modernize-use-nullptr)
910
+ return std::stoll(subtree(0)->stringValue(), /*__idx=*/0, /*base=*/0);
911
+ } catch (const std::out_of_range&) {
912
+ throw ErrorReport(range()) << "Integral constant out of range "
913
+ "(must fit in a signed 64 bit integer)";
914
+ }
915
+ }
916
+ double asFloatingPoint() const {
917
+ // We can't pass in nullptr as the dummy pointer gets dereferenced for
918
+ // Android version of strtod_c().
919
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
920
+ char* dummy;
921
+ return torch::jit::strtod_c(subtree(0)->stringValue().c_str(), &dummy);
922
+ }
923
+ c10::complex<double> asComplex() const {
924
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
925
+ char* dummy;
926
+ auto str = subtree(0)->stringValue();
927
+ // Complex numbers (a+bj, where a is non-zero) are parsed as an addition
928
+ // between float/int a and a complex number "bj". When a is 0, a complex
929
+ // number bj is created as above. So, while parsing the string, we don't
930
+ // have to worry about the real component of the complex number.
931
+ auto imag =
932
+ torch::jit::strtod_c(str.substr(0, str.size() - 1).c_str(), &dummy);
933
+ return c10::complex<double>(0, imag);
934
+ }
935
+ const std::string& text() const {
936
+ return subtree(0)->stringValue();
937
+ }
938
+ static Const create(const SourceRange& range, const std::string& value) {
939
+ return Const(Compound::create(TK_CONST, range, {String::create(value)}));
940
+ }
941
+ };
942
+
943
+ struct StringLiteral : public Expr {
944
+ explicit StringLiteral(const TreeRef& tree) : Expr(tree) {
945
+ tree_->matchNumSubtrees(TK_STRINGLITERAL, 1);
946
+ }
947
+ const std::string& text() const {
948
+ return subtree(0)->stringValue();
949
+ }
950
+ static StringLiteral create(
951
+ const SourceRange& range,
952
+ const std::string& value) {
953
+ return StringLiteral(
954
+ Compound::create(TK_STRINGLITERAL, range, {String::create(value)}));
955
+ }
956
+ };
957
+
958
+ struct Apply : public Expr {
959
+ explicit Apply(const TreeRef& tree) : Expr(tree) {
960
+ tree_->match(TK_APPLY);
961
+ }
962
+ Expr callee() const {
963
+ return Expr(subtree(0));
964
+ }
965
+ List<Expr> inputs() const {
966
+ return List<Expr>(subtree(1));
967
+ }
968
+ List<Attribute> attributes() const {
969
+ return List<Attribute>(subtree(2));
970
+ }
971
+ static Apply create(
972
+ const SourceRange& range,
973
+ const Expr& callee,
974
+ const List<Expr>& inputs,
975
+ const List<Attribute>& attributes) {
976
+ return Apply(
977
+ Compound::create(TK_APPLY, range, {callee, inputs, attributes}));
978
+ }
979
+ };
980
+
981
+ struct Select : public Expr {
982
+ explicit Select(const TreeRef& tree) : Expr(tree) {
983
+ tree_->match('.');
984
+ }
985
+ Expr value() const {
986
+ return Expr(subtree(0));
987
+ }
988
+ Ident selector() const {
989
+ return Ident(subtree(1));
990
+ }
991
+ static Select create(
992
+ const SourceRange& range,
993
+ const Expr& value,
994
+ const Ident& selector) {
995
+ return Select(Compound::create('.', range, {value, selector}));
996
+ }
997
+ };
998
+
999
+ struct SliceExpr : public Expr {
1000
+ explicit SliceExpr(const TreeRef& tree) : Expr(tree) {
1001
+ tree_->match(TK_SLICE_EXPR);
1002
+ }
1003
+ Maybe<Expr> start() const {
1004
+ return Maybe<Expr>(subtree(0));
1005
+ }
1006
+ Maybe<Expr> end() const {
1007
+ return Maybe<Expr>(subtree(1));
1008
+ }
1009
+ Maybe<Expr> step() const {
1010
+ return Maybe<Expr>(subtree(2));
1011
+ }
1012
+ Expr startOr(int64_t alternative) const {
1013
+ const auto startOption = start();
1014
+ return startOption.present() ? startOption.get() : createInt(alternative);
1015
+ }
1016
+ Expr endOr(int64_t alternative) const {
1017
+ const auto endOption = end();
1018
+ return endOption.present() ? endOption.get() : createInt(alternative);
1019
+ }
1020
+ Expr stepOr(int64_t alternative) const {
1021
+ const auto stepOption = step();
1022
+ return stepOption.present() ? stepOption.get() : createInt(alternative);
1023
+ }
1024
+ static SliceExpr create(
1025
+ const SourceRange& range,
1026
+ const Maybe<Expr>& start,
1027
+ const Maybe<Expr>& end,
1028
+ const Maybe<Expr>& step) {
1029
+ return SliceExpr(
1030
+ Compound::create(TK_SLICE_EXPR, range, {start, end, step}));
1031
+ }
1032
+
1033
+ private:
1034
+ Expr createInt(int64_t value) const {
1035
+ return Expr(Const::create(range(), c10::to_string(value)));
1036
+ }
1037
+ };
1038
+
1039
+ struct Subscript : public Expr {
1040
+ explicit Subscript(const TreeRef& tree) : Expr(tree) {
1041
+ tree_->match(TK_SUBSCRIPT);
1042
+ }
1043
+ Expr value() const {
1044
+ return Expr(subtree(0));
1045
+ }
1046
+ List<Expr> subscript_exprs() const {
1047
+ return List<Expr>(subtree(1));
1048
+ }
1049
+ static Subscript create(
1050
+ const SourceRange& range,
1051
+ const Expr& value,
1052
+ const List<Expr>& subscript_exprs) {
1053
+ auto whole_range = SourceRange(
1054
+ range.source(), range.start(), subscript_exprs.range().end() + 1);
1055
+ return Subscript(
1056
+ Compound::create(TK_SUBSCRIPT, whole_range, {value, subscript_exprs}));
1057
+ }
1058
+ };
1059
+
1060
+ struct Var : public Expr {
1061
+ explicit Var(const TreeRef& tree) : Expr(tree) {
1062
+ tree_->match(TK_VAR);
1063
+ };
1064
+ Ident name() const {
1065
+ return Ident(subtree(0));
1066
+ }
1067
+ static Var create(const SourceRange& range, const Ident& name) {
1068
+ return Var(Compound::create(TK_VAR, range, {name}));
1069
+ }
1070
+ };
1071
+
1072
+ // WithItem represents an item using with a WithStmt.
1073
+ struct WithItem : public Expr {
1074
+ explicit WithItem(const TreeRef& tree) : Expr(tree) {
1075
+ tree_->match(TK_WITH_ITEM);
1076
+ }
1077
+
1078
+ Expr target() const {
1079
+ return Expr(subtree(0));
1080
+ }
1081
+
1082
+ Maybe<Var> var() const {
1083
+ return Maybe<Var>(subtree(1));
1084
+ }
1085
+
1086
+ static WithItem create(
1087
+ const SourceRange& range,
1088
+ const Expr& target,
1089
+ const Maybe<Var>& var) {
1090
+ return WithItem(Compound::create(TK_WITH_ITEM, range, {target, var}));
1091
+ }
1092
+ };
1093
+
1094
+ // With represents a with statement consisting of a list of with items and a
1095
+ // body of statements.
1096
+ struct With : public Stmt {
1097
+ explicit With(const TreeRef& tree) : Stmt(tree) {
1098
+ tree_->match(TK_WITH);
1099
+ }
1100
+
1101
+ List<WithItem> targets() const {
1102
+ return List<WithItem>(subtree(0));
1103
+ }
1104
+
1105
+ List<Stmt> body() const {
1106
+ return List<Stmt>(subtree(1));
1107
+ }
1108
+
1109
+ static With create(
1110
+ const SourceRange& range,
1111
+ const List<WithItem>& targets,
1112
+ const List<Stmt>& body) {
1113
+ return With(Compound::create(TK_WITH, range, {targets, body}));
1114
+ }
1115
+ };
1116
+
1117
+ struct TernaryIf : public Expr {
1118
+ explicit TernaryIf(const TreeRef& tree) : Expr(tree) {
1119
+ tree_->matchNumSubtrees(TK_IF_EXPR, 3);
1120
+ };
1121
+ Expr cond() const {
1122
+ return Expr(subtree(0));
1123
+ }
1124
+ Expr true_expr() const {
1125
+ return Expr(subtree(1));
1126
+ }
1127
+ Expr false_expr() const {
1128
+ return Expr(subtree(2));
1129
+ }
1130
+ static TernaryIf create(
1131
+ const SourceRange& range,
1132
+ const Expr& cond,
1133
+ const Expr& true_expr,
1134
+ const Expr& false_expr) {
1135
+ return TernaryIf(
1136
+ Compound::create(TK_IF_EXPR, range, {cond, true_expr, false_expr}));
1137
+ };
1138
+ };
1139
+
1140
+ struct ListLiteral : public Expr {
1141
+ explicit ListLiteral(const TreeRef& tree) : Expr(tree) {
1142
+ tree_->match(TK_LIST_LITERAL);
1143
+ }
1144
+ List<Expr> inputs() const {
1145
+ return subtree(0);
1146
+ }
1147
+ static ListLiteral create(
1148
+ const SourceRange& range,
1149
+ const List<Expr>& inputs) {
1150
+ return ListLiteral(Compound::create(TK_LIST_LITERAL, range, {inputs}));
1151
+ }
1152
+ };
1153
+
1154
+ struct TupleLiteral : public Expr {
1155
+ explicit TupleLiteral(const TreeRef& tree) : Expr(tree) {
1156
+ tree_->match(TK_TUPLE_LITERAL);
1157
+ }
1158
+ List<Expr> inputs() const {
1159
+ return subtree(0);
1160
+ }
1161
+ static TupleLiteral create(
1162
+ const SourceRange& range,
1163
+ const List<Expr>& inputs) {
1164
+ return TupleLiteral(Compound::create(TK_TUPLE_LITERAL, range, {inputs}));
1165
+ }
1166
+ };
1167
+
1168
+ struct DictLiteral : public Expr {
1169
+ explicit DictLiteral(const TreeRef& tree) : Expr(tree) {
1170
+ tree_->match(TK_DICT_LITERAL);
1171
+ }
1172
+ List<Expr> key_inputs() const {
1173
+ return subtree(0);
1174
+ }
1175
+ List<Expr> value_inputs() const {
1176
+ return subtree(1);
1177
+ }
1178
+ static DictLiteral create(
1179
+ const SourceRange& range,
1180
+ const List<Expr>& keys,
1181
+ const List<Expr>& values) {
1182
+ return DictLiteral(
1183
+ Compound::create(TK_DICT_LITERAL, range, {keys, values}));
1184
+ }
1185
+ };
1186
+
1187
+ struct Starred : public Expr {
1188
+ explicit Starred(const TreeRef& tree) : Expr(tree) {
1189
+ tree_->match(TK_STARRED);
1190
+ }
1191
+ Expr expr() const {
1192
+ return Expr(subtree(0));
1193
+ }
1194
+ static Starred create(const SourceRange& range, const Expr& expr) {
1195
+ return Starred(Compound::create(TK_STARRED, range, {expr}));
1196
+ }
1197
+ };
1198
+
1199
+ struct Delete : public Stmt {
1200
+ explicit Delete(const TreeRef& tree) : Stmt(tree) {
1201
+ tree_->match(TK_DELETE);
1202
+ }
1203
+ List<Expr> targets() const {
1204
+ return subtree(0);
1205
+ }
1206
+ static Delete create(const SourceRange& range, const List<Expr>& targets) {
1207
+ return Delete(Compound::create(TK_DELETE, range, {targets}));
1208
+ }
1209
+ };
1210
+
1211
+ /*
1212
+ * NOTE: transforming PEP 604 union into equivalent union type
1213
+ *
1214
+ * NOTE: Union[int, float] parses into:
1215
+ * <EXPR> expr:(subscript
1216
+ * (variable (ident Union))
1217
+ * (list
1218
+ * (variable (ident int))
1219
+ * (variable (ident float))))
1220
+ * <KIND> subscript
1221
+ *
1222
+ * NOTE: (int | float) parses into:
1223
+ * <EXPR> expr:(|
1224
+ * (variable (ident int))
1225
+ * (variable (ident float)))
1226
+ * <KIND> |
1227
+ */
1228
+
1229
+ inline void _flatten_pep604_union(
1230
+ const torch::jit::Expr& node,
1231
+ std::vector<torch::jit::Expr>* result) {
1232
+ // flatten possibly nested union expressions like (int | (float | str))
1233
+ // into a flat list of expressions like [int, float, str]
1234
+ if (node.kind() == '|') {
1235
+ auto as_binop = torch::jit::BinOp(node);
1236
+ _flatten_pep604_union(as_binop.lhs(), result);
1237
+ _flatten_pep604_union(as_binop.rhs(), result);
1238
+ } else {
1239
+ result->push_back(node);
1240
+ }
1241
+ }
1242
+
1243
+ inline std::vector<Expr> get_pep604_union_members(const Expr& node) {
1244
+ std::vector<Expr> result;
1245
+ _flatten_pep604_union(node, &result);
1246
+ return result;
1247
+ }
1248
+
1249
+ // Flattens a PEP 604 union into a classical union.
1250
+ // For example, ((x | y) | z) is transformed into Union[x, y, z].
1251
+ inline Expr pep604union_to_union(const Expr& expr) {
1252
+ // noop if not a pep604 union
1253
+ if (expr.kind() != '|')
1254
+ return expr;
1255
+
1256
+ // In order to support unions with more than 2 operands ((x|y)|z), we need to
1257
+ // recursively flatten the tree of | expressions.
1258
+ auto members = get_pep604_union_members(expr);
1259
+ auto synthesised_union = Subscript::create(
1260
+ expr.range(),
1261
+ Var::create(expr.range(), Ident::create(expr.range(), "Union")),
1262
+ List<Expr>::create(expr.range(), members));
1263
+ return std::move(synthesised_union);
1264
+ }
1265
+
1266
+ } // namespace jit
1267
+ } // namespace torch
1268
+
1269
+ namespace std {
1270
+
1271
+ template <typename T>
1272
+ struct iterator_traits<torch::jit::ListIterator<T>>
1273
+ : std::iterator_traits<torch::jit::TreeList::const_iterator> {};
1274
+
1275
+ } // namespace std
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/versioned_symbols.h ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <caffe2/serialize/versions.h>
4
+ #include <torch/csrc/Export.h>
5
+ #include <torch/csrc/jit/api/module.h>
6
+
7
+ #include <cstdint>
8
+
9
+ namespace torch {
10
+ namespace jit {
11
+ // Maps the given symbol into an implementation of its behavior at the
12
+ // given version.
13
+ // See note [Versioned Symbols]
14
+ TORCH_API Symbol
15
+ get_symbol_for_version(const Symbol name, const uint64_t version);
16
+
17
+ // Maps the given kind to the minimum version that supports it.
18
+ // See note [Dynamic Versions and torch.jit.save vs. torch.save]
19
+ TORCH_API uint64_t get_min_version_for_kind(const NodeKind& kind);
20
+ } // namespace jit
21
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/code.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <vector>
4
+
5
+ #include <ATen/core/ivalue.h>
6
+ #include <ATen/core/operator_name.h>
7
+ #include <torch/csrc/jit/runtime/instruction.h>
8
+
9
+ namespace torch {
10
+ namespace jit {
11
+ namespace mobile {
12
+
13
+ using Stack = std::vector<c10::IValue>;
14
+ using DebugHandle = int64_t;
15
+
16
+ class Function;
17
+
18
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
19
+ struct Code {
20
+ std::vector<Instruction> instructions_;
21
+ std::vector<DebugHandle> debug_handles_;
22
+ std::vector<c10::OperatorName> op_names_;
23
+ std::vector<int> operator_input_sizes_;
24
+ std::vector<std::function<void(Stack&)>> operators_;
25
+ std::vector<c10::IValue> constants_;
26
+ std::vector<c10::TypePtr> types_;
27
+ // TODO After we actually export CALL instructions we can remove this.
28
+ // We may need a two-stage importing scheme, where we firstly construct all
29
+ // function objects, and then append referenced function pointers. This could
30
+ // be done in parseMethods().
31
+ std::vector<mobile::Function*> functions_;
32
+ size_t register_size_ = 0; // Aggregated output size.
33
+ // initialized means operators_ array is filled with operators
34
+ bool initialized = false;
35
+ };
36
+
37
+ } // namespace mobile
38
+ } // namespace jit
39
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/import_data.h ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/TensorBase.h>
4
+ #include <c10/core/Device.h>
5
+ #include <c10/util/Optional.h>
6
+ #include <torch/csrc/jit/mobile/module.h>
7
+
8
+ #include <istream>
9
+ #include <map>
10
+ #include <string>
11
+
12
+ namespace torch {
13
+ namespace jit {
14
+
15
+ /**
16
+ * Loads named parameters from the serialized data in @p in.
17
+ *
18
+ * Calls #TORCH_CHECK() if the data format is not recognized.
19
+ */
20
+ TORCH_API std::map<std::string, at::Tensor> _load_parameters(
21
+ std::istream& in,
22
+ c10::optional<at::Device> device = c10::nullopt);
23
+
24
+ /**
25
+ * Loads named parameters from the serialized data in @p filename.
26
+ *
27
+ * Calls #TORCH_CHECK() if the data format is not recognized.
28
+ */
29
+ TORCH_API std::map<std::string, at::Tensor> _load_parameters(
30
+ const std::string& filename,
31
+ c10::optional<at::Device> device = c10::nullopt);
32
+
33
+ // NOTE: Please prefer using _load_parameters over using the function below.
34
+ TORCH_API std::map<std::string, at::Tensor> mobile_module_to_parameter_map(
35
+ const mobile::Module& module);
36
+
37
+ } // namespace jit
38
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/method.h ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/ivalue.h>
4
+ #include <torch/csrc/jit/mobile/function.h>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+ namespace mobile {
9
+
10
+ class Module;
11
+
12
+ struct TORCH_API Method {
13
+ Method(const Module* owner, Function* function);
14
+
15
+ void run(Stack& stack) const;
16
+ void run(Stack&& stack) const {
17
+ run(stack);
18
+ }
19
+
20
+ c10::IValue operator()(std::vector<c10::IValue> stack) const;
21
+
22
+ const std::string& name() const {
23
+ return function_->name();
24
+ }
25
+
26
+ int64_t get_debug_handle(size_t pc) const {
27
+ return function_->get_debug_handle(pc);
28
+ }
29
+
30
+ Function& function() const {
31
+ return *function_;
32
+ }
33
+
34
+ private:
35
+ // Methods are uniquely owned by a single module.
36
+ // This raw pointer allows referencing the module
37
+ const Module* owner_;
38
+
39
+ // Underlying unbound function
40
+ Function* function_;
41
+ };
42
+
43
+ } // namespace mobile
44
+ } // namespace jit
45
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/module.h ADDED
@@ -0,0 +1,197 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <ATen/core/jit_type.h>
3
+ #include <torch/csrc/jit/mobile/debug_info.h>
4
+ #include <torch/csrc/jit/mobile/function.h>
5
+ #include <torch/csrc/jit/mobile/method.h>
6
+ #include <torch/csrc/jit/mobile/quantization.h>
7
+
8
+ #include <utility>
9
+
10
+ namespace torch {
11
+ namespace jit {
12
+ namespace mobile {
13
+ using Stack = std::vector<c10::IValue>;
14
+
15
+ // A CompilationUnit object is the one that gets executed by the lite
16
+ // interpreter.
17
+ //
18
+ // A CompilationUnit object contains a list of Method Objects. These are methods
19
+ // that appear in the original PyTorch Model. These method correspond to Python
20
+ // member functions of the Model class.
21
+ //
22
+ // Methods in turn contain a Function, and a back-pointer to the Module that
23
+ // owns this Method instance.
24
+ //
25
+ // A Function contains a Code Object (code_) which is defined in interpreter.h
26
+ //
27
+ // A Code object contains the following:
28
+ //
29
+ // std::vector<Instruction> instructions_;
30
+ // std::vector<c10::OperatorName> op_names_;
31
+ // std::vector<std::function<void(Stack&)>> operators_;
32
+ // std::vector<c10::IValue> constants_;
33
+ // std::vector<c10::TypePtr> types_;
34
+ // size_t register_size_; // Aggregated output size.
35
+ //
36
+ class CompilationUnit {
37
+ public:
38
+ void register_function(std::unique_ptr<Function> fn);
39
+ std::vector<std::unique_ptr<Function>>& methods() {
40
+ return methods_;
41
+ }
42
+ const std::vector<std::unique_ptr<Function>>& methods() const {
43
+ return methods_;
44
+ }
45
+ Function* find_function(const c10::QualifiedName& qn);
46
+ const Function* find_function(const c10::QualifiedName& qn) const;
47
+
48
+ void unsafeRemoveFunction(const int64_t index) {
49
+ methods_.erase(methods_.begin() + index);
50
+ }
51
+
52
+ private:
53
+ std::vector<std::unique_ptr<Function>> methods_;
54
+ };
55
+
56
+ // A Torch Mobile Module is a representation of the model (trained in case
57
+ // of inference). A Mobile Module contains
58
+ //
59
+ // 1. data (object_)
60
+ // 2. metadata (optional) about the model (metadata_ from the metadata.pkl
61
+ // file added after training)
62
+ // 3. Compilation Unit (cu_)
63
+ //
64
+ class TORCH_API Module {
65
+ public:
66
+ Module(
67
+ c10::intrusive_ptr<c10::ivalue::Object> object,
68
+ std::shared_ptr<CompilationUnit> cu)
69
+ : object_(std::move(object)), cu_(std::move(cu)) {}
70
+ Module() = default;
71
+ Method get_method(const std::string& method_name) const;
72
+ template <typename... Types>
73
+ c10::IValue run_method(const std::string& method_name, Types&&... args) {
74
+ return get_method(method_name)({IValue(std::forward<Types>(args))...});
75
+ }
76
+ c10::IValue forward(std::vector<c10::IValue> inputs) {
77
+ return get_method("forward")(std::move(inputs));
78
+ }
79
+ c10::optional<Method> find_method(const std::string& basename) const;
80
+
81
+ const std::string name() const {
82
+ return object_->name();
83
+ }
84
+ const std::vector<at::IValue>& slots() const {
85
+ return object_->slots();
86
+ }
87
+ const c10::intrusive_ptr<c10::ivalue::Object> _ivalue() const {
88
+ return object_;
89
+ }
90
+ const std::vector<at::Tensor> parameters() const;
91
+ const std::map<std::string, at::Tensor> named_parameters() const;
92
+ std::string get_forward_method_debug_info(int64_t debug_handle) const;
93
+ std::string getModuleHierarchy(const int64_t debug_handle) const;
94
+ std::string getCallStack(const int64_t debug_handle) const;
95
+ /// Enables "training" mode.
96
+ void train(bool on = true);
97
+ /// Calls train(false) to enable "eval" mode.
98
+ void eval() {
99
+ train(/*on=*/false);
100
+ }
101
+ /// True if the module is in training mode.
102
+ bool is_training() const;
103
+ const std::unordered_map<std::string, std::string> getMetadata() const {
104
+ return metadata_;
105
+ }
106
+ void setMetadata(
107
+ const std::unordered_map<std::string, std::string>& metadata) {
108
+ metadata_ = metadata;
109
+ }
110
+ const std::vector<Method> get_methods() const;
111
+
112
+ c10::IValue attr(const std::string& name, c10::IValue or_else) const {
113
+ if (auto r = object_->type()->findAttributeSlot(name)) {
114
+ return object_->getSlot(*r);
115
+ }
116
+ if (auto r = object_->type()->findConstantSlot(name)) {
117
+ return object_->type()->getConstant(*r);
118
+ }
119
+ return or_else;
120
+ }
121
+
122
+ void setDebugTable(MobileDebugTable&& debug_table) {
123
+ debug_table_ = std::move(debug_table);
124
+ }
125
+ const MobileDebugTable& getDebugTable() const {
126
+ return debug_table_;
127
+ }
128
+
129
+ void setHasDebugHandles(bool has_debug_handles) {
130
+ has_debug_handles_ = has_debug_handles;
131
+ }
132
+
133
+ bool hasDebugHandles() const {
134
+ return has_debug_handles_;
135
+ }
136
+
137
+ const CompilationUnit& compilation_unit() const {
138
+ return *cu_.get();
139
+ }
140
+
141
+ void set_delete_memory(std::shared_ptr<char> delete_mem) {
142
+ mem_to_delete_ = std::move(delete_mem);
143
+ }
144
+
145
+ void set_min_operator_version(int64_t version) {
146
+ min_operator_version_ = version;
147
+ }
148
+
149
+ int64_t min_operator_version() const {
150
+ return min_operator_version_;
151
+ }
152
+
153
+ void set_bytecode_version(int64_t version) {
154
+ bytecode_version_ = version;
155
+ }
156
+
157
+ int64_t bytecode_version() const {
158
+ return bytecode_version_;
159
+ }
160
+
161
+ private:
162
+ friend class quantization::PTQQuanizationHelper;
163
+
164
+ bool compareMethodSchemas(
165
+ const std::string& name_1,
166
+ const std::string& name_2);
167
+
168
+ void unsafeRemoveMethod(const std::string& basename);
169
+
170
+ void unsafeCopyMethod(
171
+ const std::string& new_method_name,
172
+ const Function& to_be_copied);
173
+
174
+ c10::intrusive_ptr<c10::ivalue::Object> object_;
175
+ std::unordered_map<std::string, std::string> metadata_;
176
+ std::shared_ptr<CompilationUnit> cu_;
177
+ MobileDebugTable debug_table_;
178
+ bool has_debug_handles_ = false;
179
+ int64_t min_operator_version_ = 4;
180
+ int64_t bytecode_version_ = 4;
181
+
182
+ // Extra handle for the module to delete when itself is deleted
183
+ std::shared_ptr<char> mem_to_delete_;
184
+ };
185
+
186
+ struct TORCH_API ModuleInfo {
187
+ uint64_t bytecode_version;
188
+ uint64_t operator_version;
189
+ std::unordered_map<std::string, int> opname_to_num_args;
190
+ std::unordered_set<std::string> function_names;
191
+ std::unordered_set<std::string> type_names;
192
+ };
193
+ TORCH_API ModuleInfo get_module_info(const mobile::Module& module);
194
+
195
+ } // namespace mobile
196
+ } // namespace jit
197
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/observer.h ADDED
@@ -0,0 +1,110 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/util/ThreadLocalDebugInfo.h>
4
+ #include <string>
5
+ #include <unordered_map>
6
+ #include <vector>
7
+
8
+ namespace torch {
9
+
10
+ class MobileDebugInfo : public c10::DebugInfoBase {
11
+ public:
12
+ const std::string& getModelName() {
13
+ return model_name_;
14
+ }
15
+
16
+ void setModelName(const std::string& model_name) {
17
+ model_name_ = model_name;
18
+ }
19
+
20
+ const std::string& getMethodName() {
21
+ return method_name_;
22
+ }
23
+
24
+ void setMethodName(const std::string& method_name) {
25
+ method_name_ = method_name;
26
+ }
27
+
28
+ size_t getOpIdx() {
29
+ return op_idx_;
30
+ }
31
+
32
+ void setOpIdx(size_t op_idx) {
33
+ op_idx_ = op_idx;
34
+ }
35
+
36
+ private:
37
+ std::string model_name_;
38
+ std::string method_name_;
39
+ // TODO: Kimish
40
+ // If we launch a thread such as for at::launch, interepter continuation
41
+ // and if the caching allocator is enabled in the base thread
42
+ // then, in order to propagate this information, that is caching allocator
43
+ // is enabled, across thread boundaries we can use the mechanism provided
44
+ // by ThreadLocalDebugInfo
45
+ // Once the thread local MobileDebugInfo is accessible in the launched
46
+ // thread, it can be accessed in that thread and that thread can set
47
+ // its own thread local CachingAllocatorInfo.
48
+ // However, we cannot expect every launched thread to extract and set
49
+ // its own thread local copy of CachingAllocatorInfo.
50
+ // But this can be done in lite interpreter, where in the run method
51
+ // it can do info =
52
+ // c10::ThreadLocalDebugInfo::get(c10::DebugInfoKind::MOBILE_RUNTIME_INFO))
53
+ // .get_caching_allocator_info();
54
+ // GetThreadLocalCachingAllocatorInfo() = info;
55
+ // Other option is to have MobileDebugInfo itself be the place where thread
56
+ // local copy of CachingAllocatorInfo is stored. Then
57
+ // DefaultMobileCPUAllocator inspects this to decide if to use
58
+ // CachingAllocator. However, current lite interpreter does not support FORK,
59
+ // thus from the run method of lite interpreter we are not really gonna launch
60
+ // another instance of lite interpreter in a different thread. So for now not
61
+ // getting bothered about passing CachingAllocatorInfo across thread
62
+ // boundaries. c10::CachingAllocatorInfo caching_allocator_info;
63
+ size_t op_idx_ = 0;
64
+ };
65
+
66
+ class MobileModuleObserver {
67
+ public:
68
+ virtual ~MobileModuleObserver() = default;
69
+
70
+ virtual void onEnterRunMethod(const int32_t) {}
71
+ virtual void onExitRunMethod(
72
+ const std::unordered_map<std::string, std::string>&,
73
+ const std::string&,
74
+ const int32_t) {}
75
+ virtual void onFailRunMethod(
76
+ const std::unordered_map<std::string, std::string>&,
77
+ const std::string&,
78
+ const int32_t,
79
+ const char*) {}
80
+ virtual void onEnterLoadModel(const int32_t) {}
81
+ virtual void onExitLoadModel(
82
+ const int32_t,
83
+ const std::unordered_map<std::string, std::string>&) {
84
+ } // key: filename, value: file content
85
+ virtual void onFailLoadModel(const int32_t, const char*) {}
86
+ virtual void onFailLoadModel(
87
+ const int32_t,
88
+ const char*,
89
+ const std::unordered_map<std::string, std::string>&) {}
90
+ virtual std::vector<std::string> getDefaultExtraFiles() = 0;
91
+ virtual std::unordered_map<std::string, std::string> processMetadataFromExtra(
92
+ const std::unordered_map<std::string, std::string>&) = 0;
93
+ };
94
+
95
+ class MobileObserverConfig {
96
+ public:
97
+ void setModuleObserver(std::unique_ptr<MobileModuleObserver> reporter) {
98
+ module_observer_ = std::move(reporter);
99
+ }
100
+ MobileModuleObserver* getModuleObserver() {
101
+ return module_observer_.get();
102
+ }
103
+
104
+ private:
105
+ std::unique_ptr<MobileModuleObserver> module_observer_;
106
+ };
107
+
108
+ MobileObserverConfig& observerConfig();
109
+
110
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/parse_operators.h ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <torch/csrc/jit/mobile/function.h>
3
+
4
+ namespace torch {
5
+ namespace jit {
6
+ using c10::IValue;
7
+
8
+ enum MobileModuleLoadOptions {
9
+ OPERATOR_CHECK = 1,
10
+ // PARSE_ALL_EXTRA_FILE_MAPS is used to gate for ExtraFileMaps to pull all
11
+ // files automatically without explicit entries mapping. Refer to PR for a
12
+ // detail: https://github.com/pytorch/pytorch/pull/99747
13
+ PARSE_ALL_EXTRA_FILE_MAPS = 2,
14
+ };
15
+
16
+ const uint64_t kDefaultMobileLoadOptions =
17
+ MobileModuleLoadOptions::OPERATOR_CHECK;
18
+
19
+ namespace mobile {
20
+
21
+ TORCH_API void parseOperators(
22
+ c10::ivalue::TupleElements&& ops_list,
23
+ const uint64_t& module_load_options,
24
+ mobile::Function* function);
25
+ } // namespace mobile
26
+ } // namespace jit
27
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/profiler_edge.h ADDED
@@ -0,0 +1,119 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <torch/csrc/autograd/profiler_kineto.h>
3
+ #include <torch/csrc/jit/mobile/module.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+ namespace mobile {
8
+
9
+ // If we don't have kineto available then edge profiler does not
10
+ // work since it relies on Kineto
11
+ #ifdef USE_KINETO
12
+ class TORCH_API KinetoEdgeCPUProfiler {
13
+ public:
14
+ // This profiler only profiles KINETO events
15
+ // No GPU_FALLBACK or NVTX
16
+ /*
17
+ * @param m is the instance of mobile Module which is being profiled.
18
+ * Note that this implies that KinetoEdgeCPUProfiler can be used
19
+ * to profile specific Module (see usage below), unlike ProfilerKineto
20
+ * which can profile pytorch runtime in arbitrary scope.
21
+ * @param fname is the name of the file to which chrome trace is written.
22
+ * @param report_input_shapes: whether to record shapes of op's inputs.
23
+ * @param with_stack: whether to record model's python stacktrace for the op.
24
+ * @param with_flops: whether to report flops corresponding to the op.
25
+ * @param with_modules: whether to report original python module
26
+ * hierarchy to which the op belongs.
27
+ * @param events
28
+ * @param adjust_vulkan_timestamps: whether to adjust vulkan timestamps from
29
+ * query pool to align with cpu event times
30
+ *
31
+ * Usage pattern for this profiler must be as follows:
32
+ *
33
+ * {
34
+ * KinetoEdgeCPUProfiler(m, filename, args);
35
+ * m.forward(...);
36
+ * }
37
+ *
38
+ * The reason being that KinetoEdgeCPUProfiler has a dependency on Module
39
+ * and thus it must not outlive it.
40
+ *
41
+ * Thus, when KinetoEdgeCPUProfiler is used as RAII to do profiling
42
+ * within certain scope. In that scope, the captured reference to
43
+ * Module will outlive KinetoEdgeCPUProfiler. This is guaranteed because
44
+ * KinetoEdgeCPUProfiler must be constructed later than Module, on stack.
45
+ *
46
+ * An example of the anti-pattern and wrong usage is:
47
+ *
48
+ * std::shared_ptr<KinetoEdgeCPUProfiler> profiler(m, filename, args);
49
+ * m.forward(...);
50
+ *
51
+ * Since KinetoEdgeCPUProfiler object would then be constructed on heap
52
+ * with its lifetime managed manually or via smart pointers.
53
+ */
54
+ KinetoEdgeCPUProfiler(
55
+ const torch::jit::mobile::Module& m,
56
+ const std::string& fname,
57
+ const bool report_input_shapes = false,
58
+ const bool profile_memory = false,
59
+ const bool with_stack = false,
60
+ const bool with_flops = false,
61
+ const bool with_modules = false,
62
+ std::vector<std::string> events = {},
63
+ const bool adjust_vulkan_timestamps = false);
64
+
65
+ const std::unique_ptr<torch::autograd::profiler::ProfilerResult>&
66
+ disableProfiler();
67
+ const std::unique_ptr<torch::autograd::profiler::ProfilerResult>&
68
+ getProfilerResult();
69
+ void recordBackendEvent(
70
+ const int64_t start_time_us,
71
+ const int64_t end_time_us,
72
+ const int64_t debug_handle,
73
+ const std::string& event_name,
74
+ const std::string& backend_name);
75
+ void recordBackendMemoryEvent(
76
+ void* ptr,
77
+ int64_t alloc_size,
78
+ size_t total_allocated,
79
+ size_t total_reserved,
80
+ c10::Device device);
81
+
82
+ ~KinetoEdgeCPUProfiler();
83
+
84
+ private:
85
+ /*
86
+ * We store a reference to Module to make such dependency explicit, since
87
+ * a Module reference is already stored in a functor.
88
+ */
89
+ const mobile::Module& m_;
90
+ std::string trace_file_name_;
91
+ std::unique_ptr<torch::autograd::profiler::ProfilerResult> profiler_result_;
92
+ };
93
+
94
+ TORCH_API KinetoEdgeCPUProfiler* getCurrentEdgeProfiler();
95
+
96
+ #define RECORD_BACKEND_EVENT_TO_EDGE_PROFILER( \
97
+ start_time_us, end_time_us, debug_handle, event_name, backend_name) \
98
+ if (mobile::getCurrentEdgeProfiler()) { \
99
+ mobile::getCurrentEdgeProfiler()->recordBackendEvent( \
100
+ start_time_us, end_time_us, debug_handle, event_name, backend_name); \
101
+ }
102
+
103
+ #define RECORD_BACKEND_MEMORY_EVENT_TO_EDGE_PROFILER( \
104
+ ptr, alloc_size, total_allocated, total_reserved, device) \
105
+ if (mobile::getCurrentEdgeProfiler()) { \
106
+ mobile::getCurrentEdgeProfiler()->recordBackendMemoryEvent( \
107
+ ptr, alloc_size, total_allocated, total_reserved, device); \
108
+ }
109
+ #else
110
+
111
+ #define RECORD_BACKEND_EVENT_TO_EDGE_PROFILER( \
112
+ start_time_us, end_time_us, debug_handle, event_name, backend_name)
113
+
114
+ #define RECORD_BACKEND_MEMORY_EVENT_TO_EDGE_PROFILER( \
115
+ ptr, alloc_size, total_allocated, total_reserved, device)
116
+ #endif
117
+ } // namespace mobile
118
+ } // namespace jit
119
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/init.h ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/utils/pybind.h>
4
+
5
+ namespace torch::jit {
6
+
7
+ void initJITBindings(PyObject* module);
8
+
9
+ } // namespace torch::jit
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/pybind.h ADDED
@@ -0,0 +1,213 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/python_headers.h>
4
+
5
+ #include <ATen/core/ivalue.h>
6
+ #include <ATen/core/symbol.h>
7
+ #include <c10/util/irange.h>
8
+ #include <torch/csrc/DynamicTypes.h>
9
+ #include <torch/csrc/THP.h>
10
+ #include <torch/csrc/autograd/variable.h>
11
+ #include <torch/csrc/jit/frontend/tracer.h>
12
+ #include <torch/csrc/jit/python/pybind_utils.h>
13
+ #include <torch/csrc/utils/pybind.h>
14
+
15
+ #include <pybind11/functional.h>
16
+ #include <pybind11/pybind11.h>
17
+ #include <pybind11/stl.h>
18
+
19
+ namespace py = pybind11;
20
+
21
+ namespace torch::jit {
22
+
23
+ // This is a variant of shared_ptr that "sees through" a wrapper.
24
+ // We use it to convert Value, Node, Block and node to "wrapped" Python
25
+ // values. When we destruct the C++ object, the wrapper's pointer will
26
+ // be set to 0 and any future dereferencing will throw. We need this
27
+ // because the Python objects may hang around after the C++ object
28
+ // has already been destroyed.
29
+ // This also needs the magic type_caster below, which is from the
30
+ // workaround offered in https://github.com/pybind/pybind11/issues/2751
31
+ template <typename T>
32
+ class unwrapping_shared_ptr {
33
+ static_assert(
34
+ std::is_same<T, torch::jit::Value>::value ||
35
+ std::is_same<T, torch::jit::Node>::value ||
36
+ std::is_same<T, torch::jit::Block>::value,
37
+ "unwrapping type only defined for Graph object types");
38
+
39
+ private:
40
+ std::shared_ptr<torch::jit::Wrap<T>> impl;
41
+
42
+ public:
43
+ unwrapping_shared_ptr() : impl({}) {}
44
+ explicit unwrapping_shared_ptr(T* p) : impl(p->wrap()) {
45
+ impl->clear_cb = &clear_registered_instances;
46
+ }
47
+ T* get() const {
48
+ if (!impl->elem) {
49
+ throw std::logic_error("has been invalidated");
50
+ }
51
+ return impl->elem;
52
+ }
53
+ // we need to disable the overloaded & for PyBind11 < 2.3 due to a bug;
54
+ // see https://github.com/pybind/pybind11/pull/1435
55
+ #if (PYBIND11_VERSION_MAJOR > 2) || \
56
+ ((PYBIND11_VERSION_MAJOR == 2) && (PYBIND11_VERSION_MINOR >= 3))
57
+ T** operator&() {
58
+ if (!impl->elem) {
59
+ throw std::logic_error("has been invalidated");
60
+ }
61
+ return &(impl->elem);
62
+ }
63
+ #endif
64
+ };
65
+
66
+ } // namespace torch::jit
67
+
68
+ PYBIND11_DECLARE_HOLDER_TYPE(T, torch::jit::unwrapping_shared_ptr<T>, true);
69
+
70
+ namespace pybind11::detail {
71
+
72
+ #define CREATE_UNWRAPPING_CASTER(Class) \
73
+ template <> \
74
+ struct type_caster<Class> : public type_caster_base<Class> { \
75
+ public: \
76
+ using type = Class; \
77
+ using holder_type = torch::jit::unwrapping_shared_ptr<Class>; \
78
+ \
79
+ bool load(handle src, bool convert) { \
80
+ return load_impl<type_caster<Class>>(src, convert); \
81
+ } \
82
+ \
83
+ explicit operator type*() { \
84
+ return static_cast<type*>(value); \
85
+ } \
86
+ explicit operator type&() { \
87
+ return *static_cast<type*>(value); \
88
+ } \
89
+ \
90
+ protected: \
91
+ friend class type_caster_generic; \
92
+ \
93
+ bool load_value(value_and_holder&& v_h) { \
94
+ if (v_h.holder_constructed()) { \
95
+ value = v_h.template holder<holder_type>().get(); \
96
+ return true; \
97
+ } else { \
98
+ throw cast_error( \
99
+ "Unable to cast from non-held to held instance (#Class& to Holder<#Class>)"); \
100
+ } \
101
+ } \
102
+ }
103
+
104
+ CREATE_UNWRAPPING_CASTER(torch::jit::Node);
105
+ CREATE_UNWRAPPING_CASTER(torch::jit::Value);
106
+ CREATE_UNWRAPPING_CASTER(torch::jit::Block);
107
+
108
+ #undef CREATE_UNWRAPPING_CASTER
109
+
110
+ template <>
111
+ struct type_caster<torch::jit::IValue> {
112
+ public:
113
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
114
+ PYBIND11_TYPE_CASTER(torch::jit::IValue, _("IValue"));
115
+
116
+ bool load(handle src, bool) {
117
+ try {
118
+ value = torch::jit::toTypeInferredIValue(src);
119
+ return true;
120
+ } catch (std::exception& e) {
121
+ return false;
122
+ }
123
+ }
124
+
125
+ static handle cast(
126
+ torch::jit::IValue src,
127
+ return_value_policy /* policy */,
128
+ handle /* parent */) {
129
+ return torch::jit::toPyObject(std::move(src)).release();
130
+ }
131
+ };
132
+
133
+ template <>
134
+ struct type_caster<torch::jit::Symbol> {
135
+ public:
136
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
137
+ PYBIND11_TYPE_CASTER(torch::jit::Symbol, _("Symbol"));
138
+
139
+ bool load(handle src, bool) {
140
+ // TODO: Is there a way to py::cast that doesn't raise an exception on
141
+ // failure? Can we catch pybind11::cast_error here instead?
142
+ std::string src_str;
143
+ try {
144
+ src_str = py::cast<std::string>(src);
145
+ } catch (std::exception& e) {
146
+ return false;
147
+ }
148
+ value = torch::jit::Symbol::fromQualString(src_str);
149
+ return true;
150
+ }
151
+
152
+ static handle cast(
153
+ torch::jit::Symbol src,
154
+ return_value_policy /* policy */,
155
+ handle /* parent */) {
156
+ return py::cast(std::string(src.toQualString()), return_value_policy::copy)
157
+ .release();
158
+ }
159
+ };
160
+
161
+ template <>
162
+ struct type_caster<torch::jit::AttributeKind> {
163
+ public:
164
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
165
+ PYBIND11_TYPE_CASTER(torch::jit::AttributeKind, _("AttributeKind"));
166
+
167
+ bool load(handle src, bool) {
168
+ return false;
169
+ }
170
+
171
+ static handle cast(
172
+ torch::jit::AttributeKind src,
173
+ return_value_policy /* policy */,
174
+ handle /* parent */) {
175
+ return py::cast(
176
+ std::string(torch::jit::toString(src)),
177
+ return_value_policy::copy)
178
+ .release();
179
+ }
180
+ };
181
+
182
+ // See https://github.com/pybind/pybind11/issues/637
183
+ using ListCasterBase = pybind11::detail::
184
+ list_caster<std::vector<torch::jit::Node*>, torch::jit::Node*>;
185
+ template <>
186
+ struct type_caster<std::vector<torch::jit::Node*>> : ListCasterBase {
187
+ static handle cast(
188
+ const std::vector<torch::jit::Node*>& src,
189
+ return_value_policy,
190
+ handle parent) {
191
+ return ListCasterBase::cast(src, return_value_policy::reference, parent);
192
+ }
193
+ static handle cast(
194
+ const std::vector<torch::jit::Node*>* src,
195
+ return_value_policy pol,
196
+ handle parent) {
197
+ return cast(*src, pol, parent);
198
+ }
199
+ };
200
+
201
+ } // namespace pybind11::detail
202
+
203
+ namespace torch::jit {
204
+
205
+ static inline py::tuple tuple_tail(const py::tuple& tup) {
206
+ py::tuple r(tup.size() - 1);
207
+ for (const auto i : c10::irange(1, tup.size())) {
208
+ r[i - 1] = tup[i];
209
+ }
210
+ return r;
211
+ }
212
+
213
+ } // namespace torch::jit