applied-ai-018 committed
Commit ac53a09 · verified · 1 Parent(s): 1bf3e6e

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. ckpts/universal/global_step20/zero/12.mlp.dense_h_to_4h.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step20/zero/12.mlp.dense_h_to_4h.weight/exp_avg_sq.pt +3 -0
  3. ckpts/universal/global_step20/zero/12.mlp.dense_h_to_4h.weight/fp32.pt +3 -0
  4. ckpts/universal/global_step20/zero/16.attention.query_key_value.weight/exp_avg.pt +3 -0
  5. ckpts/universal/global_step20/zero/16.attention.query_key_value.weight/exp_avg_sq.pt +3 -0
  6. ckpts/universal/global_step20/zero/16.attention.query_key_value.weight/fp32.pt +3 -0
  7. ckpts/universal/global_step20/zero/16.mlp.dense_4h_to_h.weight/fp32.pt +3 -0
  8. ckpts/universal/global_step20/zero/24.mlp.dense_4h_to_h.weight/exp_avg.pt +3 -0
  9. ckpts/universal/global_step20/zero/24.mlp.dense_4h_to_h.weight/exp_avg_sq.pt +3 -0
  10. ckpts/universal/global_step20/zero/24.mlp.dense_4h_to_h.weight/fp32.pt +3 -0
  11. ckpts/universal/global_step20/zero/26.post_attention_layernorm.weight/fp32.pt +3 -0
  12. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_debug_handler.h +140 -0
  13. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_debug_info.h +65 -0
  14. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_detail.h +41 -0
  15. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_init.h +11 -0
  16. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_interface.h +34 -0
  17. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_preprocess.h +18 -0
  18. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_resolver.h +10 -0
  19. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/builtin_functions.h +11 -0
  20. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/canonicalize_modified_loop.h +16 -0
  21. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/concrete_module_type.h +241 -0
  22. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/convert_to_ssa.h +16 -0
  23. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/edit_distance.h +15 -0
  24. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/error_report.h +54 -0
  25. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/exit_transforms.h +12 -0
  26. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/function_schema_parser.h +17 -0
  27. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/inline_loop_condition.h +16 -0
  28. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/ir_emitter.h +21 -0
  29. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/lexer.h +575 -0
  30. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/mini_environment.h +57 -0
  31. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/name_mangler.h +27 -0
  32. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/parse_string_literal.h +87 -0
  33. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/parser.h +33 -0
  34. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/parser_constants.h +7 -0
  35. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/resolver.h +68 -0
  36. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/schema_matching.h +70 -0
  37. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/schema_type_parser.h +40 -0
  38. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/script_type_parser.h +55 -0
  39. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/source_range.h +457 -0
  40. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/source_ref.h +47 -0
  41. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/strtod.h +12 -0
  42. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/sugared_value.h +857 -0
  43. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/tracer.h +412 -0
  44. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/tree.h +220 -0
  45. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/tree_views.h +1275 -0
  46. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/versioned_symbols.h +21 -0
  47. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/alias_analysis.h +322 -0
  48. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/attributes.h +184 -0
  49. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/constants.h +61 -0
  50. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/graph_node_list.h +201 -0
ckpts/universal/global_step20/zero/12.mlp.dense_h_to_4h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:204138b3a3ad21e351ea80f6569a98d908fcce2bb5aaff266f28d0b6409bf21d
+ size 33555612
ckpts/universal/global_step20/zero/12.mlp.dense_h_to_4h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:481827ebed6ed7ee26070817e4a33c322393943ac1760d32715b8083c177e5d8
+ size 33555627
ckpts/universal/global_step20/zero/12.mlp.dense_h_to_4h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e0684f2e44930a33407e4b6b9b284da5dda0afc997c0ff1c1b7ec5f8bb7a2c29
+ size 33555533
ckpts/universal/global_step20/zero/16.attention.query_key_value.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:57bcacb765d9774ed939065b1fa67533644c9106627d1800fb3c7e50dc51afde
+ size 50332828
ckpts/universal/global_step20/zero/16.attention.query_key_value.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a6a4ce9840d9b271aea9cb57c0ff0e3abf064bbeb461f1d099f00eae72b89245
+ size 50332843
ckpts/universal/global_step20/zero/16.attention.query_key_value.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dd6046f7442044431e0eada22e34d4376eb4e71dd34d2c37f69683ecfa4f3f69
+ size 50332749
ckpts/universal/global_step20/zero/16.mlp.dense_4h_to_h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8563ddcc6bc0f0310ab73c84f3b0db95df3750257edd1ef034fbad75ee8d0f1c
+ size 33555533
ckpts/universal/global_step20/zero/24.mlp.dense_4h_to_h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1b915c8062030ba818639f7af5ebbb2663fbfc8b7005d14234a811262e774caf
+ size 33555612
ckpts/universal/global_step20/zero/24.mlp.dense_4h_to_h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a080063c2418375c61cef356ffcd6fc20cf6019b1afbed3c87a9bfd5cdd55e38
+ size 33555627
ckpts/universal/global_step20/zero/24.mlp.dense_4h_to_h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d33f5eaf4fc9b3ad57d5baa152e71bf7f61c3cb341c9f32dcf01cd91afb9fc20
+ size 33555533
ckpts/universal/global_step20/zero/26.post_attention_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f2e6054f3e0b224edbc2dd1fd9ae613a3659c5d3aadb9b12a7e7f83b8202e511
+ size 9293
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_debug_handler.h ADDED
@@ -0,0 +1,140 @@
+ #pragma once
+ #include <ATen/core/ivalue.h>
+
+ #include <torch/csrc/jit/backends/backend_detail.h>
+ #include <torch/csrc/jit/ir/ir.h>
+ #include <torch/csrc/jit/ir/scope.h>
+
+ #include <atomic>
+
+ namespace torch {
+ namespace jit {
+
+ /*
+ * BackendDebugHandleManager is responsible for issuing debug handles to
+ * backends. Debug handles are associated with nodes of a graph.
+ * BackendDebugHandleManager also maintains a map
+ * [debug-handle, DebugInfoTuple = {source range, inlined callstack ptr]} that
+ * will help generate a callstack for exception raised using debug handles.
+ * Effectively debug handles are something that is given to backend and later
+ * when an exception occurs in the backend, backend can tell, using debug
+ * handle, that an exception occurred here. Then the runtime can generate
+ * callstack correspoding to the exception.
+ * There are two parts to BackendDebugHandleManager:
+ * 1. static std::atomic debug_handle
+ * 2. Map of [debug-handle, DebugInfoTuple]
+ *
+ * About 1:
+ * Why do they have to be unique. The reason is that by ensuring
+ * uniqueness of debug handles, we remove the burden of another layer of
+ * mapping where we need to say this set of debug handles were generated for
+ * this lowered module or this bytecode function. This simplifies the API for
+ * serialization since debug handles can uniquely identify DebugInfoTuple.
+ * Thus simplifies the runtime API for throwing exception. Exception throwing
+ * only needs to know debug_handle and not which module or method threw it.
+ * There are 2 issues to keep in mind, though,for static std::atomic
+ * debug_handle: A. Performance implications of using atomic variable. However
+ * this is only used for compilation so we assume to absorb some of that
+ * penalty. Plus if there is no contention then we should have less to worry
+ * about. B. If repeated compilation is part of a long running process then we
+ * may overflow int64_t. We may detect and fail on this. For now this is not
+ * done.
+ *
+ * Now about 2:
+ * There are two usecases for [debug-handle, DebugInfoTuple]
+ * A. During bytecode generation the DebugInfoTuple corresponding to the nodes
+ * of the inlined graph being serialized, are stored in this object and a
+ * unique debug handle is returned. This unique debug handle is stored in
+ * mobile_debug info for pytorch lite models. It will be used for raising
+ * exceptions as well as profiling. B. During backend lowering, each backend's
+ * preprocess/compile method can compile method's graph and serialize those
+ * methods. Once the method is lowered to backend, graph is essentially lost.
+ * Without access to graph it is hard to generate model level debug info. Thus
+ * the debug handles provide a way to map nodes of the graph to the model level
+ * debug info.
+ *
+ * During byte-code model serialization, [debug-handle, DebugInfoTuple] is
+ * serialized. Now we know a. debug handles and b. how to map debug handles to
+ * model source code. Thus we can either do eager symbolication by converting
+ * debug handles to corresponding source code at runtime, or do lazy
+ * symbolicattion offline.
+ *
+ * Note that it is not necessary to serialize [debug-handle, DebugInfoTuple]
+ * corresponding to lowered backend if the lowering process, that is
+ * preprocess/compile, and execution happens in the same session, then eager
+ * symbolication can be employed.
+ *
+ * Now how does BackendDebugHandleManager capture all of the above?
+ * By providing two API.
+ * 1. getNextDebugHandle which given a Node* returns a unique debug handle,
+ * that will uniquely identify DebugInfoTuple.
+ * and
+ * 2. getCallStackPtrMap which returns the map
+ * [debug-handle, DebugInfoTuple]
+ *
+ * 1 provides debug handles to backends and 2 provides runtime a way to map
+ * debug handles to source level debug info.
+ *
+ * So why does debug handle map to DebugInfoTuple = {source range and inlined
+ * cs}? {debug_handle, source_range_tag, serialized_callstack} Take this
+ * example: class L(nn.Module): def __init__(self):
+ * ...
+ * def forward(self, x):
+ * return x * 5
+ * class M(nn.Module):
+ * def __init__(self):
+ * ...
+ * def forward(self, x):
+ * return x - 2
+ * class N(nn.Module):
+ * def __init__(self):
+ * self.m = M()
+ * def forward(self, x):
+ * return self.m(x) + 3
+ * m = torch.jit.script(N())
+ * Once you inline m's forward method, m.forward.graph will look something
+ * like this
+ * graph(%self...):
+ * %x = aten::mul(..)
+ * %x = aten::sub(x, ..)
+ * %y = aten::add(x, ..)
+ * ..
+ * Inlined callstack ptr for these two nodes will look like:
+ * aten::mul's inlined CS (callstack): [N.forward, source range] -> [M.forward,
+ * source range] aten::sub's inlined CS (callstack): [N.forward, source range]
+ * aten::add's inlined CS: null
+ * mul node's inlined CS contains only information about the callsites' source
+ * range The information about mul node's source range ('return x * 5') is not
+ * available in its inlined CS. It is rather part of node's source range
+ * instead of inlined CS. Thus to get full stack: [N.forward, source range] ->
+ * [M.forward, source range] -> [aten::mul's source range] We need to track
+ * mul's source range and inlined CS both.
+ */
+
+ using BackendDebugInfoMapType =
+ std::unordered_map<torch::jit::DebugHandleType, DebugInfoTuple>;
+
+ /*
+ * This class is used to generate debug info map.
+ * backend's preprocess will call generate_debug_handles (see
+ * backend_detail.cpp), which uses debug_handle_manager to generate debug
+ * handles. When lowering process finishes, calling stopRecording will
+ * return debug info map from debug_handle_manager
+ */
+ class TORCH_API BackendDebugInfoRecorder {
+ public:
+ BackendDebugInfoRecorder() = default;
+ int64_t getNextDebugHandle(const Node* node);
+ // Reason this is not done as RAII is that work done in stopRecording
+ // can throw, and throwing with dtor will call terminate and thus voids any
+ // exception catching at a higher level.
+ BackendDebugInfoMapType stopRecording();
+ NodeToDebugHandle generate_debug_handles(const std::shared_ptr<Graph>& graph);
+
+ private:
+ static std::atomic<DebugHandleType> unique_debug_handle_;
+ BackendDebugInfoMapType handles_to_inlined_callstack_ptrs_;
+ };
+
+ } // namespace jit
+ } // namespace torch
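
As a rough illustration of the recording pattern described in the header comment above (this sketch is not part of the commit; the helper name and the assumption that `graph` is the inlined graph being lowered are illustrative):

#include <torch/csrc/jit/backends/backend_debug_handler.h>
#include <torch/csrc/jit/ir/ir.h>

// Illustrative only: issue one debug handle per node of the graph being
// lowered, then flush the accumulated {handle -> DebugInfoTuple} map so it
// can be serialized alongside the lowered module.
torch::jit::BackendDebugInfoMapType recordDebugInfo(
    const std::shared_ptr<torch::jit::Graph>& graph) {
  torch::jit::BackendDebugInfoRecorder recorder;
  // `handles` maps Node* -> debug handle; a backend would attach these to its
  // lowered payload during preprocess/compile.
  torch::jit::NodeToDebugHandle handles =
      recorder.generate_debug_handles(graph);
  (void)handles;
  return recorder.stopRecording();
}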
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_debug_info.h ADDED
@@ -0,0 +1,65 @@
+ #pragma once
+
+ #ifndef BUILD_LITE_INTERPRETER
+ #include <torch/csrc/jit/backends/backend_debug_handler.h>
+ #endif
+ #include <torch/custom_class.h>
+
+ namespace torch {
+ namespace jit {
+
+ constexpr static auto kBackendUtilsNamespace = "backendutils";
+ constexpr static auto kBackendDebugInfoClass = "BackendDebugInfo";
+
+ #ifndef BUILD_LITE_INTERPRETER
+ /*
+ * Custom class for holding debug information in lowered modules, intended
+ * purely for keeping this information to be later serialized outside of the
+ * lowered module itself.
+ * Its usage pattern is:
+ * 1. LoweredModule declares an instance of this class in __backend_debug_info
+ * 2. During serialization, __backend_debug_info is used to obtain the debug
+ * information.
+ * 3. The contents of LoweredModule.__backend_debug_info are not serialized
+ * within the LoweredModule itself.
+ */
+ class TORCH_API PyTorchBackendDebugInfo : public torch::CustomClassHolder {
+ public:
+ PyTorchBackendDebugInfo() = default;
+
+ c10::optional<BackendDebugInfoMapType>& getDebugInfoMap() {
+ return debug_info_map_;
+ }
+
+ void setDebugInfoMap(BackendDebugInfoMapType&& debug_info_map) {
+ debug_info_map_ = std::move(debug_info_map);
+ }
+
+ private:
+ c10::optional<BackendDebugInfoMapType> debug_info_map_;
+ };
+
+ #else
+
+ /*
+ * Dummy instance exists for the following reason:
+ * __backend_debug_info is of type BackendDebugInfo which is a torchbind'
+ * class backed by cpp class PyTorchBackendDebugInfo.
+ * PyTorchBackendDebugInfo, depends on ir.h., scope.h, source_range etc.
+ * We dont include this on lite interpreter side. Thus on lite interpreter side
+ * we cannot have valid definition of PyTorchBackendDebugInfo. However we do not
+ * need valid instance of __backend_debug_info in lite interpreter anyway as we
+ * dont serialize this info as part of LowerdModule as mentioned ealrier.
+ * However since LoweredModule has registered attribute of __backend_debug_info
+ * we still need to make sure that BackendDebugInfo is registered with
+ * TorchScript. However in this instance it does not have to be backed by
+ * PyTorchBackendDebugInfo, so we create a dummy PyTorchBackendDebugInfoDummy
+ * just for this purpose.
+ */
+ class PyTorchBackendDebugInfoDummy : public torch::CustomClassHolder {
+ public:
+ PyTorchBackendDebugInfoDummy() = default;
+ };
+ #endif
+ } // namespace jit
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_detail.h ADDED
@@ -0,0 +1,41 @@
+ #pragma once
+
+ #include <torch/csrc/jit/api/module.h>
+
+ #include <ATen/core/jit_type.h>
+
+ #include <functional>
+
+ namespace torch {
+ namespace jit {
+
+ using DebugHandleType = int64_t;
+
+ using NodeToDebugHandle = std::unordered_map<Node*, DebugHandleType>;
+
+ using BackendDebugHandleGenerator =
+ std::function<NodeToDebugHandle(const std::shared_ptr<Graph>&)>;
+
+ namespace detail {
+
+ using BackendPreprocessFunction = std::function<c10::IValue(
+ const Module&,
+ const c10::Dict<IValue, IValue>&,
+ const BackendDebugHandleGenerator& generate_debug_handles)>;
+
+ TORCH_API void registerBackendPreprocessFunction(
+ const std::string& name,
+ const BackendPreprocessFunction& preprocess);
+
+ bool hasBackendPreprocessFunction(const std::string& name);
+
+ BackendPreprocessFunction getBackendPreprocessFunction(const std::string& name);
+
+ TORCH_API Module codegen_backend_module(
+ const std::string& backend_name,
+ const Module& orig_module,
+ const c10::Dict<IValue, IValue>& method_compile_spec,
+ const c10::DictTypePtr& any_dict_ty);
+ } // namespace detail
+ } // namespace jit
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_init.h ADDED
@@ -0,0 +1,11 @@
+ #pragma once
+
+ #include <torch/csrc/jit/python/pybind.h>
+ #include <torch/csrc/utils/pybind.h>
+
+ namespace torch {
+ namespace jit {
+ // Initialize Python bindings for JIT to_<backend> functions.
+ void initJitBackendBindings(PyObject* module);
+ } // namespace jit
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_interface.h ADDED
@@ -0,0 +1,34 @@
+ #pragma once
+
+ #include <torch/custom_class.h>
+
+ namespace torch {
+ namespace jit {
+
+ // Interface for a JIT backend.
+ class TORCH_API PyTorchBackendInterface : public torch::CustomClassHolder {
+ public:
+ PyTorchBackendInterface() noexcept;
+ ~PyTorchBackendInterface() override;
+
+ // Returns true if the backend is available to process delegation calls.
+ virtual bool is_available() = 0;
+
+ // Compile the module contained in \p processed using the details provided in
+ // \p method_compile_spec for each module method that should be compiled for
+ // the backend. \p method_compile_spec should be of type Dict<string, Any>.
+ // \returns a dictionary of type Dict<string, Any> that contains a backend
+ // handle each method that can run on the backend (i.e. each key in \p
+ // method_compile_spec).
+ virtual c10::impl::GenericDict compile(
+ c10::IValue processed,
+ c10::impl::GenericDict method_compile_spec) = 0;
+
+ // Execute the method specified by \p handle using \p inputs. \returns the
+ // outputs as a tuple.
+ virtual c10::impl::GenericList execute(
+ c10::IValue handle,
+ c10::impl::GenericList inputs) = 0;
+ };
+ } // namespace jit
+ } // namespace torch
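
A skeletal implementation of this interface might look as follows; this is only a hedged sketch (the class name and the pass-through behaviour are invented for illustration, not part of this commit):

#include <torch/csrc/jit/backends/backend_interface.h>

// Hypothetical no-op backend: reports itself available, hands the processed
// blob back as the per-method handle, and echoes inputs as outputs.
class MinimalBackend : public torch::jit::PyTorchBackendInterface {
 public:
  bool is_available() override {
    return true;
  }

  c10::impl::GenericDict compile(
      c10::IValue processed,
      c10::impl::GenericDict method_compile_spec) override {
    // One handle per requested method; a real backend would store its
    // lowered artifact for that method here instead of `processed`.
    auto handles =
        c10::impl::GenericDict(c10::StringType::get(), c10::AnyType::get());
    for (const auto& entry : method_compile_spec) {
      handles.insert(entry.key(), processed);
    }
    return handles;
  }

  c10::impl::GenericList execute(
      c10::IValue handle,
      c10::impl::GenericList inputs) override {
    // A real backend would dispatch to the lowered method named by `handle`.
    return inputs;
  }
};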
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_preprocess.h ADDED
@@ -0,0 +1,18 @@
+ #pragma once
+
+ #include <torch/csrc/jit/backends/backend_detail.h>
+ namespace torch {
+ namespace jit {
+ class backend_preprocess_register {
+ std::string backend_name_;
+
+ public:
+ backend_preprocess_register(
+ const std::string& name,
+ const detail::BackendPreprocessFunction& preprocess)
+ : backend_name_(name) {
+ detail::registerBackendPreprocessFunction(name, preprocess);
+ }
+ };
+ } // namespace jit
+ } // namespace torch
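
Registration is intended to happen at static-initialization time through the helper above. A hedged sketch (the backend name "demo" and the preprocess body are placeholders, not part of this commit):

#include <torch/csrc/jit/backends/backend_detail.h>
#include <torch/csrc/jit/backends/backend_preprocess.h>

namespace {

// Placeholder preprocess step matching detail::BackendPreprocessFunction:
// a real implementation would lower `mod` according to method_compile_spec
// and return the serialized result as an IValue.
c10::IValue preprocess(
    const torch::jit::Module& mod,
    const c10::Dict<c10::IValue, c10::IValue>& method_compile_spec,
    const torch::jit::BackendDebugHandleGenerator& generate_debug_handles) {
  return c10::IValue(std::string("serialized-module-placeholder"));
}

// Makes the function retrievable via
// detail::getBackendPreprocessFunction("demo").
static torch::jit::backend_preprocess_register reg("demo", preprocess);

} // namespace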
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_resolver.h ADDED
@@ -0,0 +1,10 @@
+ #pragma once
+
+ #include <torch/csrc/jit/frontend/resolver.h>
+
+ namespace torch {
+ namespace jit {
+ // Create a Resolver for use in generating LoweredModules for specific backends.
+ TORCH_API std::shared_ptr<Resolver> loweredModuleResolver();
+ } // namespace jit
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/builtin_functions.h ADDED
@@ -0,0 +1,11 @@
+ #pragma once
+
+ #include <torch/csrc/Export.h>
+ #include <torch/csrc/jit/api/module.h>
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API const std::vector<Function*>& getAllBuiltinFunctionsFor(Symbol name);
+ } // namespace jit
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/canonicalize_modified_loop.h ADDED
@@ -0,0 +1,16 @@
+ #pragma once
+ #include <memory>
+
+ #include <torch/csrc/Export.h>
+
+ namespace torch {
+ namespace jit {
+
+ struct Graph;
+
+ // Transforms loops so that they can be represented as python
+ // for or while loops
+ TORCH_API void CanonicalizeModifiedLoops(std::shared_ptr<Graph>& graph);
+
+ } // namespace jit
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/concrete_module_type.h ADDED
@@ -0,0 +1,241 @@
+ #pragma once
+
+ #include <ATen/core/ivalue.h>
+ #include <torch/csrc/jit/api/module.h>
+ #include <torch/csrc/jit/python/pybind_utils.h>
+ #include <memory>
+ #include <string>
+ #include <vector>
+
+ namespace torch {
+ namespace jit {
+
+ enum class IterableModuleKind { NONE, LIST, DICT, PARAMLIST, PARAMDICT };
+ class ConcreteModuleType;
+
+ // You can think of an nn.Module as a template that corresponds to a family of
+ // JIT types. The template "arguments" are things like the constant values.
+ // e.g.
+ // class M(nn.Module):
+ // __constants__ = ["const"]
+ // ...
+ //
+ // Is similar to writing the following in C++:
+ //
+ // template<TConst>
+ // class M {
+ // ...
+ // }
+ //
+ // We need to consider each different member of the type family a different JIT
+ // type because, e.g. different constant values lead to different versions of
+ // the same method.
+ //
+ // ConcreteModuleType corresponds to a single member of the type family, with
+ // all template arguments fully specified. Two Modules that share a
+ // ConcreteModuleType can share a JIT type, and vice versa.
+ //
+ // Why not just use a JIT type to represent concrete types? Because constants,
+ // function attributes, etc. are currently not representable in the type system,
+ // so this acts a non-first-class way of tracking concrete types.
+ //
+ // ConcreteModuleType is also the source of truth for servicing all
+ // ModuleValue::attr calls. This is so we can guarantee that if two Module's
+ // share a JIT type (and thus a ConcreteModuleType), then they behave the same
+ // way when you access attributes on them.
+
+ // ConcreteModuleType has two phases.
+ // 1. Creation: First we build it up, during the ScriptModule conversion
+ // process. This is represented by ConcreteModuleTypeBuilder.
+ // ...then the converter calls ConcreteModuleTypeBuilder::build(), producing
+ // a
+ // ConcreteModuleType ready for querying.
+ // 2. Querying: We use ConcreteModuleType as a source of truth for
+ // ModuleValue::attr calls during method compilation.
+
+ // Represents a concrete type during in the process for construction. We use
+ // this to decide whether we can share types between modules.
+ class VISIBILITY_HIDDEN ConcreteModuleTypeBuilder {
+ public:
+ explicit ConcreteModuleTypeBuilder(py::object pyClass) {
+ TORCH_INTERNAL_ASSERT(pyClass);
+ pyClass_ = std::move(pyClass);
+ }
+
+ void addConstant(std::string name, py::object value);
+ void addConstant(std::string name, IValue value);
+ void addAttribute(
+ std::string name,
+ const TypePtr& type,
+ bool isParameter,
+ bool isBuffer);
+ void addFunctionAttribute(
+ std::string name,
+ const TypePtr& type,
+ py::object pyFunction);
+
+ void addModule(std::string name, std::shared_ptr<ConcreteModuleType> meta);
+
+ void addForwardHook(py::object hook);
+ void addForwardPreHook(py::object pre_hook);
+
+ void addOverload(
+ std::string methodName,
+ std::vector<std::string> overloadedMethodNames);
+ void addBuiltinFunction(std::string name, const std::string& symbol_name);
+ void addFailedAttribute(std::string name, std::string failureReason);
+ void addIgnoredAttribute(std::string name);
+ void setIterableModuleKind(IterableModuleKind kind);
+
+ // If a ConcreteModuleType is poisoned, it will never compare equal to any
+ // other concrete type
+ void setPoisoned();
+
+ std::shared_ptr<ConcreteModuleType> build() const {
+ return std::make_shared<ConcreteModuleType>(*this);
+ }
+
+ // This determines whether two modules can share a type. The container structs
+ // used by ConcreteModuleType have been defined such that operator==
+ // implements a meaningful comparison in that context.
+ bool equals(const ConcreteModuleTypeBuilder& other) const;
+
+ struct FunctionAttribute {
+ FunctionTypePtr function_;
+ py::object pyFunction_;
+
+ friend bool operator==(
+ const FunctionAttribute& lhs,
+ const FunctionAttribute& rhs) {
+ // Functions are not first class, so we can't do type comparison like a
+ // regular attribute. So we do a pointer equality check on the actual
+ // Python function object.
+ return lhs.pyFunction_.is(rhs.pyFunction_);
+ }
+ };
+
+ struct Attribute {
+ Attribute(TypePtr type, bool isParam, bool isBuffer)
+ : type_(std::move(type)), isParam_(isParam), isBuffer_(isBuffer) {}
+
+ friend bool operator==(const Attribute& lhs, const Attribute& rhs) {
+ return *(lhs.type_) == *(rhs.type_) && lhs.isParam_ == rhs.isParam_;
+ }
+ TypePtr type_;
+ bool isParam_;
+ bool isBuffer_;
+ };
+
+ struct ModuleInfo {
+ ModuleInfo(std::string name, std::shared_ptr<ConcreteModuleType> meta)
+ : name_(std::move(name)), meta_(std::move(meta)) {}
+
+ friend bool operator==(const ModuleInfo& lhs, const ModuleInfo& rhs);
+
+ std::string name_;
+ std::shared_ptr<ConcreteModuleType> meta_;
+ };
+
+ private:
+ ConcreteModuleTypeBuilder() = default;
+ ClassTypePtr createTypeFromThis() const;
+
+ // If true, this type will never compare equally to anything else. This is
+ // used if we want to ensure that this type is not shared (for example, if it
+ // came from a traced module)
+ bool isPoisoned_ = false;
+
+ // The value of any constants defined by the module.
+ std::unordered_map<std::string, IValue> constants_;
+ // The types of any attributes
+ OrderedDict<std::string, Attribute> attributes_;
+ // Overloads, in the same format as `__overloads__` in Python
+ std::unordered_map<std::string, std::vector<std::string>> overloads_;
+ // Any attributes we failed to convert to TorchScript, along with a hint as to
+ // why
+ std::unordered_map<std::string, std::string> failedAttributes_;
+ // Any attributes that were marked as ignored. They cannot be used in
+ // TorchScript but can still be used in ignored function in Python.
+ std::unordered_set<std::string> ignoredAttributes_;
+ // Any function attributes. These are special right now because functions are
+ // not first-class in the type system.
+ std::unordered_map<std::string, FunctionAttribute> functionAttributes_;
+ // Function attributes that are calls to builtin functions. These get
+ // de-sugared directly into the corresponding aten:: call. The map is
+ // attribute name -> aten symbol name
+ std::unordered_map<std::string, c10::Symbol> builtinFunctions_;
+ // The concrete types of any submodules
+ std::vector<ModuleInfo> modules_;
+ // Hooks to be called before/after forward when the module
+ // is called directly. Used to ensure modules have different types
+ // when they have different python hooks
+ // Actual hooks are added to ClassType directly during compilation
+ std::vector<py::object> forwardHooks_;
+ std::vector<py::object> forwardPreHooks_;
+
+ // If something is a ModuleDict/ModuleList, it means:
+ // 1. The order of the submodules matters for comparing the type
+ // 2. The compiler is allowed to treat it like a dict/tuple
+ IterableModuleKind iterableModuleKind_ = IterableModuleKind::NONE;
+
+ // The original `nn.Module` class that we derived this ScriptModule from.
+ py::object pyClass_;
+
+ // NOTE: If you ever add any more state to this struct, you need to make sure
+ // operator== still makes sense!
+ friend ConcreteModuleType;
+ };
+
+ // Represents a finalized concrete type, used to service ModuleValue::attr calls
+ // during method compilation.
+ class VISIBILITY_HIDDEN ConcreteModuleType {
+ public:
+ explicit ConcreteModuleType(ConcreteModuleTypeBuilder data);
+
+ static std::shared_ptr<ConcreteModuleType> fromJitType(TypePtr type);
+
+ TypePtr getJitType() const;
+ c10::optional<py::object> getPyClass() const;
+ IterableModuleKind getIterableModuleKind() const;
+ c10::optional<std::vector<std::string>> findOverloads(
+ const std::string& name) const;
+ c10::optional<Function*> findFunctionAttribute(const std::string& name) const;
+ c10::optional<c10::Symbol> findBuiltinFunction(const std::string& name) const;
+ std::shared_ptr<ConcreteModuleType> findSubmoduleConcreteType(
+ const std::string& name) const;
+ c10::optional<std::string> findFailedAttribute(const std::string& name) const;
+ bool isIgnoredAttribute(const std::string& name) const;
+
+ // These getters are only here to return things as types that can be
+ // automatically converted by pybind.
+ std::unordered_map<std::string, py::object> getConstantsPy() const;
+ std::unordered_map<std::string, std::pair<TypePtr, bool>> getAttributesPy()
+ const;
+ std::vector<std::pair<std::string, std::shared_ptr<ConcreteModuleType>>>
+ getModulesPy() const;
+
+ bool equals(const ConcreteModuleType& other) const {
+ if (jitType_ == other.jitType_) {
+ // If the computed types are the same, these modules can (obviously) share
+ // a type.
+ return true;
+ }
+
+ return data_.equals(other.data_);
+ }
+ bool equals(const ConcreteModuleTypeBuilder& other) const {
+ return data_.equals(other);
+ }
+
+ void dump() const;
+
+ private:
+ ConcreteModuleType() = default;
+
+ // The JIT type derived from this ConcreteModuleType.
+ ConcreteModuleTypeBuilder data_;
+ TypePtr jitType_;
+ };
+
+ } // namespace jit
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/convert_to_ssa.h ADDED
@@ -0,0 +1,16 @@
+ #pragma once
+ #include <functional>
+ #include <memory>
+ #include <string>
+
+ #include <torch/csrc/Export.h>
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ // Convert a graph with Loads & Stores into SSA form
+ TORCH_API void ConvertToSSA(std::shared_ptr<Graph>& graph);
+
+ } // namespace jit
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/edit_distance.h ADDED
@@ -0,0 +1,15 @@
+ #pragma once
+
+ #include <torch/csrc/Export.h>
+ #include <cstddef>
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API size_t ComputeEditDistance(
+ const char* word1,
+ const char* word2,
+ size_t maxEditDistance);
+
+ } // namespace jit
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/error_report.h ADDED
@@ -0,0 +1,54 @@
+ #pragma once
+
+ #include <c10/util/Optional.h>
+ #include <torch/csrc/jit/frontend/tree.h>
+
+ namespace torch {
+ namespace jit {
+
+ struct Call {
+ std::string fn_name;
+ SourceRange caller_range;
+ };
+
+ struct TORCH_API ErrorReport : public std::exception {
+ ErrorReport(const ErrorReport& e);
+
+ explicit ErrorReport(SourceRange r);
+ explicit ErrorReport(const TreeRef& tree) : ErrorReport(tree->range()) {}
+ explicit ErrorReport(const Token& tok) : ErrorReport(tok.range) {}
+
+ const char* what() const noexcept override;
+
+ struct TORCH_API CallStack {
+ // These functions are used to report why a function was being compiled
+ // (i.e. what was the call stack of user functions at compilation time that
+ // led to this error)
+ CallStack(const std::string& name, const SourceRange& range);
+ ~CallStack();
+
+ // Change the range that is relevant for the current function (i.e. after
+ // each successful expression compilation, change it to the next expression)
+ static void update_pending_range(const SourceRange& range);
+ };
+
+ static std::string current_call_stack();
+
+ private:
+ template <typename T>
+ friend const ErrorReport& operator<<(const ErrorReport& e, const T& t);
+
+ mutable std::stringstream ss;
+ OwnedSourceRange context;
+ mutable std::string the_message;
+ std::vector<Call> error_stack;
+ };
+
+ template <typename T>
+ const ErrorReport& operator<<(const ErrorReport& e, const T& t) {
+ e.ss << t;
+ return e;
+ }
+
+ } // namespace jit
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/exit_transforms.h ADDED
@@ -0,0 +1,12 @@
+ #pragma once
+
+ #include <torch/csrc/Export.h>
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API void TransformExits(std::shared_ptr<Graph>& graph);
+
+ } // namespace jit
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/function_schema_parser.h ADDED
@@ -0,0 +1,17 @@
+ #pragma once
+
+ #include <ATen/core/function_schema.h>
+ #include <c10/macros/Macros.h>
+ #include <string>
+ #include <variant>
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API std::variant<c10::OperatorName, c10::FunctionSchema> parseSchemaOrName(
+ const std::string& schemaOrName);
+ TORCH_API c10::FunctionSchema parseSchema(const std::string& schema);
+ TORCH_API c10::OperatorName parseName(const std::string& name);
+
+ } // namespace jit
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/inline_loop_condition.h ADDED
@@ -0,0 +1,16 @@
+ #pragma once
+ #include <functional>
+ #include <memory>
+ #include <string>
+
+ #include <torch/csrc/Export.h>
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API void InlineLoopCondition(std::shared_ptr<Graph>& graph);
+ TORCH_API void InlineBlockBeforeNode(Node* before_node, Block* block);
+
+ } // namespace jit
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/ir_emitter.h ADDED
@@ -0,0 +1,21 @@
+ #pragma once
+ #include <functional>
+ #include <memory>
+ #include <string>
+
+ #include <torch/csrc/jit/api/module.h>
+ #include <torch/csrc/jit/frontend/error_report.h>
+ #include <torch/csrc/jit/frontend/resolver.h>
+ #include <torch/csrc/jit/frontend/sugared_value.h>
+ #include <torch/csrc/jit/frontend/tree_views.h>
+ #include <torch/csrc/jit/ir/ir.h>
+
+ namespace torch {
+ namespace jit {
+
+ TORCH_API void runCleanupPasses(std::shared_ptr<Graph>& to_clean);
+
+ TORCH_API bool meaningfulName(const std::string& name);
+
+ } // namespace jit
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/lexer.h ADDED
@@ -0,0 +1,575 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <c10/macros/Macros.h>
3
+ #include <c10/util/Exception.h>
4
+ #include <torch/csrc/Export.h>
5
+ #include <torch/csrc/jit/frontend/parser_constants.h>
6
+ #include <torch/csrc/jit/frontend/source_range.h>
7
+ #include <torch/csrc/jit/frontend/strtod.h>
8
+ #include <algorithm>
9
+ #include <clocale>
10
+ #include <cstdlib>
11
+ #include <memory>
12
+ #include <sstream>
13
+ #include <string>
14
+ #include <vector>
15
+
16
+ C10_CLANG_DIAGNOSTIC_PUSH()
17
+ #if C10_CLANG_HAS_WARNING("-Wshorten-64-to-32")
18
+ C10_CLANG_DIAGNOSTIC_IGNORE("-Wshorten-64-to-32")
19
+ #endif
20
+
21
+ namespace torch {
22
+ namespace jit {
23
+
24
+ // single character tokens are just the character itself '+'
25
+ // multi-character tokens need an entry here
26
+ // if the third entry is not the empty string, it is used
27
+ // in the lexer to match this token.
28
+
29
+ // These kinds are also used in Tree.h as the kind of the AST node.
30
+ // Some kinds TK_APPLY, TK_LIST are only used in the AST and are not seen in the
31
+ // lexer.
32
+
33
+ #define TC_FORALL_TOKEN_KINDS(_) \
34
+ _(TK_EOF, "eof", "") \
35
+ _(TK_WHITESPACE, "whitespace", "") \
36
+ _(TK_WHITESPACE_EOF, "whitespace_eof", "") \
37
+ _(TK_NUMBER, "number", "") \
38
+ _(TK_NEWLINE, "newline", "") \
39
+ _(TK_INDENT, "indent", "") \
40
+ _(TK_DEDENT, "dedent", "") \
41
+ _(TK_DEF, "def", "def") \
42
+ _(TK_EQUIVALENT, "equivalent", "<=>") \
43
+ _(TK_IDENT, "ident", "") \
44
+ _(TK_STRING, "string", "") \
45
+ _(TK_STRINGLITERAL, "string_literal", "") \
46
+ _(TK_CONST, "const", "") \
47
+ _(TK_LIST, "list", "") \
48
+ _(TK_DICT, "dict", "") \
49
+ _(TK_OPTION, "option", "") \
50
+ _(TK_APPLY, "apply", "") \
51
+ _(TK_COMPREHENSION, "comprehension", "") \
52
+ _(TK_RANGE_CONSTRAINT, "range_constraint", "") \
53
+ _(TK_PARAM, "param", "") \
54
+ _(TK_INFERRED, "inferred", "") \
55
+ _(TK_ACCESS, "access", "") \
56
+ _(TK_ASSIGN, "assign", "") \
57
+ _(TK_AUG_ASSIGN, "aug_assign", "") \
58
+ _(TK_ATTRIBUTE, "attribute", "") \
59
+ _(TK_IF, "if", "if") \
60
+ _(TK_ELSE, "else", "else") \
61
+ _(TK_ELIF, "elif", "elif") \
62
+ _(TK_WHILE, "while", "while") \
63
+ _(TK_EXPR_STMT, "expression statement", "") \
64
+ _(TK_RETURN, "return", "return") \
65
+ _(TK_IS, "is", "is") \
66
+ _(TK_ISNOT, "is not", "is not") \
67
+ _(TK_NE, "ne", "!=") \
68
+ _(TK_EQ, "eq", "==") \
69
+ _(TK_LE, "le", "<=") \
70
+ _(TK_GE, "ge", ">=") \
71
+ _(TK_FLOOR_DIV, "floordiv", "//") \
72
+ _(TK_IF_EXPR, "if", "") \
73
+ _(TK_TRUE, "True", "True") \
74
+ _(TK_FALSE, "False", "False") \
75
+ _(TK_NONE, "None", "None") \
76
+ _(TK_AND, "and", "and") \
77
+ _(TK_OR, "or", "or") \
78
+ _(TK_NOT, "not", "not") \
79
+ _(TK_LSHIFT, "<<", "<<") \
80
+ _(TK_RSHIFT, ">>", ">>") \
81
+ _(TK_CAST, "cast", "") \
82
+ _(TK_PLUS_EQ, "+=", "+=") \
83
+ _(TK_MINUS_EQ, "-=", "-=") \
84
+ _(TK_TIMES_EQ, "*=", "*=") \
85
+ _(TK_DIV_EQ, "/=", "/=") \
86
+ _(TK_MOD_EQ, "%=", "%=") \
87
+ _(TK_BIT_OR_EQ, "|=", "|=") \
88
+ _(TK_BIT_AND_EQ, "&=", "&=") \
89
+ _(TK_BIT_XOR_EQ, "^=", "^=") \
90
+ _(TK_LSHIFT_EQ, "<<=", "<<=") \
91
+ _(TK_RSHIFT_EQ, ">>=", ">>=") \
92
+ _(TK_POW_EQ, "**=", "**=") \
93
+ _(TK_GLOBAL, "global", "global") \
94
+ _(TK_BUILT_IN, "built-in", "") \
95
+ _(TK_SUBSCRIPT, "subscript", "") \
96
+ _(TK_VAR, "variable", "") \
97
+ _(TK_NOTHING, "nothing", "") \
98
+ _(TK_DICT_LITERAL, "dict-literal", "") \
99
+ _(TK_LIST_LITERAL, "list-literal", "") \
100
+ _(TK_TUPLE_LITERAL, "tuple-literal", "") \
101
+ _(TK_FOR, "for", "for") \
102
+ _(TK_IN, "in", "in") \
103
+ _(TK_NOTIN, "not in", "not in") \
104
+ _(TK_STARRED, "starred", "") \
105
+ _(TK_UNARY_MINUS, "unary minus", "") \
106
+ _(TK_POW, "pow operator", "**") \
107
+ _(TK_ARROW, "arrow", "->") \
108
+ _(TK_DECL, "decl", "") \
109
+ _(TK_SLICE_EXPR, "slice expr", "") \
110
+ _(TK_TYPE_COMMENT, "type comment", "# type:") \
111
+ _(TK_RAISE, "raise", "raise") \
112
+ _(TK_ASSERT, "assert", "assert") \
113
+ _(TK_DOTS, "dots", "...") \
114
+ _(TK_LIST_COMP, "list comprehension", "") \
115
+ _(TK_DICT_COMP, "dict comprehension", "") \
116
+ _(TK_BREAK, "break", "break") \
117
+ _(TK_CONTINUE, "continue", "continue") \
118
+ _(TK_DELETE, "del", "del") \
119
+ _(TK_PASS, "pass", "pass") \
120
+ _(TK_CLASS_DEF, "class", "class") \
121
+ _(TK_IMPORT, "import", "import") \
122
+ _(TK_WITH, "with", "with") \
123
+ _(TK_WITH_ITEM, "withitem", "") \
124
+ _(TK_AS, "as", "as") \
125
+ _(TK_PROP, "property", "") \
126
+ _(TK_ELLIPSIS, "Ellipsis", "Ellipsis") \
127
+ _(TK_NONE_TYPE, "NoneType", "NoneType")
128
+
129
+ enum TokenKind {
130
+ // we use characters to represent themselves so skip all valid characters
131
+ // before
132
+ // assigning enum values to multi-char tokens.
133
+ TK_DUMMY_START = 256,
134
+ #define DEFINE_TOKEN(tok, _, _2) tok,
135
+ TC_FORALL_TOKEN_KINDS(DEFINE_TOKEN)
136
+ #undef DEFINE_TOKEN
137
+ };
138
+
139
+ TORCH_API std::string kindToString(int kind);
140
+ TORCH_API int stringToKind(const std::string& str);
141
+
142
+ // nested hash tables that indicate char-by-char what is a valid token.
143
+ struct TokenTrie;
144
+ using TokenTrieRef = std::unique_ptr<TokenTrie>;
145
+ struct TokenTrie {
146
+ TokenTrie() : kind(0) {}
147
+ void insert(const char* str, int tok) {
148
+ if (*str == '\0') {
149
+ AT_ASSERT(kind == 0);
150
+ kind = tok;
151
+ return;
152
+ }
153
+
154
+ for (size_t i = 0, e = child_chars.size(); i < e; ++i) {
155
+ if (child_chars[i] == *str) {
156
+ child_tries[i]->insert(str + 1, tok);
157
+ return;
158
+ }
159
+ }
160
+
161
+ child_chars.emplace_back(*str);
162
+ child_tries.emplace_back(std::make_unique<TokenTrie>());
163
+ child_tries.back()->insert(str + 1, tok);
164
+ }
165
+ int kind; // 0 == invalid token
166
+
167
+ std::vector<char> child_chars;
168
+ std::vector<TokenTrieRef> child_tries;
169
+ };
170
+
171
+ // stuff that is shared against all TC lexers/parsers and is initialized only
172
+ // once.
173
+ struct TORCH_API SharedParserData {
174
+ SharedParserData() : head(new TokenTrie()) {
175
+ std::stringstream ss;
176
+ for (const char* c = valid_single_char_tokens; *c; c++) {
177
+ std::string str(1, *c);
178
+ head->insert(str.c_str(), *c);
179
+ }
180
+
181
+ #define ADD_CASE(tok, _, tokstring) \
182
+ if (*(tokstring) != '\0') { \
183
+ head->insert((tokstring), (tok)); \
184
+ }
185
+ TC_FORALL_TOKEN_KINDS(ADD_CASE)
186
+ #undef ADD_CASE
187
+ }
188
+
189
+ bool match(
190
+ StringCordView::Iterator pos,
191
+ bool continuation, // are we inside a scope where newlines don't count
192
+ // (e.g. inside parens)
193
+ bool whitespace_token, // should we treat whitespace as a token
194
+ int* kind,
195
+ StringCordView::Iterator* start,
196
+ StringCordView::Iterator* end) {
197
+ *start = pos;
198
+ // skip whitespace
199
+ while (pos.has_next() && isblank(*pos)) {
200
+ ++pos;
201
+ }
202
+
203
+ // special handling
204
+ if (pos.has_next()) {
205
+ if (*pos == '#' && !isTypeComment(pos)) {
206
+ // skip comments
207
+ while (pos.has_next() && *pos != '\n')
208
+ ++pos;
209
+ // tail call, handle whitespace and more comments
210
+ return match(pos, continuation, whitespace_token, kind, start, end);
211
+ }
212
+ if (*pos == '\\') {
213
+ auto newiter = pos;
214
+ ++newiter;
215
+ if (newiter.has_next() && *newiter == '\n' && !whitespace_token) {
216
+ ++newiter;
217
+ return match(newiter, continuation, false, kind, start, end);
218
+ }
219
+ }
220
+ if (*pos == '\n') {
221
+ return match(++pos, continuation, !continuation, kind, start, end);
222
+ }
223
+ }
224
+ // we handle white space before EOF because in the case we have something
225
+ // like the following where we need to generate the dedent token if foo:
226
+ // ...
227
+ // else:
228
+ // pass
229
+ if (whitespace_token) {
230
+ *kind = !pos.has_next() ? TK_WHITESPACE_EOF : TK_WHITESPACE;
231
+ *end = pos;
232
+ return true;
233
+ }
234
+ if (!pos.has_next()) {
235
+ *kind = TK_EOF;
236
+ *start = pos;
237
+ *end = *start;
238
+ return true;
239
+ }
240
+ // invariant: the next token is not whitespace or newline
241
+ *start = pos;
242
+ // check for a valid number
243
+ size_t len;
244
+ if (isNumber(pos.rest_line(), 0, &len)) {
245
+ *end = *start;
246
+ *end += len;
247
+ *kind = TK_NUMBER;
248
+ return true;
249
+ }
250
+ // check for string
251
+ if (isString(pos.rest_line(), 0, &len)) {
252
+ *kind = TK_STRINGLITERAL;
253
+ *end = *start;
254
+ *end += len;
255
+ return true;
256
+ }
257
+
258
+ // check for either an ident or a token
259
+ // ident tracks whether what we have scanned so far could be an identifier
260
+ // matched indicates if we have found any match.
261
+ bool matched = false;
262
+ bool ident = true;
263
+ TokenTrie* cur = head.get();
264
+ // for (size_t i = 0; pos + i < str.size() && (ident || cur != nullptr);
265
+ // i++)
266
+ for (size_t i = 0; pos.has_next() && (ident || cur != nullptr);
267
+ ++pos, ++i) {
268
+ ident = ident && validIdent(i, *pos);
269
+ if (ident) {
270
+ matched = true;
271
+ *end = pos.next_iter();
272
+ *kind = TK_IDENT;
273
+ }
274
+ // check for token second, so that e.g. 'max' matches the token TK_MAX
275
+ // rather the
276
+ // identifier 'max'
277
+ if (cur) {
278
+ const auto begin_it = cur->child_chars.begin();
279
+ const auto end_it = cur->child_chars.end();
280
+ const auto ch_it = std::find(begin_it, end_it, *pos);
281
+
282
+ cur = (ch_it == end_it) ? nullptr
283
+ : cur->child_tries[ch_it - begin_it].get();
284
+
285
+ if (cur && cur->kind != 0) {
286
+ matched = true;
287
+ *end = pos.next_iter();
288
+ *kind = cur->kind;
289
+ }
290
+ }
291
+ }
292
+ return matched;
293
+ }
294
+
295
+ bool isUnary(int kind, int* prec);
296
+ bool isBinary(int kind, int* prec);
297
+ bool isRightAssociative(int kind) {
298
+ switch (kind) {
299
+ case '?':
300
+ case TK_POW:
301
+ case TK_IF:
302
+ return true;
303
+ default:
304
+ return false;
305
+ }
306
+ }
307
+
308
+ private:
309
+ bool validIdent(size_t i, char n) {
310
+ return isalpha(n) || n == '_' || (i > 0 && isdigit(n));
311
+ }
312
+
313
+ // 1. skip whitespace
314
+ // 2. handle comment or newline
315
+ //
316
+ bool isNumber(c10::string_view str, size_t start, size_t* len) {
317
+ char first = str[start];
318
+ // strtod allows numbers to start with + or - or nan or inf
319
+ // http://en.cppreference.com/w/cpp/string/byte/strtof
320
+ // but we want only the number part, otherwise 1+3 will turn into two
321
+ // adjacent numbers in the lexer
322
+ if (first == '-' || first == '+' || isalpha(first))
323
+ return false;
324
+ const char* startptr = str.data() + start;
325
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
326
+ char* endptr;
327
+ torch::jit::strtod_c(startptr, &endptr);
328
+ *len = endptr - startptr;
329
+ // check if the number is complex valued
330
+ // access is safe because string is assumed to be null terminated
331
+ if (endptr != nullptr && *endptr == 'j') {
332
+ *len += 1;
333
+ }
334
+ return *len > 0;
335
+ }
336
+
337
+ bool isCharCount(char c, c10::string_view str, size_t start, int len) {
338
+ // count checks from [start, start + len)
339
+ return start + len <= str.size() &&
340
+ std::count(str.begin() + start, str.begin() + start + len, c) == len;
341
+ }
342
+
343
+ // python concatenates all adjacent strings "a" "b" == "ab"
344
+ // strings can be enclosed with 1 or 3 single or double quotes
345
+ // if enclosed with 3 quotes newlines are valid
346
+ // as elsewhere, backslash and new line should be ignored
347
+ bool isString(c10::string_view str, size_t start, size_t* len) {
348
+ char quote = str[start];
349
+ if (quote != '\"' && quote != '\'')
350
+ return false;
351
+ int quote_len = isCharCount(quote, str, start, 3) ? 3 : 1;
352
+
353
+ // end is now set past the opening quotation marks
354
+ size_t end = start + quote_len;
355
+ while (end < str.size() && !isCharCount(quote, str, end, quote_len)) {
356
+ if (str[end] == '\n' && quote_len != 3) {
357
+ return false;
358
+ }
359
+ // handle escaped characters. advances past escaped quotation marks,
360
+ // escaped newlines and escaped backslashes
361
+ // multi-char escapes like \x1A are handled fine here because the
362
+ // remainder of the escape are valid string characters anyway
363
+ if (str[end] == '\\') {
364
+ end++;
365
+ }
366
+ end++;
367
+ }
368
+ // set length equal to the complete string including quotations
369
+ *len = end - start + quote_len;
370
+ // if end finished without going past the last character of the string than
371
+ // there is a match
372
+ return end < str.size();
373
+ }
374
+
375
+ bool isblank(int n) {
376
+ return isspace(n) && n != '\n';
377
+ }
378
+
379
+ bool isTypeComment(StringCordView::Iterator str_iter) {
380
+ c10::string_view rest_line = str_iter.rest_line();
381
+ const std::string type_string = "# type:";
382
+ if (rest_line.size() < type_string.length()) {
383
+ return false;
384
+ }
385
+ auto match_string = rest_line.substr(0, type_string.size());
386
+ return match_string == type_string;
387
+ }
388
+
389
+ // Make an exception ignoring comments for type annotation comments
390
+ bool isTypeComment(StringCordView str, size_t pos) {
391
+ const std::string type_string = "# type:";
392
+ if (str.size() < pos + type_string.length()) {
393
+ return false;
394
+ }
395
+ auto match_string = str.substr(pos, type_string.size());
396
+ return match_string == type_string;
397
+ }
398
+
399
+ TokenTrieRef head;
400
+ };
401
+
402
+ TORCH_API SharedParserData& sharedParserData();
403
+
404
+ struct Token {
405
+ int kind;
406
+ SourceRange range;
407
+ Token(int kind, SourceRange range) : kind(kind), range(std::move(range)) {}
408
+ std::string text() {
409
+ return std::string(range.token_text());
410
+ }
411
+ std::string kindString() const {
412
+ return kindToString(kind);
413
+ }
414
+ };
415
+
416
+ struct Lexer {
417
+ explicit Lexer(std::shared_ptr<Source> source)
418
+ : source(std::move(source)),
419
+ pos(0),
420
+ nesting(0),
421
+ indent_stack(),
422
+ next_tokens(),
423
+ shared(sharedParserData()) {
424
+ auto first_indent = lexRaw(true);
425
+ indent_stack.push_back(first_indent.range.size());
426
+ lex();
427
+ }
428
+ // Return the current token, and then move to the next one
429
+ Token next() {
430
+ if (next_tokens.empty())
431
+ reportError("Lexer invariant violated: empty token queue");
432
+ Token r = std::move(next_tokens.front());
433
+ next_tokens.erase(next_tokens.begin());
434
+ if (next_tokens.empty()) {
435
+ lex();
436
+ }
437
+ return r;
438
+ }
439
+ // Skip the current token if it matches the given kind
440
+ bool nextIf(int kind) {
441
+ if (cur().kind != kind)
442
+ return false;
443
+ next();
444
+ return true;
445
+ }
446
+
447
+ [[noreturn]] void reportError(const std::string& what) {
448
+ reportError(what, cur());
449
+ }
450
+ [[noreturn]] void reportError(const std::string& what, const Token& t) {
451
+ std::stringstream ss;
452
+ ss << what << ":\n";
453
+ t.range.highlight(ss);
454
+ throw std::runtime_error(ss.str());
455
+ }
456
+ [[noreturn]] void expected(const std::string& what, const Token& t) {
457
+ std::stringstream ss;
458
+ ss << "expected " << what << " but found '" << t.kindString()
459
+ << "' here:\n";
460
+ t.range.highlight(ss);
461
+ throw std::runtime_error(ss.str());
462
+ }
463
+ [[noreturn]] void expected(const std::string& what) {
464
+ expected(what, cur());
465
+ }
466
+ // Check that the current token has a given kind, return the current token,
467
+ // and advance to the next one.
468
+ Token expect(int kind) {
469
+ if (cur().kind != kind) {
470
+ expected(kindToString(kind));
471
+ }
472
+ return next();
473
+ }
474
+ Token& lookahead() {
475
+ if (next_tokens.size() < 2) {
476
+ lex();
477
+ }
478
+ return next_tokens[1];
479
+ }
480
+ Token& cur() {
481
+ return next_tokens.front();
482
+ }
483
+
484
+ private:
485
+ void lex() {
486
+ auto r = lexRaw();
487
+ switch (r.kind) {
488
+ case '(':
489
+ case '[':
490
+ case '{':
491
+ nesting++;
492
+ break;
493
+ case ')':
494
+ case ']':
495
+ case '}':
496
+ nesting--;
497
+ break;
498
+ case TK_WHITESPACE:
499
+ case TK_WHITESPACE_EOF: {
500
+ const auto depth = static_cast<int64_t>(
501
+ r.kind == TK_WHITESPACE_EOF ? indent_stack.front()
502
+ : r.range.size());
503
+ // note: TK_WHITESPACE_EOF is whitespace right before the EOF token
504
+ // just like we allow the code to be indented to a particular initial
505
+ // indent level, we allow the final indent to be anything and set
506
+ // it back to the initial indent level. This allows the code to be
507
+ // put into string literals inside code without worrying about final
508
+ // whitespace
509
+ if (depth > indent_stack.back()) {
510
+ indent_stack.push_back(depth);
511
+ r.kind = TK_INDENT;
512
+ } else if (depth == indent_stack.back()) {
513
+ r.kind = TK_NEWLINE;
514
+ } else {
515
+ next_tokens.emplace_back(TK_NEWLINE, r.range);
516
+ while (indent_stack.back() != depth) {
517
+ indent_stack.pop_back();
518
+ next_tokens.emplace_back(TK_DEDENT, r.range);
519
+ if (indent_stack.empty()) {
520
+ reportError("invalid indent level " + std::to_string(depth), r);
521
+ }
522
+ }
523
+ return; // We've already queued the tokens
524
+ }
525
+ } break;
526
+ default:
527
+ break;
528
+ }
529
+ next_tokens.push_back(std::move(r));
530
+ }
531
+ Token lexRaw(bool whitespace_token = false) {
532
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
533
+ int kind;
534
+ AT_ASSERT(source);
535
+ if (current == nullptr) {
536
+ AT_ASSERT(pos == 0);
537
+ current = std::make_unique<StringCordView::Iterator>(
538
+ source->text_str().begin());
539
+ }
540
+
541
+ StringCordView::Iterator start_iter = *current;
542
+ StringCordView::Iterator end_iter = *current;
543
+ if (!shared.match(
544
+ *current,
545
+ nesting > 0,
546
+ whitespace_token,
547
+ &kind,
548
+ &start_iter,
549
+ &end_iter)) {
550
+ expected(
551
+ "a valid token",
552
+ Token(
553
+ **current,
554
+ SourceRange(source, start_iter, start_iter.pos() + 1)));
555
+ }
556
+
557
+ auto t = Token(kind, SourceRange(source, start_iter, end_iter.pos()));
558
+ pos = end_iter.pos();
559
+ *current = end_iter;
560
+ return t;
561
+ }
562
+
563
+ std::shared_ptr<Source> source;
564
+ std::unique_ptr<StringCordView::Iterator> current;
565
+ size_t pos;
566
+ size_t nesting; // depth of ( [ { nesting...
567
+ std::vector<int> indent_stack; // stack of indentation level of blocks
568
+ // Invariant: this should always contain at least a single element
569
+ std::vector<Token> next_tokens;
570
+ SharedParserData& shared;
571
+ };
572
+ } // namespace jit
573
+ } // namespace torch
574
+
575
+ C10_CLANG_DIAGNOSTIC_POP()
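
The indentation handling in Lexer::lex() above follows the usual Python-style discipline: a deeper line pushes a new level onto indent_stack and yields TK_INDENT, an equal depth yields TK_NEWLINE, and a shallower line emits one TK_DEDENT per popped level. Below is a self-contained sketch of that stack logic only, using plain strings instead of the torch token kinds (illustrative, not the Lexer API):

#include <iostream>
#include <stdexcept>
#include <string>
#include <vector>

// Toy version of the indent_stack handling in Lexer::lex(): emit "INDENT"
// when a line is deeper than the current level, "NEWLINE" when it matches,
// and one "DEDENT" per popped level when it is shallower.
std::vector<std::string> tokensForDepths(const std::vector<int>& depths) {
  std::vector<int> indent_stack{0};
  std::vector<std::string> out;
  for (int depth : depths) {
    if (depth > indent_stack.back()) {
      indent_stack.push_back(depth);
      out.push_back("INDENT");
    } else if (depth == indent_stack.back()) {
      out.push_back("NEWLINE");
    } else {
      out.push_back("NEWLINE");
      while (indent_stack.back() != depth) {
        indent_stack.pop_back();
        out.push_back("DEDENT");
        if (indent_stack.empty())
          throw std::runtime_error("invalid indent level");
      }
    }
  }
  return out;
}

int main() {
  // Indentation depths of: "def f():", "  x = 1", "  return x", "print(x)"
  for (const auto& tok : tokensForDepths({0, 2, 2, 0}))
    std::cout << tok << ' ';  // NEWLINE INDENT NEWLINE NEWLINE DEDENT
  std::cout << '\n';
}
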
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/mini_environment.h ADDED
@@ -0,0 +1,57 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/jit_type.h>
4
+ #include <torch/csrc/jit/ir/ir.h>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+
9
+ // Simple data structure for containing a type T in nested control blocks
10
+ // Should only be used after initial compilation where type checking and
11
+ // loads and stores are emitted
12
+
13
+ template <typename T>
14
+ struct MiniEnvironment {
15
+ MiniEnvironment(Block* b, std::shared_ptr<MiniEnvironment> next = nullptr)
16
+ : next(std::move(next)) {}
17
+
18
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
19
+ std::shared_ptr<MiniEnvironment<T>> next;
20
+
21
+ T findInThisFrame(const std::string& name) {
22
+ auto it = table.find(name);
23
+ if (it != table.end()) {
24
+ return it->second;
25
+ }
26
+ return nullptr;
27
+ }
28
+
29
+ T findInAnyFrame(const std::string& name) {
30
+ for (auto runner = this; runner; runner = runner->next.get()) {
31
+ if (auto r = runner->findInThisFrame(name)) {
32
+ return r;
33
+ }
34
+ }
35
+ return nullptr;
36
+ }
37
+
38
+ void setVar(const std::string& name, T value) {
39
+ table[name] = value;
40
+ }
41
+
42
+ std::vector<std::string> definedVariables() {
43
+ std::vector<std::string> result;
44
+ result.reserve(table.size());
45
+ for (auto& kv : table) {
46
+ result.push_back(kv.first);
47
+ }
48
+ std::sort(result.begin(), result.end());
49
+ return result;
50
+ }
51
+
52
+ private:
53
+ std::unordered_map<std::string, T> table;
54
+ };
55
+
56
+ } // namespace jit
57
+ } // namespace torch
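
MiniEnvironment above is a chain of per-block symbol tables: findInThisFrame consults only the innermost table, while findInAnyFrame walks outward through the next pointers until a name is found. A self-contained analogue of that lookup behavior, using toy types rather than the torch template:

#include <cassert>
#include <memory>
#include <string>
#include <unordered_map>

// Self-contained analogue of MiniEnvironment<T>: a chain of scopes where a
// lookup falls through to enclosing frames via the `next` pointer.
struct Scope {
  std::shared_ptr<Scope> next;  // enclosing scope, may be null
  std::unordered_map<std::string, const char*> table;

  const char* findInThisFrame(const std::string& name) const {
    auto it = table.find(name);
    return it == table.end() ? nullptr : it->second;
  }
  const char* findInAnyFrame(const std::string& name) const {
    for (const Scope* s = this; s != nullptr; s = s->next.get())
      if (const char* v = s->findInThisFrame(name))
        return v;
    return nullptr;
  }
};

int main() {
  auto outer = std::make_shared<Scope>();
  outer->table["x"] = "outer-x";
  Scope inner;
  inner.next = outer;
  inner.table["y"] = "inner-y";

  assert(inner.findInThisFrame("x") == nullptr);                // not in the innermost frame
  assert(std::string(inner.findInAnyFrame("x")) == "outer-x");  // found by walking outward
  assert(std::string(inner.findInAnyFrame("y")) == "inner-y");
}
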
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/name_mangler.h ADDED
@@ -0,0 +1,27 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/qualified_name.h>
4
+ #include <torch/csrc/Export.h>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+
9
+ /**
10
+ * class NameMangler
11
+ *
12
+ * Utility to mangle qualified names in order to make them unique. We use this
13
+ * in various places where we need to de-duplicate qualified names.
14
+ */
15
+ class TORCH_API NameMangler {
16
+ public:
17
+ // Given a qualified name, return a mangled version that is guaranteed to be
18
+ // unique with respect to previous/future calls of `mangled()` on this name
19
+ // mangler instance.
20
+ c10::QualifiedName mangle(const c10::QualifiedName& name);
21
+
22
+ private:
23
+ size_t mangleIndex_ = 0;
24
+ };
25
+
26
+ } // namespace jit
27
+ } // namespace torch
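
Only the declaration of mangle() is visible here; the point of a name mangler is to decorate a qualified name with a per-instance counter so that repeated definitions stay distinct. A toy illustration of that idea follows; the actual suffix format produced by NameMangler is not shown in this header, so the one below is invented:

#include <cstddef>
#include <iostream>
#include <string>

// Toy mangler: appends an increasing index so repeated names become unique.
// The ".__mangled_N" suffix is made up for illustration only.
class ToyNameMangler {
 public:
  std::string mangle(const std::string& name) {
    return name + ".__mangled_" + std::to_string(index_++);
  }

 private:
  std::size_t index_ = 0;
};

int main() {
  ToyNameMangler m;
  std::cout << m.mangle("__torch__.Foo") << "\n";  // __torch__.Foo.__mangled_0
  std::cout << m.mangle("__torch__.Foo") << "\n";  // __torch__.Foo.__mangled_1
}
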
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/parse_string_literal.h ADDED
@@ -0,0 +1,87 @@
1
+ #pragma once
2
+ #include <c10/util/Optional.h>
3
+ #include <torch/csrc/jit/frontend/error_report.h>
4
+ #include <torch/csrc/jit/frontend/lexer.h>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+
9
+ inline bool isCharCount(char c, const std::string& str, size_t start, int len) {
10
+ // count checks from [start, start + len)
11
+ return start + len <= str.size() &&
12
+ std::count(str.begin() + start, str.begin() + start + len, c) == len;
13
+ }
14
+
15
+ inline c10::optional<char> parseOctal(const std::string& str, size_t pos) {
16
+ //\xxx where x are 0-7
17
+ if (pos + 3 >= str.size())
18
+ return c10::nullopt;
19
+ size_t c = 0;
20
+ for (size_t i = 1, b = 64; i < 4; ++i, b /= 8) {
21
+ // NOLINTNEXTLINE(bugprone-signed-char-misuse)
22
+ int d = str[pos + i];
23
+ if (d < '0' || d > '7')
24
+ return c10::nullopt;
25
+ c += b * (d - '0');
26
+ }
27
+ if (c >= 256)
28
+ return c10::nullopt;
29
+ return c;
30
+ }
31
+
32
+ inline std::string parseStringLiteral(
33
+ const SourceRange& range,
34
+ const std::string& str) {
35
+ int quote_len = isCharCount(str[0], str, 0, 3) ? 3 : 1;
36
+ auto ret_str = str.substr(quote_len, str.size() - quote_len * 2);
37
+ size_t pos = ret_str.find('\\');
38
+ while (pos != std::string::npos) {
39
+ // invariant: pos has to escape a character because it is a valid string
40
+ char c = ret_str[pos + 1];
41
+ size_t to_erase = 2;
42
+ switch (ret_str[pos + 1]) {
43
+ case '\\':
44
+ case '\'':
45
+ case '\"':
46
+ case '\n':
47
+ break;
48
+ case 'a':
49
+ c = '\a';
50
+ break;
51
+ case 'b':
52
+ c = '\b';
53
+ break;
54
+ case 'f':
55
+ c = '\f';
56
+ break;
57
+ case 'n':
58
+ c = '\n';
59
+ break;
60
+ case 'v':
61
+ c = '\v';
62
+ break;
63
+ case 't':
64
+ c = '\t';
65
+ break;
66
+ case 'x':
67
+ throw ErrorReport(range) << "unsupported hex specifier";
68
+ case 'u':
69
+ case 'U':
70
+ throw ErrorReport(range) << "unsupported unicode specifier";
71
+ default:
72
+ // octal value in format \nnn, n is [0-7]
73
+ if (auto v = parseOctal(ret_str, pos)) {
74
+ to_erase = 4;
75
+ c = *v;
76
+ } else {
77
+ throw ErrorReport(range) << " ill formed octal specifier";
78
+ }
79
+ }
80
+ ret_str.replace(pos, to_erase, /* num copies */ 1, c);
81
+ pos = ret_str.find('\\', pos + 1);
82
+ }
83
+ return ret_str;
84
+ }
85
+
86
+ } // namespace jit
87
+ } // namespace torch
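
parseOctal above decodes escapes of the form \nnn by weighting the three octal digits with 64, 8 and 1, so "\101" becomes 1*64 + 0*8 + 1 = 65, i.e. 'A'. The same arithmetic as a standalone function:

#include <cassert>
#include <optional>
#include <string>

// Standalone copy of the octal-escape arithmetic in parseOctal():
// `pos` points at the backslash; the three following chars must be 0-7.
std::optional<char> parseOctalEscape(const std::string& str, size_t pos) {
  if (pos + 3 >= str.size())
    return std::nullopt;
  size_t c = 0;
  for (size_t i = 1, b = 64; i < 4; ++i, b /= 8) {  // digit weights 64, 8, 1
    int d = str[pos + i];
    if (d < '0' || d > '7')
      return std::nullopt;
    c += b * (d - '0');
  }
  if (c >= 256)
    return std::nullopt;
  return static_cast<char>(c);
}

int main() {
  assert(*parseOctalEscape("\\101x", 0) == 'A');  // 1*64 + 0*8 + 1 = 65
  assert(!parseOctalEscape("\\892x", 0));         // '8' is not an octal digit
}
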
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/parser.h ADDED
@@ -0,0 +1,33 @@
1
+ #pragma once
2
+ #include <torch/csrc/Export.h>
3
+ #include <torch/csrc/jit/frontend/tree.h>
4
+ #include <torch/csrc/jit/frontend/tree_views.h>
5
+ #include <memory>
6
+
7
+ namespace torch {
8
+ namespace jit {
9
+
10
+ struct Decl;
11
+ struct ParserImpl;
12
+ struct Lexer;
13
+
14
+ TORCH_API Decl mergeTypesFromTypeComment(
15
+ const Decl& decl,
16
+ const Decl& type_annotation_decl,
17
+ bool is_method);
18
+
19
+ struct TORCH_API Parser {
20
+ explicit Parser(const std::shared_ptr<Source>& src);
21
+ TreeRef parseFunction(bool is_method);
22
+ TreeRef parseClass();
23
+ Decl parseTypeComment();
24
+ Expr parseExp();
25
+ Lexer& lexer();
26
+ ~Parser();
27
+
28
+ private:
29
+ std::unique_ptr<ParserImpl> pImpl;
30
+ };
31
+
32
+ } // namespace jit
33
+ } // namespace torch
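
A rough sketch of driving this Parser by hand, based only on the declarations above; parser.h is an internal JIT header rather than public API, so treat this as illustrative:

// Illustrative only: mirrors the declarations in parser.h / source_range.h.
#include <torch/csrc/jit/frontend/parser.h>
#include <torch/csrc/jit/frontend/source_range.h>

#include <memory>

int main() {
  auto src = std::make_shared<torch::jit::Source>(
      "def add(a: int, b: int) -> int:\n    return a + b\n");
  torch::jit::Parser parser(src);
  auto tree = parser.parseFunction(/*is_method=*/false);  // TreeRef for the def
  (void)tree;  // a real caller would hand this to the IR emitter
}
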
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/parser_constants.h ADDED
@@ -0,0 +1,7 @@
1
+ #pragma once
2
+
3
+ namespace torch {
4
+ namespace jit {
5
+ static const char* valid_single_char_tokens = "+-*/%@()[]:,={}><.?!&^|~";
6
+ } // namespace jit
7
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/resolver.h ADDED
@@ -0,0 +1,68 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/jit_type.h>
4
+ #include <ATen/core/qualified_name.h>
5
+ #include <torch/csrc/jit/frontend/sugared_value.h>
6
+
7
+ namespace torch {
8
+ namespace jit {
9
+
10
+ struct Resolver;
11
+ using ResolverPtr = std::shared_ptr<Resolver>;
12
+
13
+ /**
14
+ * class Resolver
15
+ *
16
+ * Represents an "outer environment" in which we can look up names and return
17
+ * a corresponding SugaredValue. This is used during compilation to resolve
18
+ * references to names which are not defined internal to the graph.
19
+ *
20
+ * Example: PythonResolver looks at the enclosing Python scope for `name`.
21
+ *
22
+ * NOTE: When adding methods, keep this an abstract class (i.e. all new methods
23
+ * should be purely virtual). Resist the urge to provide a default
24
+ * implementation; you should explicitly think about how each resolver would
25
+ * handle the method.
26
+ */
27
+ struct Resolver {
28
+ virtual ~Resolver() = default;
29
+
30
+ // Resolve a given name to a SugaredValue. This takes the method `m` that the
31
+ // caller is currently constructing, since we may need to insert nodes into
32
+ // the graph to create a value.
33
+ virtual std::shared_ptr<SugaredValue> resolveValue(
34
+ const std::string& name,
35
+ GraphFunction& m,
36
+ const SourceRange& loc) {
37
+ return nullptr;
38
+ }
39
+
40
+ // Resolve `name` to a TypePtr.
41
+ virtual TypePtr resolveType(const std::string& name, const SourceRange& loc) {
42
+ return nullptr;
43
+ }
44
+ };
45
+
46
+ // A resolver that only understands "torch.foo()" lookups.
47
+ struct NativeResolver : public Resolver {
48
+ std::shared_ptr<SugaredValue> resolveValue(
49
+ const std::string& name,
50
+ GraphFunction& m,
51
+ const SourceRange& loc) override {
52
+ if (name == "torch") {
53
+ return std::make_shared<BuiltinModule>("aten");
54
+ }
55
+ return nullptr;
56
+ }
57
+
58
+ TypePtr resolveType(const std::string& name, const SourceRange& loc)
59
+ override {
60
+ return nullptr;
61
+ }
62
+ };
63
+
64
+ inline std::shared_ptr<NativeResolver> nativeResolver() {
65
+ return std::make_shared<NativeResolver>();
66
+ }
67
+ } // namespace jit
68
+ } // namespace torch
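
NativeResolver above shows the intended pattern: subclass Resolver and override resolveValue/resolveType for the names the compiler should be able to find. A sketch of a resolver that additionally maps a hypothetical "mylib" name onto a BuiltinModule, again written against these internal declarations for illustration only:

// Illustrative only: Resolver, SugaredValue and BuiltinModule are internal
// JIT types declared in the headers above.
#include <torch/csrc/jit/frontend/resolver.h>

namespace example {

struct MyResolver : public torch::jit::Resolver {
  std::shared_ptr<torch::jit::SugaredValue> resolveValue(
      const std::string& name,
      torch::jit::GraphFunction& m,
      const torch::jit::SourceRange& loc) override {
    if (name == "torch") {
      return std::make_shared<torch::jit::BuiltinModule>("aten");
    }
    if (name == "mylib") {  // hypothetical extra namespace of custom ops
      return std::make_shared<torch::jit::BuiltinModule>("mylib");
    }
    return nullptr;  // anything else stays unresolved
  }

  c10::TypePtr resolveType(
      const std::string& name,
      const torch::jit::SourceRange& loc) override {
    return nullptr;  // no extra type names
  }
};

} // namespace example
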
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/schema_matching.h ADDED
@@ -0,0 +1,70 @@
1
+ #pragma once
2
+ #include <torch/csrc/Export.h>
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+ #include <torch/csrc/jit/ir/named_value.h>
5
+
6
+ #include <ATen/core/function_schema.h>
7
+
8
+ namespace torch {
9
+ namespace jit {
10
+
11
+ // Try to match a list of inputs and keyword 'attributes' to this
12
+ // schema. Return the flat list of positional inputs to the call or
13
+ // `c10::nullopt` on failure (`failure_messages` contains a good error
14
+ // report in this case)
15
+
16
+ struct MatchedSchema {
17
+ std::vector<Value*> inputs;
18
+ std::vector<TypePtr> return_types;
19
+ c10::OptNameList return_field_names;
20
+ std::string schema_name;
21
+ };
22
+
23
+ TORCH_API bool isBlockListedSchema(const FunctionSchema& schema);
24
+
25
+ TORCH_API MatchedSchema matchSchema(
26
+ const ::c10::FunctionSchema& schema,
27
+ const SourceRange& loc,
28
+ Graph& graph,
29
+ at::ArrayRef<NamedValue> args,
30
+ at::ArrayRef<NamedValue> kwargs,
31
+ const c10::optional<NamedValue>& self = c10::nullopt);
32
+
33
+ TORCH_API std::pair<size_t, MatchedSchema> matchSchemas(
34
+ const std::vector<const ::c10::FunctionSchema*>& schemas,
35
+ const SourceRange& loc,
36
+ Graph& graph,
37
+ at::ArrayRef<NamedValue> args,
38
+ at::ArrayRef<NamedValue> kwargs,
39
+ const c10::optional<NamedValue>& self = c10::nullopt,
40
+ bool render_errors = false);
41
+
42
+ TORCH_API bool convertibleToList(
43
+ const TypePtr& type,
44
+ const TypePtr& list_type_);
45
+
46
+ TORCH_API std::string getFullSchemaName(const ::c10::FunctionSchema& schema);
47
+
48
+ TORCH_API Value* emitBuiltinCall(
49
+ const SourceRange& loc,
50
+ Graph& graph,
51
+ Symbol name,
52
+ at::ArrayRef<NamedValue> args,
53
+ at::ArrayRef<NamedValue> kwargs,
54
+ const c10::optional<NamedValue>& self = c10::nullopt);
55
+
56
+ TORCH_API c10::optional<size_t> findInputWithName(
57
+ const std::string& name,
58
+ at::ArrayRef<NamedValue> kwargs,
59
+ bool is_aten = false);
60
+
61
+ // applies implicit conversion from value, trying to turn it into type
62
+ // concrete_type; it succeeds if the return_value->isSubtypeOf(concrete_type)
63
+ TORCH_API Value* tryConvertToType(
64
+ const SourceRange& loc,
65
+ Graph& graph,
66
+ const TypePtr& concrete_type,
67
+ Value* value,
68
+ bool allow_conversions);
69
+ } // namespace jit
70
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/schema_type_parser.h ADDED
@@ -0,0 +1,40 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/alias_info.h>
4
+ #include <ATen/core/jit_type.h>
5
+ #include <c10/macros/Macros.h>
6
+ #include <c10/util/FunctionRef.h>
7
+ #include <torch/csrc/jit/frontend/lexer.h>
8
+
9
+ namespace torch {
10
+ namespace jit {
11
+
12
+ using TypePtr = c10::TypePtr;
13
+
14
+ struct TORCH_API SchemaTypeParser {
15
+ TypePtr parseBaseType();
16
+ c10::optional<c10::AliasInfo> parseAliasAnnotation();
17
+ std::pair<TypePtr, c10::optional<c10::AliasInfo>> parseType();
18
+ std::tuple</*fake*/ TypePtr, /*real*/ TypePtr, c10::optional<c10::AliasInfo>>
19
+ parseFakeAndRealType();
20
+ c10::optional<at::ScalarType> parseTensorDType(const std::string& dtype);
21
+ TypePtr parseRefinedTensor();
22
+
23
+ SchemaTypeParser(Lexer& L, bool parse_complete_tensor_types)
24
+ : complete_tensor_types(parse_complete_tensor_types), L(L) {}
25
+
26
+ private:
27
+ c10::optional<bool> tryToParseRequiresGrad();
28
+ c10::optional<c10::Device> tryToParseDeviceType();
29
+ void parseList(
30
+ int begin,
31
+ int sep,
32
+ int end,
33
+ c10::function_ref<void()> callback);
34
+
35
+ bool complete_tensor_types;
36
+ Lexer& L;
37
+ size_t next_id = 0;
38
+ };
39
+ } // namespace jit
40
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/script_type_parser.h ADDED
@@ -0,0 +1,55 @@
1
+ #pragma once
2
+ #include <ATen/core/jit_type.h>
3
+ #include <torch/csrc/Export.h>
4
+ #include <torch/csrc/jit/frontend/resolver.h>
5
+ #include <torch/csrc/jit/frontend/tree_views.h>
6
+
7
+ namespace torch {
8
+ namespace jit {
9
+
10
+ /**
11
+ * class ScriptTypeParser
12
+ *
13
+ * Parses expressions in our typed AST format (TreeView) into types and
14
+ * typenames.
15
+ */
16
+ class TORCH_API ScriptTypeParser {
17
+ public:
18
+ explicit ScriptTypeParser() = default;
19
+ explicit ScriptTypeParser(ResolverPtr resolver)
20
+ : resolver_(std::move(resolver)) {}
21
+
22
+ c10::TypePtr parseTypeFromExpr(const Expr& expr) const;
23
+
24
+ c10::optional<std::pair<c10::TypePtr, int32_t>> parseBroadcastList(
25
+ const Expr& expr) const;
26
+
27
+ c10::TypePtr parseType(const std::string& str);
28
+
29
+ FunctionSchema parseSchemaFromDef(const Def& def, bool skip_self);
30
+
31
+ c10::IValue parseClassConstant(const Assign& assign);
32
+
33
+ private:
34
+ c10::TypePtr parseTypeFromExprImpl(const Expr& expr) const;
35
+
36
+ c10::optional<std::string> parseBaseTypeName(const Expr& expr) const;
37
+ at::TypePtr subscriptToType(
38
+ const std::string& typeName,
39
+ const Subscript& subscript) const;
40
+ std::vector<IValue> evaluateDefaults(
41
+ const SourceRange& r,
42
+ const std::vector<Expr>& default_types,
43
+ const std::vector<Expr>& default_exprs);
44
+ std::vector<Argument> parseArgsFromDecl(const Decl& decl, bool skip_self);
45
+
46
+ std::vector<Argument> parseReturnFromDecl(const Decl& decl);
47
+
48
+ ResolverPtr resolver_ = nullptr;
49
+
50
+ // Need to use `evaluateDefaults` in serialization
51
+ friend struct ConstantTableValue;
52
+ friend struct SourceImporterImpl;
53
+ };
54
+ } // namespace jit
55
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/source_range.h ADDED
@@ -0,0 +1,457 @@
1
+ #pragma once
2
+ #include <c10/util/Exception.h>
3
+ #include <c10/util/Optional.h>
4
+
5
+ #include <algorithm>
6
+ #include <iterator>
7
+ #include <memory>
8
+ #include <numeric>
9
+ #include <ostream>
10
+ #include <regex>
11
+ #include <sstream>
12
+ #include <unordered_map>
13
+
14
+ namespace torch::jit {
15
+
16
+ class SourceRangeUnpickler;
17
+ struct SourceRange;
18
+
19
+ // A stringlike class backed by a vector of string_view
20
+ // the string represented is logically the concatenation of the string_views
21
+ // This has the advantage of not needing contiguous memory.
22
+ struct TORCH_API StringCordView {
23
+ StringCordView();
24
+ StringCordView(const StringCordView&) = default;
25
+ StringCordView(StringCordView&&) noexcept = default;
26
+ StringCordView(
27
+ std::vector<c10::string_view> inputs,
28
+ std::vector<std::shared_ptr<std::string>> ownerships);
29
+
30
+ StringCordView& operator=(const StringCordView&) = default;
31
+ StringCordView& operator=(StringCordView&&) noexcept = default;
32
+
33
+ size_t size() const {
34
+ return accumulated_sizes_.back();
35
+ }
36
+
37
+ size_t find(const std::string& tok, size_t start) const;
38
+ size_t find_regex(const std::string& tok, size_t start) const;
39
+ StringCordView substr(size_t start, size_t size) const;
40
+
41
+ char at(size_t index) const {
42
+ return *iter_for_pos(index);
43
+ }
44
+ char operator[](size_t index) const {
45
+ return at(index);
46
+ }
47
+
48
+ std::string str() const {
49
+ std::stringstream ss;
50
+ for (auto s : pieces_) {
51
+ ss << std::string(s);
52
+ }
53
+ return ss.str();
54
+ }
55
+
56
+ bool operator==(const std::string& rhs) const;
57
+
58
+ bool operator==(const StringCordView& rhs) const;
59
+
60
+ c10::string_view piece(size_t index) const {
61
+ return pieces_[index];
62
+ }
63
+
64
+ struct Iterator {
65
+ Iterator(
66
+ const StringCordView* str,
67
+ size_t start_line,
68
+ size_t start_pos,
69
+ size_t size)
70
+ : line_(start_line), pos_(start_pos), str_(str), size_(size) {}
71
+ explicit Iterator(const StringCordView* str)
72
+ : Iterator(str, 0, 0, str->size()) {}
73
+
74
+ Iterator() : Iterator(nullptr, 0, 0, 0) {}
75
+
76
+ Iterator(const Iterator&) = default;
77
+ Iterator(Iterator&&) = default;
78
+ Iterator& operator=(const Iterator&) = default;
79
+ Iterator& operator=(Iterator&&) = default;
80
+
81
+ Iterator operator++() {
82
+ if (size_ == 0) {
83
+ return *this;
84
+ }
85
+ if ((pos_ + 1) < str_->pieces_[line_].size()) {
86
+ pos_++;
87
+ } else {
88
+ line_++;
89
+ pos_ = 0;
90
+ }
91
+ return *this;
92
+ }
93
+
94
+ Iterator operator++(int) {
95
+ Iterator prev(*this);
96
+ ++(*this);
97
+ return prev;
98
+ }
99
+
100
+ Iterator next_iter() const {
101
+ Iterator next(*this);
102
+ ++next;
103
+ return next;
104
+ }
105
+
106
+ Iterator& operator+=(size_t num) {
107
+ if (!has_next()) {
108
+ return *this;
109
+ }
110
+ size_t target_pos = pos_ + num;
111
+ if (target_pos >= str_->accumulated_sizes_[line_] &&
112
+ (line_ + 1) < str_->accumulated_sizes_.size() &&
113
+ target_pos < str_->accumulated_sizes_[line_ + 1]) {
114
+ pos_ = target_pos;
115
+ return *this;
116
+ }
117
+
118
+ size_t target_abs_pos = pos() + num;
119
+ *this = str_->iter_for_pos(target_abs_pos);
120
+ return *this;
121
+ }
122
+
123
+ bool operator==(const Iterator& rhs) const {
124
+ if (!has_next() && !rhs.has_next()) {
125
+ return true;
126
+ }
127
+ return (str_ == rhs.str_) && (line_ == rhs.line_) && (pos_ == rhs.pos_);
128
+ }
129
+ bool operator!=(const Iterator& rhs) {
130
+ return !((*this) == rhs);
131
+ }
132
+ bool has_next() const {
133
+ return size_ > 0 && (line_ < str_->pieces_.size());
134
+ }
135
+
136
+ char operator*() const {
137
+ TORCH_INTERNAL_ASSERT(line_ < str_->pieces_.size());
138
+ TORCH_INTERNAL_ASSERT(pos_ < str_->pieces_[line_].size());
139
+ return str_->pieces_[line_].at(pos_);
140
+ }
141
+
142
+ // returns rest of the line of the current iterator
143
+ c10::string_view rest_line() const {
144
+ if (line_ >= str_->pieces_.size()) {
145
+ return "";
146
+ }
147
+
148
+ c10::string_view cur_line = str_->pieces_[line_];
149
+ return cur_line.substr(pos_, std::string::npos);
150
+ }
151
+
152
+ size_t pos() const {
153
+ if (size_ == 0) {
154
+ return 0;
155
+ }
156
+ return str_->accumulated_sizes_[line_] + pos_;
157
+ }
158
+
159
+ private:
160
+ size_t line_;
161
+ size_t pos_;
162
+ const StringCordView* str_;
163
+ size_t size_;
164
+ friend struct StringCordView;
165
+ };
166
+
167
+ Iterator begin() const {
168
+ return Iterator(this, 0, 0, size());
169
+ }
170
+ Iterator end() const {
171
+ return Iterator(this, pieces_.size(), 0, 0);
172
+ }
173
+ Iterator iter_for_pos(size_t pos) const;
174
+
175
+ private:
176
+ std::vector<c10::string_view> pieces_;
177
+ std::vector<size_t> accumulated_sizes_;
178
+ std::vector<std::shared_ptr<std::string>> owned_strings_;
179
+ };
180
+
181
+ // Source represents a code segment. It keeps track of:
182
+ // - text_view : the view into text of the code segment
183
+ // - filename (optional) : if present, represents the name of the file from
184
+ // which the code segment originated.
185
+ // - starting_line_no : represents the line in the original file where the
186
+ // code segment started.
187
+ struct TORCH_API Source {
188
+ // Whether or not Source should copy the string passed in the constructor.
189
+ enum CopiesString { COPIES_STRING, DONT_COPY };
190
+
191
+ explicit Source(
192
+ c10::string_view text_view,
193
+ c10::optional<std::string> filename = c10::nullopt,
194
+ size_t starting_line_no = 0,
195
+ std::shared_ptr<SourceRangeUnpickler> gen_ranges = nullptr,
196
+ CopiesString copies_str = COPIES_STRING)
197
+ : filename_(std::move(filename)),
198
+ starting_line_no_(starting_line_no),
199
+ gen_ranges_(std::move(gen_ranges)) {
200
+ if (copies_str == COPIES_STRING) {
201
+ std::shared_ptr<std::string> allocated_str =
202
+ std::make_shared<std::string>(text_view.data(), text_view.size());
203
+ text_view_ = StringCordView({*allocated_str}, {allocated_str});
204
+ } else {
205
+ text_view_ = StringCordView({text_view}, {});
206
+ }
207
+
208
+ calc_line_start_offsets();
209
+ }
210
+
211
+ explicit Source(
212
+ StringCordView str,
213
+ c10::optional<std::string> filename = c10::nullopt,
214
+ size_t starting_line_no = 0,
215
+ std::shared_ptr<SourceRangeUnpickler> gen_ranges = nullptr)
216
+ : text_view_(std::move(str)),
217
+ filename_(std::move(filename)),
218
+ starting_line_no_(starting_line_no),
219
+ gen_ranges_(std::move(gen_ranges)) {
220
+ calc_line_start_offsets();
221
+ }
222
+ // Given a line number (within source_), return the byte offset of the
223
+ // beginning of that line.
224
+ size_t offset_for_line(size_t line) const {
225
+ return line_starting_offsets_.at(line);
226
+ }
227
+
228
+ // Returns number of lines present.
229
+ size_t num_lines() const {
230
+ return line_starting_offsets_.size();
231
+ }
232
+
233
+ // Calculate the line (within the code segment) on which `offset` resides.
234
+ size_t lineno_for_offset(size_t offset) const {
235
+ auto iter = std::upper_bound(
236
+ line_starting_offsets_.begin(), line_starting_offsets_.end(), offset);
237
+ return iter - line_starting_offsets_.begin() - 1;
238
+ }
239
+
240
+ // Calculate the line (within the original source file, if present) on which
241
+ // `lineno` resides.
242
+ size_t lineno_to_source_lineno(size_t lineno) const {
243
+ if (filename_) {
244
+ return lineno + starting_line_no_;
245
+ } else {
246
+ return lineno;
247
+ }
248
+ }
249
+
250
+ StringCordView get_line(size_t lineno) const {
251
+ auto start = offset_for_line(lineno);
252
+ auto size = (lineno + 1) < num_lines() ? offset_for_line(lineno + 1) - start
253
+ : text_view_.size() - start;
254
+ return text_view_.substr(start, size);
255
+ }
256
+
257
+ const StringCordView& text_str() const {
258
+ return text_view_;
259
+ }
260
+
261
+ char char_at(size_t index) const {
262
+ return text_view_.at(index);
263
+ }
264
+
265
+ size_t size() const {
266
+ return text_view_.size();
267
+ }
268
+
269
+ c10::optional<std::string>& filename() {
270
+ return filename_;
271
+ }
272
+
273
+ size_t starting_line_no() const {
274
+ return starting_line_no_;
275
+ }
276
+
277
+ c10::optional<SourceRange> findSourceRangeThatGenerated(
278
+ const SourceRange& range);
279
+
280
+ ~Source() = default;
281
+
282
+ private:
283
+ void calc_line_start_offsets() {
284
+ line_starting_offsets_.clear();
285
+ line_starting_offsets_.push_back(0);
286
+ size_t pos = 0;
287
+ while ((pos = text_view_.find("\n", pos)) != std::string::npos) {
288
+ line_starting_offsets_.push_back(++pos);
289
+ }
290
+ }
291
+
292
+ StringCordView text_view_;
293
+
294
+ c10::optional<std::string> filename_;
295
+ // If filename_ is not present, starting_line_no_ is don't care
296
+ size_t starting_line_no_;
297
+ // Starting offsets for lines into the source. e.g. line 0 starts at
298
+ // line_starting_offsets_[0], etc.
299
+ std::vector<size_t> line_starting_offsets_;
300
+
301
+ std::shared_ptr<SourceRangeUnpickler> gen_ranges_;
302
+ };
303
+
304
+ // A SourceRange is a reference to subset of a Source, specified by `start` and
305
+ // `end` byte offsets into the source text.
306
+ struct TORCH_API SourceRange {
307
+ SourceRange(std::shared_ptr<Source> source_view, size_t start_, size_t end_)
308
+ : source_view_(std::move(source_view)), start_(start_), end_(end_) {
309
+ if (source_view_) {
310
+ start_iter_ = source_view_->text_str().iter_for_pos(start_);
311
+ }
312
+ }
313
+
314
+ SourceRange() : source_view_(nullptr), start_(0), end_(0) {}
315
+
316
+ SourceRange(
317
+ std::shared_ptr<Source> source_view_,
318
+ StringCordView::Iterator start_iter,
319
+ size_t end_)
320
+ : source_view_(std::move(source_view_)),
321
+ start_(start_iter.pos()),
322
+ end_(end_),
323
+ start_iter_(start_iter) {}
324
+
325
+ const c10::string_view token_text() const {
326
+ size_t size = end() - start();
327
+ return start_iter_.rest_line().substr(0, size);
328
+ }
329
+
330
+ const StringCordView text() const {
331
+ return source_view_->text_str().substr(start(), end() - start());
332
+ }
333
+ size_t size() const {
334
+ return end() - start();
335
+ }
336
+ static const size_t CONTEXT = 3;
337
+ void highlight(std::ostream& out) const;
338
+
339
+ // Customizable version of 'highlight' method.
340
+ void print_with_context(
341
+ std::ostream& out,
342
+ size_t context,
343
+ bool highlight,
344
+ const std::string& funcname) const;
345
+
346
+ const std::shared_ptr<Source>& source() const {
347
+ return source_view_;
348
+ }
349
+ size_t start() const {
350
+ return start_;
351
+ }
352
+ size_t end() const {
353
+ return end_;
354
+ }
355
+ std::string str() const {
356
+ std::stringstream ss;
357
+ highlight(ss);
358
+ return ss.str();
359
+ }
360
+
361
+ c10::optional<std::tuple<std::string, size_t, size_t>> file_line_col() const {
362
+ if (!source_view_ || !source()->filename()) {
363
+ return c10::nullopt;
364
+ }
365
+
366
+ auto lineno = source_view_->lineno_for_offset(start_);
367
+ auto col_offset = (int)start_ - (int)source_view_->offset_for_line(lineno);
368
+ // TODO: c10::optional<>::value returns an rvalue ref so can't use it here??
369
+ return std::make_tuple<std::string, size_t, size_t>(
370
+ source_view_->filename().value_or(""),
371
+ source_view_->lineno_to_source_lineno(lineno),
372
+ (size_t)col_offset);
373
+ }
374
+
375
+ bool operator==(const SourceRange& rhs) const {
376
+ return start() == rhs.start() && end() == rhs.end() &&
377
+ source() == rhs.source();
378
+ }
379
+
380
+ bool operator!=(const SourceRange& rhs) const {
381
+ return !(*this == rhs);
382
+ }
383
+
384
+ c10::optional<SourceRange> findSourceRangeThatGenerated() const {
385
+ if (!source_view_) {
386
+ return c10::nullopt;
387
+ }
388
+ return source_view_->findSourceRangeThatGenerated(*this);
389
+ }
390
+
391
+ protected:
392
+ std::shared_ptr<Source> source_view_;
393
+
394
+ private:
395
+ size_t start_;
396
+ size_t end_;
397
+ StringCordView::Iterator start_iter_;
398
+ };
399
+
400
+ // OwnedSourceRange is just like a SourceRange except that it owns a `Source`
401
+ // instead of referencing a shared `Source`. Thus OwnedSourceRange owns a copy of the source text.
402
+ struct OwnedSourceRange : public SourceRange {
403
+ explicit OwnedSourceRange(const SourceRange& source_range)
404
+ : SourceRange(source_range) {
405
+ const auto& source = source_range.source();
406
+ if (source) {
407
+ source_view_ = std::make_shared<Source>(
408
+ source->text_str().str(),
409
+ source->filename(),
410
+ source->starting_line_no());
411
+ }
412
+ }
413
+ };
414
+
415
+ struct TORCH_API SourceRangeHasher {
416
+ public:
417
+ size_t operator()(const torch::jit::SourceRange& key) const;
418
+ };
419
+
420
+ struct StackEntry {
421
+ std::string filename;
422
+ SourceRange range;
423
+ };
424
+
425
+ TORCH_API void format_stack_trace(
426
+ std::ostream& out,
427
+ const std::vector<StackEntry>& entries);
428
+
429
+ inline std::ostream& operator<<(std::ostream& out, const SourceRange& range) {
430
+ range.highlight(out);
431
+ return out;
432
+ }
433
+
434
+ // A pair of (byte offset, SourceRange) describing a specific segment
435
+ // of the output stream
436
+ struct TaggedRange {
437
+ TaggedRange(size_t bytes, SourceRange range)
438
+ : bytes(bytes), range(std::move(range)) {}
439
+ size_t bytes;
440
+ SourceRange range;
441
+ };
442
+ using SourceRangeRecords = std::vector<TaggedRange>;
443
+ using SourceRangeTagMap =
444
+ std::unordered_map<SourceRange, int64_t, SourceRangeHasher>;
445
+
446
+ } // namespace torch::jit
447
+
448
+ namespace std {
449
+ template <>
450
+ struct iterator_traits<torch::jit::StringCordView::Iterator> {
451
+ using value_type = char;
452
+ using difference_type = ptrdiff_t;
453
+ using pointer = char*;
454
+ using reference = char&;
455
+ using iterator_category = std::forward_iterator_tag;
456
+ };
457
+ } // namespace std
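
Source::lineno_for_offset above works because line_starting_offsets_ is sorted: a byte offset is mapped to a line with std::upper_bound, and to a column by subtracting that line's starting offset. The same computation in self-contained form:

#include <algorithm>
#include <cassert>
#include <string>
#include <vector>

// Same idea as Source::calc_line_start_offsets / lineno_for_offset: record
// where each line begins, then binary-search for the line containing an offset.
struct LineIndex {
  std::vector<size_t> line_starts;

  explicit LineIndex(const std::string& text) {
    line_starts.push_back(0);
    size_t pos = 0;
    while ((pos = text.find('\n', pos)) != std::string::npos)
      line_starts.push_back(++pos);
  }
  size_t lineno_for_offset(size_t offset) const {
    auto it = std::upper_bound(line_starts.begin(), line_starts.end(), offset);
    return static_cast<size_t>(it - line_starts.begin()) - 1;
  }
  size_t col_for_offset(size_t offset) const {
    return offset - line_starts[lineno_for_offset(offset)];
  }
};

int main() {
  LineIndex idx("def f():\n    return 1\n");
  // Offset 13 is the 'r' of "return": line 1 (0-based), column 4.
  assert(idx.lineno_for_offset(13) == 1);
  assert(idx.col_for_offset(13) == 4);
}
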
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/source_ref.h ADDED
@@ -0,0 +1,47 @@
1
+ #pragma once
2
+
3
+ #include <functional>
4
+ #include <memory>
5
+
6
+ #include <ATen/core/ivalue.h>
7
+ #include <c10/macros/Export.h>
8
+ #include <torch/csrc/jit/frontend/source_range.h>
9
+
10
+ namespace torch {
11
+ namespace jit {
12
+
13
+ /**
14
+ * SourceRef does two things:
15
+ * 1. Owns a Source object.
16
+ * 2. Serves as lookup key to the owned Source in associative containers, for
17
+ * runtime data aggregation.
18
+ * We don't want to use std::shared_ptr<Source> directly because we want to
19
+ * support heterogeneous lookup, and also shared_ptr is an implementation detail
20
+ * which should be encapsulated.
21
+ */
22
+ class TORCH_API SourceRef : public CustomClassHolder {
23
+ public:
24
+ explicit SourceRef(std::shared_ptr<Source> source_view)
25
+ : source_view_(std::move(source_view)) {}
26
+ bool operator==(const SourceRef& other) const {
27
+ return source_view_ == other.source_view_;
28
+ }
29
+ bool operator<(const Source& other) const {
30
+ return source_view_.get() < &other;
31
+ }
32
+ friend bool operator<(const Source& other, const SourceRef& self) {
33
+ return &other < self.source_view_.get();
34
+ }
35
+ bool operator<(const SourceRef& other) const {
36
+ return *this < *other.source_view_.get();
37
+ }
38
+ const Source* operator->() const {
39
+ return source_view_.get();
40
+ }
41
+
42
+ private:
43
+ std::shared_ptr<Source> source_view_;
44
+ };
45
+
46
+ } // namespace jit
47
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/strtod.h ADDED
@@ -0,0 +1,12 @@
1
+ #pragma once
2
+
3
+ #include <c10/macros/Macros.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ TORCH_API double strtod_c(const char* nptr, char** endptr);
9
+ TORCH_API float strtof_c(const char* nptr, char** endptr);
10
+
11
+ } // namespace jit
12
+ } // namespace torch
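
strtod_c and strtof_c mirror the signatures of the C standard strtod/strtof; presumably they exist so number parsing does not depend on the current locale's decimal separator, though that rationale is an assumption rather than something this header states. A minimal usage sketch, assuming strtod-like behavior:

// Illustrative only: strtod_c is an internal helper declared above.
#include <torch/csrc/jit/frontend/strtod.h>

#include <cassert>
#include <cstring>

int main() {
  const char* text = "3.25rest";
  char* end = nullptr;
  double v = torch::jit::strtod_c(text, &end);
  assert(v == 3.25);                       // 3.25 is exactly representable
  assert(std::strcmp(end, "rest") == 0);   // end points just past the number
}
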
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/sugared_value.h ADDED
@@ -0,0 +1,857 @@
1
+ #pragma once
2
+ #include <c10/util/Optional.h>
3
+ #include <functional>
4
+ #include <memory>
5
+ #include <string>
6
+ #include <utility>
7
+
8
+ #include <ATen/core/symbol.h>
9
+ #include <caffe2/serialize/versions.h>
10
+ #include <torch/csrc/jit/api/module.h>
11
+ #include <torch/csrc/jit/frontend/error_report.h>
12
+ #include <torch/csrc/jit/frontend/schema_matching.h>
13
+ #include <torch/csrc/jit/frontend/versioned_symbols.h>
14
+ #include <torch/csrc/jit/ir/ir.h>
15
+
16
+ namespace torch {
17
+ namespace jit {
18
+
19
+ using SugaredValuePtr = std::shared_ptr<SugaredValue>;
20
+
21
+ // The AST can contain nodes like `self`, `self.b` or `python_fn` that
22
+ // are not first-class values in the graph representation, but instead
23
+ // will be desugared based on how they are used in the AST.
24
+
25
+ // SugaredValue is used to temporarily represent these values in a way
26
+ // that separates their behavior from the AST -> IR converter itself.
27
+ // This allows us to keep dependencies on python minimal.
28
+
29
+ struct TORCH_API SugaredValue
30
+ : public std::enable_shared_from_this<SugaredValue> {
31
+ // what is this node? for error reporting (e.g. Module, python function)
32
+ virtual std::string kind() const = 0;
33
+
34
+ // what can we do with this thing?
35
+ // use it as a value e.g. `this + 4`
36
+ virtual Value* asValue(const SourceRange& loc, GraphFunction& m) {
37
+ throw ErrorReport(loc) << kind() << " cannot be used as a value";
38
+ }
39
+
40
+ // select an attribute on it, e.g. `this.field`
41
+ virtual std::shared_ptr<SugaredValue> attr(
42
+ const SourceRange& loc,
43
+ GraphFunction& m,
44
+ const std::string& field) {
45
+ throw ErrorReport(loc) << "attribute lookup is not defined on " << kind();
46
+ }
47
+
48
+ virtual bool hasAttr(
49
+ const SourceRange& loc,
50
+ GraphFunction& m,
51
+ const std::string& field) {
52
+ throw ErrorReport(loc) << "attribute lookup is not defined on " << kind();
53
+ }
54
+
55
+ // assign an attribute on it, e.g. `this.field = newValue`
56
+ virtual void setAttr(
57
+ const SourceRange& loc,
58
+ GraphFunction& m,
59
+ const std::string& field,
60
+ Value* newValue) {
61
+ throw ErrorReport(loc) << "attribute assignment is not defined on "
62
+ << kind();
63
+ }
64
+
65
+ // use it as a vector of values, e.g. a tuple of values as return value from
66
+ // a method invocation
67
+ virtual std::vector<std::shared_ptr<SugaredValue>> asTuple(
68
+ const SourceRange& loc,
69
+ GraphFunction& m,
70
+ const c10::optional<size_t>& size_hint = {}) {
71
+ throw ErrorReport(loc) << kind() << " cannot be used as a tuple";
72
+ }
73
+
74
+ // TODO @wconstab refactor to use ModuleValue::asTuple instead of new API
75
+ virtual SugaredValuePtr asTupleValue(
76
+ const SourceRange& loc,
77
+ GraphFunction& m) {
78
+ throw ErrorReport(loc) << kind() << " cannot be used as a tuplevalue";
79
+ }
80
+
81
+ virtual std::vector<std::shared_ptr<SugaredValue>> asType(
82
+ const SourceRange& loc,
83
+ Method& m) {
84
+ throw ErrorReport(loc) << kind() << " cannot be used as a type";
85
+ }
86
+
87
+ // call it like a function, e.g. `outputs = this(inputs)`
88
+ virtual std::shared_ptr<SugaredValue> call(
89
+ const SourceRange& loc,
90
+ GraphFunction& m,
91
+ // note: names for args will be 'argument 0', 'argument 1', etc..
92
+ at::ArrayRef<NamedValue> args,
93
+ at::ArrayRef<NamedValue> kwargs,
94
+ size_t n_binders) {
95
+ // n_binders is always set to the number of variables an expression is
96
+ // syntactically bound to:
97
+ // a = foo() # 1 binder (note in this case the single binder might be a
98
+ // tuple) a, * b = foo() # 1 binder a, b = foo() # 2 binders foo() # 0
99
+ // binders
100
+ //
101
+ // In subexpressions, like bar() in foo(bar()), n_binders is always set to
102
+ // 1. n_binders is used as a hint to subexpressions to determine how many
103
+ // values they should return when that number is ambiguous statically. In
104
+ // particular it is currently used to decide how many tensors a call to a
105
+ // python function will return. It is only a hint, functions do not have to
106
+ // check that n_binders match the number of things they are returning, the
107
+ // assignment logic will do that anyway.
108
+
109
+ throw ErrorReport(loc) << "cannot call a " << kind();
110
+ }
111
+
112
+ // This function is called to convert a SugaredValue to its iterator.
113
+ // For example, when iterating through a Dict we iterate over its keys
114
+ virtual std::shared_ptr<SugaredValue> iter(
115
+ const SourceRange& loc,
116
+ GraphFunction& m) {
117
+ throw ErrorReport(loc) << kind() << " cannot be used as an iterable";
118
+ }
119
+
120
+ // If we are iterating over a Sugared Value and it returns a value from this
121
+ // function, then we emit an unrolled loop over the variable. This allows us
122
+ // to support containers of heterogeneous types, like Module Containers &
123
+ // Tuples
124
+ virtual c10::optional<int64_t> staticLen() {
125
+ return c10::nullopt;
126
+ }
127
+
128
+ // When iterating over this SugaredValue, should we emit the for loop as an
129
+ // unrolled loop.
130
+ bool shouldEmitUnrolled() {
131
+ return staticLen() != c10::nullopt;
132
+ }
133
+
134
+ // Return the length of this thing; if it has no length, it can't be iterated.
135
+ // If it does not have a statically-determinable length, then it cannot
136
+ // be iterated over with a modulelist. If it does it must return a constant
137
+ // Value *
138
+ virtual Value* len(const SourceRange& loc, GraphFunction& m) {
139
+ throw ErrorReport(loc) << "'" << kind() << "'"
140
+ << " object is not iterable";
141
+ }
142
+
143
+ // expression for the ith element of an iterable value
144
+ virtual std::shared_ptr<SugaredValue> getitem(
145
+ const SourceRange& loc,
146
+ GraphFunction& m,
147
+ Value* idx,
148
+ TypePtr type_hint = nullptr) {
149
+ throw ErrorReport(loc) << "'" << kind() << "'"
150
+ << " object is not subscriptable";
151
+ }
152
+
153
+ virtual ~SugaredValue() = default;
154
+ };
155
+
156
+ // most things in the environment are just simple value types
157
+ // and not special python syntax sugar types
158
+ struct TORCH_API SimpleValue : public SugaredValue {
159
+ SimpleValue(Value* value) : value_(value) {}
160
+ std::string kind() const override {
161
+ std::stringstream ss;
162
+ // NOLINTNEXTLINE(clang-analyzer-core.CallAndMessage)
163
+ ss << "value of type '" << value_->type()->annotation_str() << "'";
164
+ return ss.str();
165
+ }
166
+ Value* asValue(const SourceRange& range, GraphFunction& m) override {
167
+ return value_;
168
+ }
169
+ std::vector<std::shared_ptr<SugaredValue>> asTuple(
170
+ const SourceRange& loc,
171
+ GraphFunction& m,
172
+ const c10::optional<size_t>& size_hint = {}) override;
173
+ std::shared_ptr<SugaredValue> attr(
174
+ const SourceRange& loc,
175
+ GraphFunction& m,
176
+ const std::string& field) override;
177
+
178
+ bool hasAttr(
179
+ const SourceRange& loc,
180
+ GraphFunction& m,
181
+ const std::string& field) override;
182
+
183
+ void setAttr(
184
+ const SourceRange& loc,
185
+ GraphFunction& m,
186
+ const std::string& field,
187
+ Value* newValue) override;
188
+
189
+ std::shared_ptr<SugaredValue> call(
190
+ const SourceRange& loc,
191
+ GraphFunction& m,
192
+ // note: names for args will be 'argument 0', 'argument 1', etc..
193
+ at::ArrayRef<NamedValue> args,
194
+ at::ArrayRef<NamedValue> kwargs,
195
+ size_t n_binders) override;
196
+
197
+ std::shared_ptr<SugaredValue> iter(const SourceRange& loc, GraphFunction& m)
198
+ override;
199
+
200
+ Value* getValue() const {
201
+ return value_;
202
+ }
203
+
204
+ Value* len(const SourceRange& loc, GraphFunction& m) override;
205
+ SugaredValuePtr getitem(
206
+ const SourceRange& loc,
207
+ GraphFunction& m,
208
+ Value* idx,
209
+ TypePtr type_hint = nullptr) override;
210
+
211
+ private:
212
+ Value* value_;
213
+ };
214
+
215
+ struct TORCH_API BuiltinFunction : public SugaredValue {
216
+ BuiltinFunction(Symbol symbol, c10::optional<NamedValue> self)
217
+ : symbol(symbol), self(std::move(self)) {}
218
+
219
+ // The symbol of the function (e.g. `aten::relu`).
220
+ Symbol symbol;
221
+
222
+ // if this is method, then this is the self argument.
223
+ c10::optional<NamedValue> self;
224
+ std::string kind() const override {
225
+ return "builtin";
226
+ }
227
+ std::shared_ptr<SugaredValue> call(
228
+ const SourceRange& loc,
229
+ GraphFunction& m,
230
+ at::ArrayRef<NamedValue> args,
231
+ at::ArrayRef<NamedValue> kwargs,
232
+ size_t n_binders) override;
233
+
234
+ // try to create this builtin but if it doesn't exist or the self argument
235
+ // cannot possibly match, then return nullptr. Use in situations where it is
236
+ // not clear if it is a valid builtin
237
+ static std::shared_ptr<BuiltinFunction> tryCreate(
238
+ Symbol symbol,
239
+ c10::optional<NamedValue> self);
240
+ };
241
+
242
+ struct TORCH_API SugaredTupleValue : public SugaredValue {
243
+ explicit SugaredTupleValue(std::vector<std::shared_ptr<SugaredValue>> tup)
244
+ : tup_(std::move(tup)){};
245
+
246
+ std::vector<std::shared_ptr<SugaredValue>> asTuple(
247
+ const SourceRange& loc,
248
+ GraphFunction& m,
249
+ const c10::optional<size_t>& size_hint = {}) override {
250
+ return tup_;
251
+ };
252
+
253
+ Value* asValue(const SourceRange& loc, GraphFunction& m) override {
254
+ std::vector<Value*> vec;
255
+ vec.reserve(tup_.size());
256
+ for (const auto& sv : tup_) {
257
+ vec.push_back(sv->asValue(loc, m));
258
+ }
259
+ Graph& g = *m.graph();
260
+ return g.insertNode(g.createTuple(vec))->output();
261
+ }
262
+
263
+ std::string kind() const override {
264
+ return "Tuple";
265
+ }
266
+
267
+ SugaredValuePtr getitem(
268
+ const SourceRange& loc,
269
+ GraphFunction& m,
270
+ Value* idx,
271
+ TypePtr type_hint = nullptr) override {
272
+ if (!(idx->type()->cast<IntType>() && toIValue(idx))) {
273
+ throw ErrorReport(loc)
274
+ << "Expected integer literal for index but got a variable or non-integer. "
275
+ << "ModuleList/Sequential indexing is only supported with integer literals. "
276
+ << "For example, 'i = 4; self.layers[i](x)' will fail because i is not a literal. "
277
+ << "Enumeration is supported, e.g. 'for index, v in enumerate(self): out = v(inp)'";
278
+ }
279
+ auto index = toIValue(idx)->toInt();
280
+ int64_t adj_index =
281
+ (index < 0) ? index + static_cast<int64_t>(tup_.size()) : index;
282
+ if (!(adj_index >= 0 && adj_index < static_cast<int64_t>(tup_.size()))) {
283
+ throw ErrorReport(loc)
284
+ << "Index " << index << " out of range of length " << tup_.size();
285
+ }
286
+ return tup_.at(adj_index);
287
+ }
288
+
289
+ // This function is called when a SugaredValue is used to convert a
290
+ // SugaredValue to its iterator. For example, when iterating through a Dict we
291
+ // iterate over its keys
292
+ std::shared_ptr<SugaredValue> iter(const SourceRange& loc, GraphFunction& m)
293
+ override {
294
+ return shared_from_this();
295
+ };
296
+
297
+ // Because this is used to contain SugaredValues of heterogeneous types,
298
+ // we define staticLen() so that when this is iterated over it is emitted
299
+ // as an unrolled loop.
300
+ c10::optional<int64_t> staticLen() override {
301
+ return static_cast<int64_t>(tup_.size());
302
+ }
303
+
304
+ std::vector<std::shared_ptr<SugaredValue>> tup_;
305
+ };
306
+
307
+ struct TORCH_API BuiltinModule : public SugaredValue {
308
+ BuiltinModule(std::string name, c10::optional<int64_t> version = at::nullopt)
309
+ : name(std::move(name)), version(version) {}
310
+
311
+ std::string kind() const override {
312
+ return "builtin module";
313
+ }
314
+ std::shared_ptr<SugaredValue> attr(
315
+ const SourceRange& loc,
316
+ GraphFunction& m,
317
+ const std::string& field) override {
318
+ if (field == "autograd") {
319
+ // When referring to torch.autograd, it is also considered to be a
320
+ // BuiltinModule and we will dispatch to the aten operators for the
321
+ // methods under its module.
322
+ return std::make_shared<BuiltinModule>("aten", version);
323
+ }
324
+
325
+ auto sym = Symbol::fromQualString(name + "::" + field);
326
+ return std::make_shared<BuiltinFunction>(sym, c10::nullopt);
327
+ }
328
+
329
+ private:
330
+ std::string name;
331
+ // when we add operator versioning, emit this op as it existed at 'version';
332
+ // if not set, use the latest version
333
+ c10::optional<int64_t> version;
334
+ };
335
+
336
+ // Represents a class, analogous to `int` or `dict`. Instances of classes,
337
+ // like `1` or `{"foo": 5}`, are represented as SimpleValues
338
+ struct TORCH_API ClassValue : public SugaredValue {
339
+ explicit ClassValue(ClassTypePtr type) : type_(std::move(type)) {}
340
+
341
+ // Call the type's constructor, as in:
342
+ // n = Foo(constructor_arg)
343
+ std::shared_ptr<SugaredValue> call(
344
+ const SourceRange& loc,
345
+ GraphFunction& m,
346
+ at::ArrayRef<NamedValue> args,
347
+ at::ArrayRef<NamedValue> kwargs,
348
+ size_t n_binders) override;
349
+
350
+ std::shared_ptr<SugaredValue> attr(
351
+ const SourceRange& loc,
352
+ GraphFunction& m,
353
+ const std::string& field) override;
354
+
355
+ std::string kind() const override {
356
+ return type_->str();
357
+ }
358
+
359
+ ClassTypePtr type_;
360
+ };
361
+
362
+ struct TORCH_API NamedTupleConstructor : public SugaredValue {
363
+ explicit NamedTupleConstructor(TupleTypePtr type) : type_(std::move(type)) {}
364
+
365
+ std::shared_ptr<SugaredValue> call(
366
+ const SourceRange& loc,
367
+ GraphFunction& m,
368
+ at::ArrayRef<NamedValue> args,
369
+ at::ArrayRef<NamedValue> kwargs,
370
+ size_t n_binders) override;
371
+
372
+ std::string kind() const override {
373
+ return type_->str();
374
+ }
375
+
376
+ TupleTypePtr type_;
377
+ };
378
+
379
+ struct FunctionValue : public SugaredValue {
380
+ FunctionValue(Function* callee) : callees_({callee}) {}
381
+ FunctionValue(const StrongFunctionPtr& p)
382
+ : callees_({p.function_}), cu_(p.cu_) {}
383
+ FunctionValue(const std::vector<StrongFunctionPtr>& callees) {
384
+ for (const StrongFunctionPtr& callee : callees) {
385
+ cu_ = cu_ ? cu_ : callee.cu_;
386
+ TORCH_INTERNAL_ASSERT(callee.cu_ == cu_);
387
+ callees_.push_back(callee.function_);
388
+ }
389
+ }
390
+
391
+ std::string kind() const override {
392
+ return "function";
393
+ }
394
+
395
+ std::shared_ptr<SugaredValue> call(
396
+ const SourceRange& loc,
397
+ GraphFunction& f,
398
+ at::ArrayRef<NamedValue> args,
399
+ at::ArrayRef<NamedValue> kwargs,
400
+ size_t n_binders) override {
401
+ std::vector<const FunctionSchema*> schemas;
402
+ for (Function* callee : callees_) {
403
+ try {
404
+ callee->ensure_defined();
405
+ } catch (const RecursiveMethodCallError&) {
406
+ throw ErrorReport(loc)
407
+ << " function '" << callee->name() << "' is called recursively. "
408
+ << "Recursive calls are not supported";
409
+ }
410
+ schemas.push_back(&callee->getSchema());
411
+ }
412
+ auto match = matchSchemas(schemas, loc, *f.graph(), args, kwargs);
413
+ Value* output =
414
+ f.graph()->insertFunctionCall(callees_[match.first], match.second);
415
+ output->node()->setSourceRange(loc);
416
+ return std::make_shared<SimpleValue>(output);
417
+ }
418
+
419
+ const std::vector<Function*>& callees() {
420
+ return callees_;
421
+ }
422
+
423
+ private:
424
+ std::vector<Function*> callees_;
425
+ // TODO holding this thing is creepy
426
+ std::shared_ptr<CompilationUnit> cu_;
427
+ };
428
+
429
+ struct TORCH_API ClosureValue : public SugaredValue {
430
+ ClosureValue(Value* value) : value_(value) {
431
+ TORCH_INTERNAL_ASSERT(value_->node()->kind() == prim::Closure);
432
+ }
433
+ std::string kind() const override {
434
+ return "closure";
435
+ }
436
+ Value* asValue(const SourceRange& range, GraphFunction& m) override {
437
+ return value_;
438
+ }
439
+ Value* value_;
440
+ };
441
+
442
+ // defines how a method obtained from a module/class/interface behaves in script
443
+ struct MethodValue : public SugaredValue {
444
+ MethodValue(Value* self, std::vector<std::string> method_names)
445
+ : self_(self), method_names_(std::move(method_names)) {}
446
+ MethodValue(Value* self, std::string method_name)
447
+ : MethodValue(self, std::vector<std::string>({std::move(method_name)})) {}
448
+
449
+ std::string kind() const override {
450
+ return "method";
451
+ }
452
+
453
+ std::shared_ptr<SugaredValue> call(
454
+ const SourceRange& loc,
455
+ GraphFunction& f,
456
+ at::ArrayRef<NamedValue> args,
457
+ at::ArrayRef<NamedValue> kwargs,
458
+ size_t n_binders) override {
459
+ std::vector<NamedValue> argsWithSelf = {self_};
460
+ argsWithSelf.insert(argsWithSelf.end(), args.begin(), args.end());
461
+ std::vector<const FunctionSchema*> schemas;
462
+ for (const std::string& method_name : method_names_) {
463
+ if (auto class_type = self_->type()->cast<ClassType>()) {
464
+ Function& method = class_type->getMethod(method_name);
465
+ try {
466
+ method.ensure_defined();
467
+ } catch (const RecursiveMethodCallError&) {
468
+ throw ErrorReport(loc)
469
+ << " method '" << method.name() << "' is called recursively. "
470
+ << "Recursive calls are not supported";
471
+ }
472
+ schemas.push_back(&method.getSchema());
473
+ } else if (auto interface_type = self_->type()->cast<InterfaceType>()) {
474
+ schemas.push_back(interface_type->getMethod(method_name));
475
+ } else {
476
+ TORCH_INTERNAL_ASSERT(
477
+ false, "method constructed that is not a class or interface");
478
+ }
479
+ }
480
+ auto match = matchSchemas(schemas, loc, *f.graph(), argsWithSelf, kwargs);
481
+ Value* output =
482
+ f.graph()->insertMethodCall(method_names_[match.first], match.second);
483
+ output->node()->setSourceRange(loc);
484
+ return std::make_shared<SimpleValue>(output);
485
+ }
486
+
487
+ private:
488
+ Value* self_;
489
+ std::vector<std::string> method_names_;
490
+ };
491
+
492
+ struct TORCH_API PrintValue : public SugaredValue {
493
+ std::string kind() const override {
494
+ return "print";
495
+ }
496
+ std::shared_ptr<SugaredValue> call(
497
+ const SourceRange& loc,
498
+ GraphFunction& m,
499
+ at::ArrayRef<NamedValue> args,
500
+ at::ArrayRef<NamedValue> kwargs,
501
+ size_t n_binders) override;
502
+ };
503
+
504
+ // expressions like int(x)
505
+ // these are the same as calling prim::Int or equivalent, except it
506
+ // is a noop when the input is a subtype of 'type'
507
+ struct TORCH_API CastValue : public BuiltinFunction {
508
+ CastValue(TypePtr type, c10::Symbol method)
509
+ : BuiltinFunction(method, c10::nullopt), type_(std::move(type)) {}
510
+ std::shared_ptr<SugaredValue> call(
511
+ const SourceRange& loc,
512
+ GraphFunction& m,
513
+ at::ArrayRef<NamedValue> args,
514
+ at::ArrayRef<NamedValue> kwargs,
515
+ size_t n_binders) override {
516
+ if (args.size() == 1 && kwargs.empty()) {
517
+ auto len_op = std::make_shared<BuiltinFunction>(aten::len, at::nullopt);
518
+ auto gt_op = std::make_shared<BuiltinFunction>(aten::gt, at::nullopt);
519
+ auto zero = m.graph()->insertConstant(0);
520
+
521
+ auto v = args[0].value(*m.graph());
522
+ if (v->type()->isSubtypeOf(*type_)) {
523
+ return std::make_shared<SimpleValue>(v);
524
+ } else if (
525
+ *type_ == *BoolType::get() &&
526
+ (v->type()->isSubtypeOf(*AnyListType::get()) ||
527
+ v->type()->isSubtypeOf(*StringType::get()) ||
528
+ v->type()->cast<DictType>())) {
529
+ auto len = len_op->call(loc, m, {v}, {}, 1);
530
+ return gt_op->call(loc, m, {len->asValue(loc, m), zero}, {}, 1);
531
+ }
532
+ }
533
+ return BuiltinFunction::call(loc, m, args, kwargs, n_binders);
534
+ }
535
+
536
+ private:
537
+ TypePtr type_;
538
+ };
539
+
540
+ struct TORCH_API TensorCastValue : public SugaredValue {
541
+ TensorCastValue(at::ScalarType type, NamedValue self)
542
+ : dtype_(type), self_(std::move(self)) {}
543
+
544
+ std::string kind() const override {
545
+ return "Cast";
546
+ }
547
+
548
+ std::shared_ptr<SugaredValue> call(
549
+ const SourceRange& loc,
550
+ GraphFunction& m,
551
+ at::ArrayRef<NamedValue> args,
552
+ at::ArrayRef<NamedValue> kwargs,
553
+ size_t n_binders) override {
554
+ TORCH_INTERNAL_ASSERT(args.empty() && kwargs.empty());
555
+ Value* dtype_const = m.graph()->insertConstant(dtype_, loc);
556
+ std::vector<NamedValue> kwargs_{
557
+ self_, NamedValue(loc, "dtype", dtype_const)};
558
+ Value* casted_val = m.graph()->insert(
559
+ /*opname=*/Symbol::fromQualString("aten::to"),
560
+ /*args=*/args,
561
+ /*kwargs=*/kwargs_,
562
+ /*range=*/loc);
563
+ return std::make_shared<SimpleValue>(casted_val);
564
+ }
565
+
566
+ at::ScalarType dtype_;
567
+ NamedValue self_;
568
+ };
569
+
570
+ // builtin operators and functions that call a method if it exists
571
+ // on a class type, like 'len(x)' and 'x + y'
572
+ struct TORCH_API MagicMethod : public SugaredValue {
573
+ MagicMethod(std::string desugared_name, SugaredValuePtr base)
574
+ : base_value_(std::move(base)),
575
+ desugared_name_(std::move(desugared_name)) {}
576
+
577
+ std::string kind() const override {
578
+ return desugared_name_;
579
+ }
580
+
581
+ std::shared_ptr<SugaredValue> call(
582
+ const SourceRange& loc,
583
+ GraphFunction& m,
584
+ at::ArrayRef<NamedValue> args,
585
+ at::ArrayRef<NamedValue> kwargs,
586
+ size_t n_binders) override;
587
+
588
+ private:
589
+ SugaredValuePtr base_value_;
590
+ std::string desugared_name_;
591
+ };
592
+
593
+ // things that look like function applications but
594
+ // perform non-standard evaluation are represented
595
+ // with SpecialFormValues, e.g.
596
+ // isinstance(x, int)
597
+ // fork(fn)
598
+ // annotate(int, 3)
599
+ // The implementation of each value is handled by a case inside emitApplyExpr
600
+ struct TORCH_API SpecialFormValue : public SugaredValue {
601
+ SpecialFormValue(Symbol form) : form_(form) {}
602
+ std::string kind() const override {
603
+ return form_.toUnqualString();
604
+ }
605
+ Symbol form() const {
606
+ return form_;
607
+ }
608
+ static std::shared_ptr<SpecialFormValue> create(Symbol form) {
609
+ return std::make_shared<SpecialFormValue>(form);
610
+ }
611
+
612
+ private:
613
+ Symbol form_;
614
+ };
615
+
616
+ struct TORCH_API LegacyTensorConstructor : public SpecialFormValue {
617
+ LegacyTensorConstructor(Symbol form, at::ScalarType dtype, at::Device device)
618
+ : SpecialFormValue(form), device_(device), dtype_(dtype) {}
619
+
620
+ static std::shared_ptr<LegacyTensorConstructor> create(
621
+ Symbol form,
622
+ at::ScalarType dtype,
623
+ at::Device device) {
624
+ return std::make_shared<LegacyTensorConstructor>(form, dtype, device);
625
+ }
626
+ at::ScalarType dtype() const {
627
+ return dtype_;
628
+ }
629
+
630
+ private:
631
+ at::Device device_;
632
+ at::ScalarType dtype_;
633
+ };
634
+
635
+ // matched against for special handling of range expressions
636
+ struct TORCH_API RangeValue : SugaredValue {
637
+ RangeValue(
638
+ const SourceRange& loc,
639
+ GraphFunction& m,
640
+ std::vector<Value*> input,
641
+ c10::optional<int64_t> static_len = c10::nullopt);
642
+
643
+ std::string kind() const override {
644
+ return "range";
645
+ }
646
+ Value* len(const SourceRange& loc, GraphFunction& m) override;
647
+ SugaredValuePtr getitem(
648
+ const SourceRange& loc,
649
+ GraphFunction& m,
650
+ Value* idx,
651
+ TypePtr type_hint = nullptr) override;
652
+ std::shared_ptr<SugaredValue> iter(const SourceRange& loc, GraphFunction& m)
653
+ override;
654
+
655
+ // When Range is instantiated via enumerate(iterable_with_static_len),
656
+ // then it takes the static length of the iterable
657
+ c10::optional<int64_t> staticLen() override {
658
+ return static_len_;
659
+ }
660
+
661
+ private:
662
+ Value* start_{};
663
+ Value* end_{};
664
+ Value* step_{};
665
+ // a flag to determine if it's a simple range() call with only end_ from
666
+ // arguments. If true, we will not insert length calculation and index
667
+ // derivation nodes to simplify the graph and enable more possible
668
+ // optimizations
669
+ bool has_only_end_{};
670
+ c10::optional<int64_t> static_len_;
671
+ };
672
+
673
+ // Specialized Tree structure to be matched against for special handling
674
+ // of builtin iterable expressions like zip(), enumerate(), etc.
675
+ // zip and enumerate can be modeled as a tree of SimpleValue/RangeValue:
676
+ // zip(x, y) -> (x, y) with tuple assignment to each loop target
677
+ // enumerate(x) -> (range(0, math.inf, 1), x)
678
+ // So a complicated expression like zip(a, enumerate(b), range(0, 100)) will be:
679
+ // (a, (range(0, math.inf, 1), b), range(0, 100))
680
+ // We use those base iterables to fill in the loop information like
681
+ // max_trip_count and set the value table for loop targets
682
+ // Iterables can contain lists of SugaredValues like ModuleLists. If it
683
+ // does, then we emit it unrolled and require that all values it contains
684
+ // have a statically-determinable length.
685
+ struct TORCH_API IterableTree : SugaredValue {
686
+ IterableTree() = default;
687
+ IterableTree(
688
+ const SourceRange& range,
689
+ GraphFunction& m,
690
+ at::ArrayRef<SugaredValuePtr> children) {
691
+ for (const auto& child : children) {
692
+ addChild(range, m, child);
693
+ }
694
+ }
695
+ std::string kind() const override {
696
+ return "iterabletree";
697
+ }
698
+
699
+ std::shared_ptr<SugaredValue> iter(const SourceRange& loc, GraphFunction& m)
700
+ override {
701
+ return shared_from_this();
702
+ }
703
+
704
+ void addChild(
705
+ const SourceRange& range,
706
+ GraphFunction& m,
707
+ const SugaredValuePtr& iter_value);
708
+
709
+ std::vector<SugaredValuePtr> get_children() {
710
+ return children_;
711
+ }
712
+
713
+ // If this iterable contains a ModuleList or Tuple, then it will have a
714
+ // static length, and we will emit it as an unrolled for loop.
715
+ c10::optional<int64_t> staticLen() override {
716
+ return unroll_length_;
717
+ }
718
+
719
+ // given an IterableTree node, get all the base iterables/leaves under the
720
+ // IterableTree node. This enables
721
+ // us to get all the basic SugaredValues that contain valid loop information
722
+ // with len() and getitem()
723
+ std::vector<SugaredValuePtr> get_base_iterables();
724
+
725
+ Value* len(const SourceRange& loc, GraphFunction& m) override;
726
+ SugaredValuePtr getitem(
727
+ const SourceRange& loc,
728
+ GraphFunction& m,
729
+ Value* idx,
730
+ TypePtr type_hint = nullptr) override;
731
+
732
+ private:
733
+ c10::optional<int64_t> unroll_length_ = c10::nullopt;
734
+ std::vector<SugaredValuePtr> children_;
735
+ };
736
+
737
+ static inline std::vector<Value*> toValues(
738
+ Graph& g,
739
+ at::ArrayRef<NamedValue> nvs) {
740
+ return fmap(nvs, [&](const NamedValue& v) { return v.value(g); });
741
+ }
742
+
743
+ struct SimpleSelf : public Self {
744
+ explicit SimpleSelf(ClassTypePtr classType)
745
+ : Self(), classType_(std::move(classType)) {}
746
+ std::shared_ptr<SugaredValue> makeSugared(Value* v) const override {
747
+ v->setType(classType_);
748
+ return std::make_shared<SimpleValue>(v);
749
+ }
750
+ ClassTypePtr getClassType() const override {
751
+ return classType_;
752
+ }
753
+
754
+ private:
755
+ ClassTypePtr classType_;
756
+ };
757
+
758
+ // This is not a SimpleValue so it cannot pass through the code paths that
759
+ // expect a SimpleValue as a sugared value.
760
+ struct TORCH_API ExceptionMessageValue : public SugaredValue {
761
+ explicit ExceptionMessageValue(
762
+ Value* value,
763
+ Value* qualified_class_name = nullptr)
764
+ : value_(value), qualified_class_name_(qualified_class_name) {}
765
+
766
+ std::string kind() const override {
767
+ return "exception message";
768
+ }
769
+
770
+ Value* getValue() {
771
+ return value_;
772
+ }
773
+
774
+ // qualified python class name
775
+ Value* getQualifiedClassName() {
776
+ return qualified_class_name_;
777
+ }
778
+
779
+ private:
780
+ Value* value_;
781
+ Value* qualified_class_name_;
782
+ };
783
+
784
+ struct TORCH_API ExceptionValue : public SugaredValue {
785
+ explicit ExceptionValue(std::string message) : message_(std::move(message)) {}
786
+
787
+ std::string kind() const override {
788
+ return "exception";
789
+ }
790
+
791
+ std::shared_ptr<SugaredValue> call(
792
+ const SourceRange& loc,
793
+ GraphFunction& m,
794
+ at::ArrayRef<NamedValue> args,
795
+ at::ArrayRef<NamedValue> /*attributes*/,
796
+ size_t /*n_binders*/) override {
797
+ auto exception_message = insertConstant(*m.graph(), message_ + ": ", loc);
798
+ for (auto& input : args) {
799
+ auto input_str = input.value(*m.graph());
800
+ if (!input_str->type()->isSubtypeOf(*StringType::get())) {
801
+ input_str =
802
+ emitBuiltinCall(loc, *m.graph(), aten::str, {input_str}, {});
803
+ }
804
+ exception_message = emitBuiltinCall(
805
+ loc, *m.graph(), aten::add, {exception_message, input_str}, {});
806
+ }
807
+ return std::make_shared<ExceptionMessageValue>(exception_message);
808
+ }
809
+
810
+ std::string message_;
811
+ };
812
+
813
+ struct TORCH_API SugaredEnumClass : public SugaredValue {
814
+ explicit SugaredEnumClass(EnumTypePtr enum_type)
815
+ : enum_type_(std::move(enum_type)) {}
816
+
817
+ std::string kind() const override {
818
+ return "EnumClass";
819
+ }
820
+
821
+ SugaredValuePtr attr(
822
+ const SourceRange& loc,
823
+ GraphFunction& m,
824
+ const std::string& field) override;
825
+
826
+ SugaredValuePtr iter(const SourceRange& loc, GraphFunction& m) override;
827
+
828
+ private:
829
+ EnumTypePtr enum_type_;
830
+ };
831
+
832
+ struct TORCH_API SliceValue : public SugaredValue {
833
+ explicit SliceValue(Value* start, Value* stop, Value* step)
834
+ : start_(start), stop_(stop), step_(step) {}
835
+
836
+ std::string kind() const override {
837
+ return "Python slice value";
838
+ }
839
+
840
+ Value* start() {
841
+ return start_;
842
+ };
843
+ Value* stop() {
844
+ return stop_;
845
+ };
846
+ Value* step() {
847
+ return step_;
848
+ };
849
+
850
+ private:
851
+ Value* start_;
852
+ Value* stop_;
853
+ Value* step_;
854
+ };
855
+
856
+ } // namespace jit
857
+ } // namespace torch
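For orientation, the sketch below shows the minimal surface a new sugared value has to implement, mirroring PrintValue above. It is illustrative only and not part of the uploaded headers; the choice to return a None constant from call() is an assumption made purely to keep the example self-contained.

#include <torch/csrc/jit/frontend/sugared_value.h>

namespace torch {
namespace jit {

// A do-nothing sugared value: calling it ignores every argument and yields a
// None constant, the shape of result a statement-like builtin produces.
struct NoopValue : public SugaredValue {
  std::string kind() const override {
    return "noop";
  }
  std::shared_ptr<SugaredValue> call(
      const SourceRange& loc,
      GraphFunction& m,
      at::ArrayRef<NamedValue> args,
      at::ArrayRef<NamedValue> kwargs,
      size_t n_binders) override {
    (void)args;
    (void)kwargs;
    (void)n_binders;
    // insertConstant with an empty IValue emits a prim::Constant holding None.
    Value* none = m.graph()->insertConstant(c10::IValue(), loc);
    return std::make_shared<SimpleValue>(none);
  }
};

} // namespace jit
} // namespace torch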
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/tracer.h ADDED
@@ -0,0 +1,412 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/Dimname.h>
4
+ #include <ATen/core/class_type.h>
5
+ #include <ATen/core/jit_type.h>
6
+ #include <ATen/core/stack.h>
7
+ #include <ATen/core/symbol.h>
8
+ #include <c10/util/Exception.h>
9
+ #include <torch/csrc/Export.h>
10
+
11
+ #include <torch/csrc/jit/frontend/source_range.h>
12
+ #include <torch/csrc/utils/variadic.h>
13
+
14
+ #include <cstdint>
15
+ #include <memory>
16
+ #include <mutex>
17
+ #include <unordered_map>
18
+ #include <vector>
19
+
20
+ namespace torch::jit {
21
+ struct Node;
22
+ struct Value;
23
+ struct Graph;
24
+ struct Module;
25
+
26
+ namespace tracer {
27
+
28
+ using ::c10::ivalue::Shared;
29
+
30
+ using ::c10::IValue;
31
+ using ::c10::ivalue::Future;
32
+
33
+ using ::c10::ArrayRef;
34
+ using ::c10::TupleType;
35
+ using ::c10::TupleTypePtr;
36
+ using ::c10::ivalue::ConstantString;
37
+
38
+ using torch::autograd::Variable;
39
+ using variable_list = std::vector<Variable>;
40
+
41
+ TORCH_API std::atomic<bool>& getTracerStateWarnMode();
42
+
43
+ struct TORCH_API TracingState
44
+ : public std::enable_shared_from_this<TracingState> {
45
+ TracingState();
46
+ ~TracingState();
47
+
48
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
49
+ std::shared_ptr<Graph> graph;
50
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
51
+ bool warn = getTracerStateWarnMode();
52
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
53
+ bool strict = true;
54
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
55
+ bool force_outplace = false;
56
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
57
+ std::function<std::string(const Variable& var)> lookup_var_name_fn =
58
+ [](const Variable& var) { return ""; };
59
+
60
+ void enterFrame() {
61
+ env_stack.emplace_back();
62
+ }
63
+
64
+ void leaveFrame() {
65
+ env_stack.pop_back();
66
+ }
67
+
68
+ void setValue(const IValue& v, Value* value);
69
+ void delValue(const IValue& var);
70
+ Value* getValue(const IValue& var);
71
+ Value* getOutput(const IValue& var, size_t i);
72
+ bool hasValue(const IValue& var) const;
73
+
74
+ Node* createNode(c10::Symbol op_name, size_t num_outputs);
75
+ void insertNode(Node* node);
76
+
77
+ private:
78
+ using WeakIValue = at::WeakIValue;
79
+
80
+ struct WeakIValueHasher {
81
+ size_t operator()(const WeakIValue& t) const {
82
+ return t.hash();
83
+ }
84
+ };
85
+
86
+ struct WeakIValueEq {
87
+ bool operator()(const WeakIValue& t1, const WeakIValue& t2) const {
88
+ return t1.isSameIdentity(t2);
89
+ }
90
+ };
91
+
92
+ using Frame =
93
+ std::unordered_map<WeakIValue, Value*, WeakIValueHasher, WeakIValueEq>;
94
+ std::vector<Frame> env_stack;
95
+ };
96
+
97
+ // This is meant to be used as a thread local place, where we can store extra
98
+ // info that gets lost when we call into ATen from Python bindings. One example
99
+ // for when this happens is when we get an IntArrayRef argument with e.g. sizes
100
+ // for view. When tracing, those might be tensors, which let us encode extra
101
+ // data dependencies, but once they get to the ATen call where we actually have
102
+ the tracing logic, they get converted into a raw IntArrayRef, and we lose
103
+ // all information. To prevent this, we temporarily stash it in here.
104
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
105
+ struct ArgumentStash {
106
+ struct IntArrayRefTrace : std::vector<Value*> {
107
+ IntArrayRefTrace(int size) : std::vector<Value*>(size, nullptr) {}
108
+ };
109
+
110
+ static bool empty() {
111
+ return stash.intlists.empty();
112
+ }
113
+
114
+ TORCH_API static void stashIntArrayRefElem(
115
+ const std::string& arg_name,
116
+ size_t size,
117
+ size_t idx,
118
+ const Variable& var);
119
+
120
+ static bool hasIntArrayRef(const std::string& arg_name) {
121
+ return stash.intlists.count(arg_name) > 0;
122
+ }
123
+
124
+ static IntArrayRefTrace popIntArrayRef(const std::string& arg_name) {
125
+ auto info = std::move(stash.intlists.at(arg_name));
126
+ stash.intlists.erase(arg_name);
127
+ return info;
128
+ }
129
+
130
+ // Value stashing: Use these methods to stash arguments which correspond
131
+ // to regular Value*'s in the graph. i.e. they don't require special
132
+ // handling like in the case of IntArrayRefs
133
+ TORCH_API static void stashValue(
134
+ const std::string& arg_name,
135
+ size_t idx,
136
+ const Variable& var,
137
+ const c10::TypePtr& type = nullptr);
138
+
139
+ static bool hasValue(const std::string& arg_name) {
140
+ return stash.values.count(arg_name) > 0;
141
+ }
142
+
143
+ static Value* popValue(const std::string& arg_name) {
144
+ auto info = stash.values.at(arg_name);
145
+ stash.values.erase(arg_name);
146
+ return info;
147
+ }
148
+
149
+ private:
150
+ static thread_local ArgumentStash stash;
151
+ std::unordered_map<std::string, IntArrayRefTrace> intlists;
152
+ std::unordered_map<std::string, Value*> values;
153
+ };
154
+
155
+ // Retrieve or set the current tracing state. Returns a nullptr if tracing is
156
+ // disabled.
157
+ TORCH_API const std::shared_ptr<TracingState>& getTracingState();
158
+ TORCH_API void setTracingState(std::shared_ptr<TracingState> state);
159
+
160
+ inline bool isTracing() {
161
+ return static_cast<bool>(getTracingState());
162
+ }
163
+
164
+ using warn_fn_type = void (*)(const std::string& msg);
165
+ TORCH_API extern const char* WARN_PYTHON_DATAFLOW;
166
+ TORCH_API extern const char* WARN_CONSTRUCTOR;
167
+ TORCH_API extern const char* WARN_RESIZE;
168
+ TORCH_API extern const char* STRICT_TRACER_MSG;
169
+ TORCH_API void _do_warn(const char* _reason, const char* _kind);
170
+ inline void warn(const char* _reason, const char* _kind = nullptr) {
171
+ if (const auto& state = getTracingState()) {
172
+ if (!state->warn)
173
+ return;
174
+ _do_warn(_reason, _kind);
175
+ }
176
+ }
177
+ TORCH_API void setWarn(warn_fn_type fn);
178
+
179
+ struct TORCH_API NoWarn {
180
+ NoWarn() : state(getTracingState()) {
181
+ if (state) {
182
+ prev = state->warn;
183
+ state->warn = false;
184
+ }
185
+ }
186
+ ~NoWarn() {
187
+ if (state) {
188
+ state->warn = prev;
189
+ }
190
+ }
191
+ std::shared_ptr<TracingState> state;
192
+ bool prev{false};
193
+ };
194
+
195
+ struct WithNestedTracingFrame {
196
+ WithNestedTracingFrame() {
197
+ getTracingState()->enterFrame();
198
+ }
199
+
200
+ ~WithNestedTracingFrame() {
201
+ getTracingState()->leaveFrame();
202
+ }
203
+ };
204
+ TORCH_API void recordSourceLocation(Node* n);
205
+ TORCH_API void setRecordSourceLocation(void (*v)(Node*));
206
+
207
+ TORCH_API std::vector<StackEntry> pythonCallstack();
208
+ TORCH_API void setPythonCallstack(std::vector<StackEntry> (*v)());
209
+
210
+ // Having finished adding a new 'node' to the graph IR, 'setValueTrace'
211
+ // associates this node with an output variable, so that further operations
212
+ // involving this variable know which node in the IR to reference.
213
+ TORCH_API void setValueTrace(const IValue& v, Value* value);
214
+
215
+ TORCH_API void delValueTrace(const IValue& var);
216
+
217
+ TORCH_API std::function<void()> pauseTracing();
218
+
219
+ TORCH_API Value* getValueTrace(const IValue& var);
220
+
221
+ TORCH_API std::pair<std::shared_ptr<TracingState>, Stack> trace(
222
+ Stack inputs,
223
+ const std::function<Stack(Stack)>& traced_fn,
224
+ std::function<std::string(const Variable&)> var_name_lookup_fn,
225
+ bool strict = true,
226
+ bool force_outplace = false,
227
+ Module* self = nullptr,
228
+ const std::vector<std::string>& argument_names = {});
229
+
230
+ TORCH_API void abandon();
231
+
232
+ // NB: these serve both as intermediate steps in addInputs below,
233
+ // as well as the overloads that terminate template recursion
234
+ TORCH_API void addInputs(Node* n, const char* name, int64_t value);
235
+ TORCH_API void addInputs(Node* n, const char* name, c10::SymInt value);
236
+ TORCH_API void addInputs(
237
+ Node* n,
238
+ const char* name,
239
+ c10::optional<int64_t> value);
240
+ TORCH_API void addInputs(Node* n, const char* name, bool value);
241
+ TORCH_API void addInputs(
242
+ Node* n,
243
+ const char* name,
244
+ const c10::optional<bool>& value);
245
+ TORCH_API void addInputs(Node* n, const char* name, double value);
246
+ TORCH_API void addInputs(
247
+ Node* n,
248
+ const char* name,
249
+ const c10::optional<double>& value);
250
+ TORCH_API void addInputs(Node* n, const char* name, const at::Scalar& value);
251
+ TORCH_API void addInputs(
252
+ Node* n,
253
+ const char* name,
254
+ const c10::optional<at::Scalar>& value);
255
+ TORCH_API void addInputs(Node* n, const char* name, const at::Tensor& value);
256
+ TORCH_API void addInputs(
257
+ Node* n,
258
+ const char* name,
259
+ const c10::optional<at::Tensor>& value);
260
+ TORCH_API void addInputs(Node* n, const char* name, ArrayRef<int64_t> value);
261
+ TORCH_API void addInputs(Node* n, const char* name, c10::SymIntArrayRef value);
262
+ TORCH_API void addInputs(
263
+ Node* n,
264
+ const char* name,
265
+ c10::optional<c10::SymInt> value);
266
+ TORCH_API void addInputs(
267
+ Node* n,
268
+ const char* name,
269
+ const c10::optional<ArrayRef<int64_t>>& value);
270
+ TORCH_API void addInputs(
271
+ Node* n,
272
+ const char* name,
273
+ const at::OptionalIntArrayRef& opt_value);
274
+ TORCH_API void addInputs(
275
+ Node* n,
276
+ const char* name,
277
+ const at::OptionalSymIntArrayRef& opt_value);
278
+ TORCH_API void addInputs(
279
+ Node* n,
280
+ const char* name,
281
+ ArrayRef<at::Tensor> value,
282
+ bool allow_undefined = false);
283
+ TORCH_API void addInputs(
284
+ Node* n,
285
+ const char* name,
286
+ std::vector<at::Tensor> value,
287
+ bool allow_undefined = false);
288
+ TORCH_API void addInputs(
289
+ Node* n,
290
+ const char* name,
291
+ at::ITensorListRef value,
292
+ bool allow_undefined = false);
293
+ TORCH_API void addInputs(
294
+ Node* n,
295
+ const char* name,
296
+ const List<c10::optional<at::Tensor>>& value);
297
+ TORCH_API void addInputs(
298
+ Node* n,
299
+ const char* name,
300
+ ArrayRef<c10::intrusive_ptr<c10::ivalue::Object>> value,
301
+ const c10::ClassTypePtr& class_type);
302
+ TORCH_API void addInputs(Node* n, const char* name, ArrayRef<double> value);
303
+ TORCH_API void addInputs(
304
+ Node* n,
305
+ const char* name,
306
+ const c10::optional<ArrayRef<double>>& value);
307
+ TORCH_API void addInputs(
308
+ Node* n,
309
+ const char* name,
310
+ const c10::string_view value);
311
+ TORCH_API void addInputs(
312
+ Node* n,
313
+ const char* name,
314
+ const c10::optional<c10::string_view>& value);
315
+ TORCH_API void addInputs(Node* n, const char* name, at::Device value);
316
+ TORCH_API void addInputs(Node* n, const char* name, c10::Stream stream);
317
+ TORCH_API void addInputs(Node* n, const char* name, at::Layout value);
318
+ TORCH_API void addInputs(Node* n, const char* name, at::ScalarType value);
319
+ TORCH_API void addInputs(
320
+ Node* n,
321
+ const char* name,
322
+ const c10::optional<at::ScalarType>& value);
323
+ TORCH_API void addInputs(
324
+ Node* n,
325
+ const char* name,
326
+ const c10::optional<at::Device>& value);
327
+ TORCH_API void addInputs(
328
+ Node* n,
329
+ const char* name,
330
+ const c10::optional<at::Layout>& value);
331
+ TORCH_API void addInputs(Node* n, const char* name, at::MemoryFormat value);
332
+ TORCH_API void addInputs(
333
+ Node* n,
334
+ const char* name,
335
+ c10::optional<at::DimnameList> value);
336
+ TORCH_API void addInputs(
337
+ Node* n,
338
+ const char* name,
339
+ const c10::optional<at::MemoryFormat>& value);
340
+ TORCH_API void addInputs(
341
+ Node* n,
342
+ const char* name,
343
+ const c10::optional<at::Generator>& value);
344
+
345
+ inline void addInputs(
346
+ Node* n,
347
+ const char* name,
348
+ const std::vector<bool>& value) {
349
+ AT_ERROR("Tracing a list of bool type is currently not supported!");
350
+ }
351
+
352
+ template <typename T>
353
+ void addInputs(Node* n, const char* name, ArrayRef<T> value) {
354
+ AT_ERROR("Tracing a list of arbitrary type is currently not supported!");
355
+ }
356
+ template <typename K, typename V>
357
+ void addInputs(
358
+ Node* n,
359
+ const char* name,
360
+ const std::unordered_map<K, V>& value) {
361
+ AT_ERROR("Tracing a dict of arbitrary types is currently not supported!");
362
+ }
363
+
364
+ template <size_t N>
365
+ void addInputs(Node* n, const char* name, std::array<bool, N> value) {
366
+ throw std::runtime_error(
367
+ "Found an unsupported argument type in the JIT tracer. File a bug report.");
368
+ }
369
+
370
+ TORCH_API void addInputs(
371
+ Node* n,
372
+ const char* name,
373
+ const c10::intrusive_ptr<c10::ivalue::Object>& obj);
374
+
375
+ TORCH_API void ensureUniqueIfOutOfPlaced(
376
+ const char* name,
377
+ const at::Tensor& tensor);
378
+ TORCH_API void ensureUniqueIfOutOfPlaced(
379
+ const char* name,
380
+ const c10::optional<at::Tensor>& tensor);
381
+
382
+ template <
383
+ typename T,
384
+ typename = torch::enable_if_t<
385
+ (!std::is_convertible_v<torch::decay_t<T>, at::TensorList> &&
386
+ !std::is_convertible_v<torch::decay_t<T>, c10::List<at::Tensor>> &&
387
+ !std::is_convertible_v<torch::decay_t<T>, at::Tensor> &&
388
+ !std::is_convertible_v<
389
+ torch::decay_t<T>,
390
+ c10::intrusive_ptr<c10::ivalue::Object>>)>>
391
+ void addOutput(Node* node, T&&) {
392
+ AT_ERROR(
393
+ "Found an unsupported argument type ",
394
+ c10::demangle_type<T>(),
395
+ " in the JIT tracer. File a bug report.");
396
+ }
397
+ TORCH_API void addOutput(Node* node, const at::Tensor& tensor);
398
+ TORCH_API void setOutput(Value* value, const at::Tensor& output);
399
+ TORCH_API void addOutput(Node* node, const std::vector<at::Tensor>& list);
400
+ TORCH_API void addOutput(Node* node, const c10::List<at::Tensor>& list);
401
+ TORCH_API void addOutput(
402
+ Node* node,
403
+ const c10::intrusive_ptr<c10::ivalue::Object>& output);
404
+
405
+ TORCH_API autograd::Variable getSizeOf(
406
+ const autograd::Variable& var,
407
+ int64_t dim);
408
+
409
+ TORCH_API autograd::Variable getNumelOf(const autograd::Variable& var);
410
+
411
+ } // namespace tracer
412
+ } // namespace torch::jit
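As a usage note, library kernels typically consult this header only to check whether a trace is active and, if so, emit one of the canned warnings declared above. A minimal sketch, assuming the code is built against libtorch so these internal headers resolve:

#include <torch/csrc/jit/frontend/tracer.h>

void my_custom_kernel_entry() {
  // warn() is already a no-op when no TracingState is active; the explicit
  // isTracing() check just makes the intent obvious at the call site.
  if (torch::jit::tracer::isTracing()) {
    torch::jit::tracer::warn(
        "my_custom_kernel_entry", torch::jit::tracer::WARN_PYTHON_DATAFLOW);
  }
}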
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/tree.h ADDED
@@ -0,0 +1,220 @@
1
+ #pragma once
2
+
3
+ #include <functional>
4
+ #include <memory>
5
+ #include <unordered_map>
6
+ #include <vector>
7
+
8
+ #include <c10/util/SmallVector.h>
9
+ #include <c10/util/intrusive_ptr.h>
10
+ #include <torch/csrc/jit/frontend/lexer.h>
11
+
12
+ namespace torch {
13
+ namespace jit {
14
+
15
+ // Trees are used to represent all forms of TC IR, pre- and post-typechecking.
16
+ // Rather than have a full class hierarchy for all TC statements, trees are a
17
+ // slight variation of Lisp s-expressions. For instance, the expression a*b+1
18
+ // is represented as:
19
+ // (+ (* (ident a) (ident b)) (const 1))
20
+ // Atoms like 'a', 'b', and '1' are represented by subclasses of Tree which
21
+ // define stringValue(). Everything else is a Compound object, which has a
22
+ // 'kind' that is a token from lexer.h's TokenKind enum. Single-character
23
+ // operators like '+' are represented using the character itself (so, add.kind()
24
+ // would be '+'). Each Compound object also contains a list of subtrees and is
25
+ // associated with a SourceRange for error reporting.
26
+ // Memory management of trees is done using intrusive_ptr.
27
+
28
+ struct Tree;
29
+ using TreeRef = c10::intrusive_ptr<Tree>;
30
+ using TreeList = at::SmallVector<TreeRef, 4>;
31
+
32
+ struct Tree : c10::intrusive_ptr_target {
33
+ Tree(int kind_) : kind_(kind_) {}
34
+ int kind() const {
35
+ return kind_;
36
+ }
37
+ virtual bool isAtom() const {
38
+ return true;
39
+ }
40
+ virtual const SourceRange& range() const {
41
+ throw std::runtime_error("is an Atom");
42
+ }
43
+ virtual const std::string& stringValue() const {
44
+ throw std::runtime_error("stringValue can only be called on TK_STRING");
45
+ }
46
+ virtual const TreeList& trees() const {
47
+ static const TreeList empty_trees = {};
48
+ return empty_trees;
49
+ }
50
+ const TreeRef& tree(size_t i) const {
51
+ return trees().at(i);
52
+ }
53
+ virtual TreeRef map(const std::function<TreeRef(TreeRef)>& fn) {
54
+ (void)fn;
55
+ c10::raw::intrusive_ptr::incref(this); // we are creating a new pointer
56
+ // from a raw `this` pointer
57
+ // so we need to bump the refcount
58
+ // to account for this ownership
59
+ return TreeRef::reclaim(this);
60
+ }
61
+ template <typename... Args>
62
+ void match(int k, Args&... args) const {
63
+ matchD(k, "unknown", 0, args...);
64
+ }
65
+ template <typename... Args>
66
+ void matchD(int k, const char* filename, int lineno, Args&... args) const {
67
+ std::initializer_list<TreeRef*> vars = {args...};
68
+ matchNumSubtreesD(k, filename, lineno, vars.size(), true);
69
+ size_t i = 0;
70
+ for (TreeRef* v : vars) {
71
+ *v = trees()[i++];
72
+ }
73
+ }
74
+ void matchNumSubtrees(int k, size_t expected_subtrees) {
75
+ return matchNumSubtreesD(k, "unknown", 0, expected_subtrees, false);
76
+ }
77
+ void matchNumSubtreesD(
78
+ int k,
79
+ const char* filename,
80
+ int lineno,
81
+ size_t expected_subtrees,
82
+ bool allow_more) const {
83
+ if (kind() != k) {
84
+ std::stringstream ss;
85
+ ss << filename << ":" << lineno << ": expecting kind '" << kindToString(k)
86
+ << "' but found '" << kindToString(kind()) << "'\n";
87
+ range().highlight(ss);
88
+ throw std::runtime_error(ss.str());
89
+ }
90
+ if (trees().size() < expected_subtrees ||
91
+ (!allow_more && trees().size() != expected_subtrees)) {
92
+ std::stringstream ss;
93
+ ss << filename << ":" << lineno << ": expected at least "
94
+ << expected_subtrees << " subtrees, but found only " << trees().size()
95
+ << "\n";
96
+ range().highlight(ss);
97
+ throw std::runtime_error(ss.str());
98
+ }
99
+ }
100
+ ~Tree() override = default;
101
+
102
+ private:
103
+ int kind_;
104
+ };
105
+
106
+ struct String : public Tree {
107
+ String(std::string value) : Tree(TK_STRING), value_(std::move(value)) {}
108
+ const std::string& stringValue() const override {
109
+ return value_;
110
+ }
111
+ template <typename... Args>
112
+ static TreeRef create(Args&&... args) {
113
+ return c10::make_intrusive<String>(std::forward<Args>(args)...);
114
+ }
115
+
116
+ private:
117
+ std::string value_;
118
+ };
119
+
120
+ static SourceRange mergeRanges(SourceRange c, const TreeList& others) {
121
+ for (const auto& t : others) {
122
+ if (t->isAtom())
123
+ continue;
124
+ size_t s = std::min(c.start(), t->range().start());
125
+ size_t e = std::max(c.end(), t->range().end());
126
+ c = SourceRange(c.source(), s, e);
127
+ }
128
+ return c;
129
+ }
130
+
131
+ struct Compound : public Tree {
132
+ Compound(int kind, SourceRange range)
133
+ : Tree(kind), range_(std::move(range)) {}
134
+ Compound(int kind, const SourceRange& range_, TreeList&& trees_)
135
+ : Tree(kind),
136
+ range_(mergeRanges(range_, trees_)),
137
+ trees_(std::move(trees_)) {}
138
+ const TreeList& trees() const override {
139
+ return trees_;
140
+ }
141
+ static TreeRef create(
142
+ int kind,
143
+ const SourceRange& range_,
144
+ TreeList&& trees_) {
145
+ return c10::make_intrusive<Compound>(kind, range_, std::move(trees_));
146
+ }
147
+ bool isAtom() const override {
148
+ return false;
149
+ }
150
+ TreeRef map(const std::function<TreeRef(TreeRef)>& fn) override {
151
+ TreeList ret;
152
+ for (auto& t : trees()) {
153
+ ret.push_back(fn(t));
154
+ }
155
+ return Compound::create(kind(), range(), std::move(ret));
156
+ }
157
+
158
+ const SourceRange& range() const override {
159
+ return range_;
160
+ }
161
+
162
+ private:
163
+ SourceRange range_;
164
+ TreeList trees_;
165
+ };
166
+
167
+ // tree pretty printer
168
+ struct pretty_tree {
169
+ pretty_tree(const TreeRef& tree, size_t col = 40) : tree(tree), col(col) {}
170
+ const TreeRef& tree;
171
+ size_t col;
172
+ std::unordered_map<TreeRef, std::string> flat_strings;
173
+ const std::string& get_flat(const TreeRef& t) {
174
+ auto it = flat_strings.find(t);
175
+ if (it != flat_strings.end())
176
+ return it->second;
177
+
178
+ std::stringstream out;
179
+ switch (t->kind()) {
180
+ case TK_STRING:
181
+ out << t->stringValue();
182
+ break;
183
+ default:
184
+ out << "(" << kindToString(t->kind());
185
+ for (const auto& e : t->trees()) {
186
+ out << " " << get_flat(e);
187
+ }
188
+ out << ")";
189
+ break;
190
+ }
191
+ auto it_ = flat_strings.emplace(t, out.str());
192
+ return it_.first->second;
193
+ }
194
+ void print(std::ostream& out, const TreeRef& t, int indent) {
195
+ const std::string& s = get_flat(t);
196
+ if (indent + s.size() < col || t->isAtom()) {
197
+ out << s;
198
+ return;
199
+ }
200
+ std::string k = kindToString(t->kind());
201
+ out << "(" << k;
202
+ for (const auto& e : t->trees()) {
203
+ out << "\n" << std::string(indent + 2, ' ');
204
+ print(out, e, indent + 2);
205
+ }
206
+ out << ")";
207
+ }
208
+ };
209
+
210
+ static inline std::ostream& operator<<(std::ostream& out, pretty_tree t_) {
211
+ t_.print(out, t_.tree, 0);
212
+ return out << std::endl;
213
+ }
214
+
215
+ static inline std::ostream& operator<<(std::ostream& out, const TreeRef& t) {
216
+ return out << pretty_tree(t);
217
+ }
218
+
219
+ } // namespace jit
220
+ } // namespace torch
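To make the s-expression description above concrete, here is a small standalone sketch (not part of the upload) that builds the tree for a*b + 1 with the factory functions from this header and pretty-prints it; it assumes SourceRange is default-constructible, which holds in current PyTorch sources.

#include <torch/csrc/jit/frontend/tree.h>
#include <iostream>

using namespace torch::jit;

int main() {
  SourceRange r; // an empty range is enough for a demo tree
  auto ident = [&](const char* name) {
    // (ident <name>) is a Compound wrapping a String atom
    return Compound::create(TK_IDENT, r, {String::create(name)});
  };
  // a*b + 1  ->  (+ (* (ident a) (ident b)) (const 1))
  TreeRef mul = Compound::create('*', r, {ident("a"), ident("b")});
  TreeRef one = Compound::create(TK_CONST, r, {String::create("1")});
  TreeRef sum = Compound::create('+', r, {mul, one});
  std::cout << sum; // prints the s-expression via pretty_tree
  return 0;
}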
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/tree_views.h ADDED
@@ -0,0 +1,1275 @@
1
+ #pragma once
2
+ #include <c10/util/string_utils.h>
3
+ #include <torch/csrc/jit/frontend/error_report.h>
4
+ #include <torch/csrc/jit/frontend/strtod.h>
5
+ #include <torch/csrc/jit/frontend/tree.h>
6
+
7
+ #include <c10/util/complex.h>
8
+ #include <functional>
9
+ #include <iostream>
10
+ #include <string>
11
+ #include <utility>
12
+
13
+ namespace torch {
14
+ namespace jit {
15
+
16
+ // clang-format off
17
+ // TreeView provides a statically-typed way to traverse the tree, which should
18
+ // be formed according to the grammar below.
19
+ //
20
+ // A few notes on types and their aliases:
21
+ // - List<T> is really a Tree with kind TK_LIST and elements as subtrees
22
+ // - Maybe<T> is really a Tree with kind TK_OPTION that has 0 or 1 subtree of type T
23
+ // - Builtin types are: Ident (TK_IDENT), String (TK_STRING)
24
+ //
25
+ // Param = Param(Maybe<Expr> type, Ident name) TK_PARAM
26
+ //
27
+ // Decl = Decl(List<Param> params, Maybe<Expr> return_type) TK_DECL
28
+ // Def = Def(Ident name, Decl decl, List<Stmt> body) TK_DEF
29
+ // ClassDef = ClassDef(Ident name, TK_CLASS_DEF
30
+ // Maybe<Expr> superclass,
31
+ // List<Stmt> body)
32
+ //
33
+ // Stmt = If(Expr cond, List<Stmt> true_body, List<Stmt> false_body) TK_IF
34
+ // | For(List<Expr> targets, List<Expr> iters, List<Stmt> body) TK_FOR
35
+ // | While(Expr cond, List<Stmt> body) TK_WHILE
36
+ // | Global(List<Ident> idents) TK_GLOBAL
37
+ // -- NB: the only type of Expr's allowed on lhs are Var
38
+ // Or a tuple containing Var with an optional terminating Starred
39
+ // | Assign(Expr lhs, Maybe<Expr> rhs, Maybe<Expr> type) TK_ASSIGN
40
+ // | AugAssign(Expr lhs, AugAssignKind aug_op, Expr rhs) TK_AUG_ASSIGN
41
+ // | Return(List<Expr> values) TK_RETURN
42
+ // | ExprStmt(List<Expr> expr) TK_EXPR_STMT
43
+ // | Raise(Expr expr) TK_RAISE
44
+ // | Def TK_DEF
45
+ // | With(List<WithItem> targets, List<Stmt> body) TK_WITH
46
+ //
47
+ // Expr = TernaryIf(Expr cond, Expr true_expr, Expr false_expr) TK_IF_EXPR
48
+ // | BinOp(Expr lhs, Expr rhs)
49
+ // | And TK_AND
50
+ // | Or TK_OR
51
+ // | Lt '<'
52
+ // | Gt '>'
53
+ // | Eq TK_EQ
54
+ // | Le TK_LE
55
+ // | Ge TK_GE
56
+ // | Ne TK_NE
57
+ // | Is TK_IS
58
+ // | IsNot TK_ISNOT
59
+ // | Add '+'
60
+ // | Sub '-'
61
+ // | Mul '*'
62
+ // | Div '/'
63
+ // | Mod '%'
64
+ // | MatMult '@'
65
+ // | Pow TK_POW
66
+ // | UnaryOp(Expr expr)
67
+ // | Not TK_NOT
68
+ // | USub '-'
69
+ // | Const(String value) TK_CONST
70
+ // -- NB: x.name(y) is desugared into name(x, y)
71
+ // | Apply(Ident name, List<Expr> args, List<Attribute> kwargs) TK_APPLY
72
+ // | Select(Expr value, Ident selector) '.'
73
+ // | Subscript(Expr value, List<Expr> subscript_exprs) TK_SUBSCRIPT
74
+ // | SliceExpr(Maybe<Expr> start, Maybe<Expr> end) TK_SLICE_EXPR
75
+ // | Var(Ident name) TK_VAR
76
+ // | ListLiteral(List<Expr> inputs) TK_LIST_LITERAL
77
+ // | TupleLiteral(List<Expr> inputs) TK_TUPLE_LITERAL
78
+ // | Starred(Expr expr) TK_STARRED
79
+ // | WithItem(Expr target, Maybe<Var> var) TK_WITH_ITEM
80
+ // -- NB: only allowed expressions are Const or List(Const)
81
+ // (List as a value, not type constructor)
82
+ // Attribute = Attribute(Ident name, Expr value) TK_ATTRIBUTE
83
+ //
84
+ // AugAssignKind =
85
+ // | Add() TK_PLUS_EQ
86
+ // | Sub() TK_MINUS_EQ
87
+ // | Mul() TK_TIMES_EQ
88
+ // | Div() TK_DIV_EQ
89
+ // | Mod() TK_MOD_EQ
90
+ //
91
+
92
+ // Each subclass of TreeView should provide:
93
+ // 1. Constructor that takes a TreeRef, and checks that it's of the right type.
94
+ // 2. Accessors that get underlying information out of the object. If they
95
+ // return subtrees, they should wrap them in appropriate views too.
96
+ // 3. Static method 'create' that creates the underlying TreeRef object.
98
+ // For every TreeRef kind that has a TreeView, the parser always uses
99
+ // (e.g.) Ident::create rather than Compound::create; this means that
100
+ // changes to the structure of Ident are always made right here rather
101
+ // than both in the parser and in this code.
101
+ // XXX: these structs should have no fields to prevent slicing when passing by value
102
+ // clang-format on
103
+ struct TreeView {
104
+ explicit TreeView(TreeRef tree) : tree_(std::move(tree)) {}
105
+ TreeRef tree() const {
106
+ return tree_;
107
+ }
108
+ const SourceRange& range() const {
109
+ return tree_->range();
110
+ }
111
+ operator TreeRef() const {
112
+ return tree_;
113
+ }
114
+ const TreeRef& get() const {
115
+ return tree_;
116
+ }
117
+ int kind() const {
118
+ return tree_->kind();
119
+ }
120
+ void dump() const {
121
+ std::cout << tree_;
122
+ }
123
+
124
+ protected:
125
+ const TreeRef& subtree(size_t i) const {
126
+ return tree_->trees().at(i);
127
+ }
128
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
129
+ TreeRef tree_;
130
+ };
131
+
132
+ template <typename T>
133
+ struct ListIterator {
134
+ ListIterator(TreeList::const_iterator it) : it(it) {}
135
+ bool operator!=(const ListIterator& rhs) const {
136
+ return it != rhs.it;
137
+ }
138
+ bool operator==(const ListIterator& rhs) const {
139
+ return it == rhs.it;
140
+ }
141
+ T operator*() const {
142
+ return T(*it);
143
+ }
144
+ ListIterator& operator+=(std::ptrdiff_t n) {
145
+ it += n;
146
+ return *this;
147
+ }
148
+ ListIterator& operator++() {
149
+ ++it;
150
+ return *this;
151
+ }
152
+ ListIterator& operator--() {
153
+ --it;
154
+ return *this;
155
+ }
156
+
157
+ private:
158
+ TreeList::const_iterator it;
159
+ };
160
+
161
+ template <typename T>
162
+ struct List : public TreeView {
163
+ using iterator = ListIterator<T>;
164
+ using const_iterator = ListIterator<T>;
165
+
166
+ List(const TreeRef& tree) : TreeView(tree) {
167
+ tree->match(TK_LIST);
168
+ // Iterate over list to temporarily instantiate Ts that will check the type
169
+ for (const T& elem : *this) {
170
+ (void)elem; // silence unused warning
171
+ }
172
+ }
173
+ iterator begin() const {
174
+ return iterator(tree_->trees().begin());
175
+ }
176
+ iterator end() const {
177
+ return iterator(tree_->trees().end());
178
+ }
179
+ bool empty() const {
180
+ return tree_->trees().begin() == tree_->trees().end();
181
+ }
182
+ T operator[](size_t i) const {
183
+ return T(subtree(i));
184
+ }
185
+ TreeRef map(const std::function<TreeRef(const T&)>& fn) {
186
+ return tree_->map([&](TreeRef v) { return fn(T(v)); });
187
+ }
188
+ static List create(const SourceRange& range, const std::vector<T>& subtrees) {
189
+ TreeList type_erased_sub{subtrees.begin(), subtrees.end()};
190
+ return List(Compound::create(TK_LIST, range, std::move(type_erased_sub)));
191
+ }
192
+ static List unsafeCreate(const SourceRange& range, TreeList&& subtrees) {
193
+ return List(Compound::create(TK_LIST, range, std::move(subtrees)));
194
+ }
195
+ size_t size() const {
196
+ return tree_->trees().size();
197
+ }
198
+ };
199
+
200
+ template <typename T>
201
+ struct Maybe : public TreeView {
202
+ explicit Maybe(const TreeRef& tree) : TreeView(tree) {
203
+ tree_->match(TK_OPTION);
204
+ if (tree_->trees().size() > 1)
205
+ throw ErrorReport(tree) << "Maybe trees can have at most one subtree";
206
+ }
207
+ /* implicit */ Maybe(const T& tree) : TreeView(tree) {}
208
+ bool present() const {
209
+ return tree_->trees().size() > 0;
210
+ }
211
+ T get() const {
212
+ return T(tree_->trees().at(0));
213
+ }
214
+ TreeRef map(const std::function<TreeRef(const T&)>& fn) {
215
+ return tree_->map([&](TreeRef v) { return fn(T(v)); });
216
+ }
217
+ static Maybe<T> create(const SourceRange& range) {
218
+ return Maybe<T>(Compound::create(TK_OPTION, range, {}));
219
+ }
220
+ static Maybe<T> create(const SourceRange& range, const T& value) {
221
+ return Maybe<T>(Compound::create(TK_OPTION, range, {value}));
222
+ }
223
+ };
224
+
225
+ struct Ident : public TreeView {
226
+ explicit Ident(const TreeRef& tree) : TreeView(tree) {
227
+ tree_->match(TK_IDENT);
228
+ }
229
+ const std::string& name() const {
230
+ return subtree(0)->stringValue();
231
+ }
232
+ static Ident create(const SourceRange& range, std::string name) {
233
+ return Ident(
234
+ Compound::create(TK_IDENT, range, {String::create(std::move(name))}));
235
+ }
236
+ };
237
+
238
+ ////////////////////////////////////////////////////////////////////////////////
239
+ // Base types (production LHS)
240
+ ////////////////////////////////////////////////////////////////////////////////
241
+
242
+ struct Stmt : public TreeView {
243
+ explicit Stmt(const TreeRef& tree) : TreeView(tree) {
244
+ switch (tree->kind()) {
245
+ case TK_IF:
246
+ case TK_FOR:
247
+ case TK_WHILE:
248
+ case TK_GLOBAL:
249
+ case TK_ASSIGN:
250
+ case TK_AUG_ASSIGN:
251
+ case TK_RETURN:
252
+ case TK_EXPR_STMT:
253
+ case TK_RAISE:
254
+ case TK_ASSERT:
255
+ case TK_PASS:
256
+ case TK_BREAK:
257
+ case TK_DELETE:
258
+ case TK_CONTINUE:
259
+ case TK_DEF:
260
+ case TK_WITH:
261
+ return;
262
+ default:
263
+ throw ErrorReport(tree)
264
+ << kindToString(tree->kind()) << " is not a valid Stmt";
265
+ }
266
+ }
267
+ };
268
+
269
+ struct Expr : public TreeView {
270
+ explicit Expr(const TreeRef& tree) : TreeView(tree) {
271
+ switch (tree->kind()) {
272
+ case TK_IF_EXPR:
273
+ case TK_AND:
274
+ case TK_OR:
275
+ case '<':
276
+ case '>':
277
+ case TK_IS:
278
+ case TK_ISNOT:
279
+ case TK_EQ:
280
+ case TK_LE:
281
+ case TK_GE:
282
+ case TK_NE:
283
+ case '+':
284
+ case '-':
285
+ case TK_UNARY_MINUS:
286
+ case '~':
287
+ case '*':
288
+ case TK_STARRED:
289
+ case '/':
290
+ case '%':
291
+ case TK_NOT:
292
+ case TK_CONST:
293
+ case TK_STRINGLITERAL:
294
+ case TK_TRUE:
295
+ case TK_FALSE:
296
+ case TK_NONE:
297
+ case TK_NONE_TYPE:
298
+ case TK_CAST:
299
+ case TK_APPLY:
300
+ case '.':
301
+ case TK_SUBSCRIPT:
302
+ case TK_SLICE_EXPR:
303
+ case TK_VAR:
304
+ case TK_LIST_LITERAL:
305
+ case TK_TUPLE_LITERAL:
306
+ case TK_DICT_LITERAL:
307
+ case '@':
308
+ case TK_POW:
309
+ case TK_LSHIFT:
310
+ case TK_RSHIFT:
311
+ case TK_FLOOR_DIV:
312
+ case '&':
313
+ case '^':
314
+ case '|':
315
+ case TK_LIST_COMP:
316
+ case TK_DICT_COMP:
317
+ case TK_DOTS:
318
+ case TK_IN:
319
+ case TK_WITH_ITEM:
320
+ return;
321
+ default:
322
+ throw ErrorReport(tree)
323
+ << kindToString(tree->kind()) << " is not a valid Expr";
324
+ }
325
+ }
326
+ };
327
+
328
+ ////////////////////////////////////////////////////////////////////////////////
329
+ // Helper nodes (mostly for function arguments)
330
+ ////////////////////////////////////////////////////////////////////////////////
331
+
332
+ struct Attribute : public TreeView {
333
+ explicit Attribute(const TreeRef& tree) : TreeView(tree) {
334
+ tree_->match(TK_ATTRIBUTE);
335
+ }
336
+ Ident name() const {
337
+ return Ident(subtree(0));
338
+ }
339
+ Expr value() const {
340
+ return Expr(subtree(1));
341
+ }
342
+ static Attribute create(
343
+ const SourceRange& range,
344
+ const Ident& name,
345
+ const TreeRef& value) {
346
+ return Attribute(Compound::create(TK_ATTRIBUTE, range, {name, value}));
347
+ }
348
+ };
349
+
350
+ struct Param : public TreeView {
351
+ explicit Param(const TreeRef& tree) : TreeView(tree) {
352
+ tree_->match(TK_PARAM);
353
+ }
354
+ static Param create(
355
+ const SourceRange& range,
356
+ const Ident& ident,
357
+ const Maybe<Expr>& type,
358
+ const Maybe<Expr>& def,
359
+ bool kwarg_only) {
360
+ TreeRef kwarg_only_tree =
361
+ Compound::create(kwarg_only ? TK_TRUE : TK_FALSE, range, {});
362
+ return Param(Compound::create(
363
+ TK_PARAM, range, {ident, type, def, std::move(kwarg_only_tree)}));
364
+ }
365
+ Ident ident() const {
366
+ return Ident(subtree(0));
367
+ }
368
+ Maybe<Expr> type() const {
369
+ return Maybe<Expr>(subtree(1));
370
+ }
371
+ Maybe<Expr> defaultValue() const {
372
+ return Maybe<Expr>(subtree(2));
373
+ }
374
+ bool kwarg_only() const {
375
+ return TK_TRUE == subtree(3)->kind();
376
+ }
377
+ Param withType(const Maybe<Expr>& typ) const {
378
+ return Param::create(range(), ident(), typ, defaultValue(), kwarg_only());
379
+ }
380
+ };
381
+
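A quick sketch of the create/accessor round trip these views provide (standalone, not part of the upload; again assuming a default-constructible SourceRange): build the untyped parameter x and read it back through the typed accessors.

#include <torch/csrc/jit/frontend/tree_views.h>

using namespace torch::jit;

int main() {
  SourceRange r;
  Ident name = Ident::create(r, "x");
  // No type annotation, no default value, not keyword-only.
  Param p = Param::create(
      r, name, Maybe<Expr>::create(r), Maybe<Expr>::create(r), /*kwarg_only=*/false);
  bool typed = p.type().present();          // false: the Maybe<Expr> is empty
  const std::string& n = p.ident().name();  // "x"
  (void)typed;
  (void)n;
  return 0;
}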
382
+ ////////////////////////////////////////////////////////////////////////////////
383
+ // Top level definitions
384
+ ////////////////////////////////////////////////////////////////////////////////
385
+
386
+ struct Decl : public TreeView {
387
+ explicit Decl(const TreeRef& tree) : TreeView(tree) {
388
+ tree->match(TK_DECL);
389
+ }
390
+ List<Param> params() const {
391
+ return List<Param>(subtree(0));
392
+ }
393
+ Maybe<Expr> return_type() const {
394
+ return Maybe<Expr>(subtree(1));
395
+ }
396
+ static Decl create(
397
+ const SourceRange& range,
398
+ const List<Param>& params,
399
+ const Maybe<Expr>& return_type) {
400
+ return Decl(Compound::create(TK_DECL, range, {params, return_type}));
401
+ }
402
+ };
403
+
404
+ struct Def : public TreeView {
405
+ explicit Def(const TreeRef& tree) : TreeView(tree) {
406
+ tree->match(TK_DEF);
407
+ }
408
+ Def withName(std::string new_name) const {
409
+ auto new_ident = Ident::create(name().range(), std::move(new_name));
410
+ return create(range(), new_ident, decl(), statements());
411
+ }
412
+ Def withDecl(const Decl& decl) const {
413
+ return create(range(), name(), decl, statements());
414
+ }
415
+ Ident name() const {
416
+ return Ident(subtree(0));
417
+ }
418
+ Decl decl() const {
419
+ return Decl(subtree(1));
420
+ }
421
+ List<Stmt> statements() const {
422
+ return List<Stmt>(subtree(2));
423
+ }
424
+ static Def create(
425
+ const SourceRange& range,
426
+ const Ident& name,
427
+ const Decl& decl,
428
+ const List<Stmt>& stmts) {
429
+ return Def(Compound::create(TK_DEF, range, {name, decl, stmts}));
430
+ }
431
+ };
432
+
433
+ // Property represents a named attribute combined with a getter and setter
434
+ // method to access and mutate that attribute.
435
+ struct Property : public TreeView {
436
+ explicit Property(const TreeRef& tree) : TreeView(tree) {
437
+ tree->match(TK_PROP);
438
+ }
439
+ Ident name() const {
440
+ return Ident(subtree(0));
441
+ }
442
+ Def getter() const {
443
+ return Def(subtree(1));
444
+ }
445
+ Maybe<Def> setter() const {
446
+ return Maybe<Def>(subtree(2));
447
+ }
448
+ static Property create(
449
+ const SourceRange& range,
450
+ const Ident& name,
451
+ const Def& getter,
452
+ const Maybe<Def>& setter) {
453
+ return Property(Compound::create(TK_PROP, range, {name, getter, setter}));
454
+ }
455
+ };
456
+
457
+ struct Assign;
458
+
459
+ struct ClassDef : public TreeView {
460
+ explicit ClassDef(const TreeRef& tree) : TreeView(tree) {
461
+ tree->match(TK_CLASS_DEF);
462
+ }
463
+ explicit ClassDef(TreeRef&& tree) : TreeView(std::move(tree)) {
464
+ tree_->match(TK_CLASS_DEF);
465
+ }
466
+ ClassDef withName(std::string new_name) const {
467
+ auto new_ident = Ident::create(name().range(), std::move(new_name));
468
+ return create(range(), new_ident, superclass(), body());
469
+ }
470
+ Ident name() const {
471
+ return Ident(subtree(0));
472
+ }
473
+ Maybe<Expr> superclass() const {
474
+ return Maybe<Expr>(subtree(1));
475
+ }
476
+ List<Stmt> body() const {
477
+ return List<Stmt>(subtree(2));
478
+ }
479
+ Maybe<List<Property>> properties() const {
480
+ return Maybe<List<Property>>(subtree(3));
481
+ }
482
+ Maybe<List<Assign>> assigns() const {
483
+ return Maybe<List<Assign>>(subtree(4));
484
+ }
485
+ static ClassDef create(
486
+ const SourceRange& range,
487
+ const Ident& name,
488
+ const Maybe<Expr>& superclass,
489
+ const List<Stmt>& body) {
490
+ return ClassDef(Compound::create(
491
+ TK_CLASS_DEF,
492
+ range,
493
+ {name,
494
+ superclass,
495
+ body,
496
+ Maybe<List<Property>>::create(range),
497
+ Maybe<List<Assign>>::create(range)}));
498
+ }
499
+ static ClassDef create(
500
+ const SourceRange& range,
501
+ const Ident& name,
502
+ const Maybe<Expr>& superclass,
503
+ const List<Stmt>& body,
504
+ const List<Property>& properties,
505
+ const List<Assign>& assigns);
506
+ };
507
+
508
+ TORCH_API std::vector<std::string> getUnresolvedClassAttributes(
509
+ const ClassDef& def);
510
+
511
+ ////////////////////////////////////////////////////////////////////////////////
512
+ // Statements
513
+ ////////////////////////////////////////////////////////////////////////////////
514
+
515
+ struct If : public Stmt {
516
+ explicit If(const TreeRef& tree) : Stmt(tree) {
517
+ tree_->match(TK_IF);
518
+ }
519
+ Expr cond() const {
520
+ return Expr(subtree(0));
521
+ }
522
+ List<Stmt> trueBranch() const {
523
+ return List<Stmt>(subtree(1));
524
+ }
525
+ List<Stmt> falseBranch() const {
526
+ return List<Stmt>(subtree(2));
527
+ }
528
+ If withNewBranches(
529
+ const List<Stmt>& true_branch,
530
+ const List<Stmt>& false_branch) const {
531
+ return create(range(), cond(), true_branch, false_branch);
532
+ }
533
+ static If create(
534
+ const SourceRange& range,
535
+ const Expr& cond,
536
+ const List<Stmt>& true_branch,
537
+ const List<Stmt>& false_branch) {
538
+ return If(
539
+ Compound::create(TK_IF, range, {cond, true_branch, false_branch}));
540
+ }
541
+ };
542
+
543
+ struct While : public Stmt {
544
+ explicit While(const TreeRef& tree) : Stmt(tree) {
545
+ tree_->match(TK_WHILE);
546
+ }
547
+ Expr cond() const {
548
+ return Expr(subtree(0));
549
+ }
550
+ List<Stmt> body() const {
551
+ return List<Stmt>(subtree(1));
552
+ }
553
+ static While create(
554
+ const SourceRange& range,
555
+ const Expr& cond,
556
+ const List<Stmt>& body) {
557
+ return While(Compound::create(TK_WHILE, range, {cond, body}));
558
+ }
559
+ };
560
+
561
+ struct For : public Stmt {
562
+ explicit For(const TreeRef& tree) : Stmt(tree) {
563
+ tree->match(TK_FOR);
564
+ }
565
+ List<Expr> targets() const {
566
+ return List<Expr>(subtree(0));
567
+ }
568
+ List<Expr> itrs() const {
569
+ return List<Expr>(subtree(1));
570
+ }
571
+ List<Stmt> body() const {
572
+ return List<Stmt>(subtree(2));
573
+ }
574
+ static For create(
575
+ const SourceRange& range,
576
+ const List<Expr>& targets,
577
+ const List<Expr>& itrs,
578
+ const List<Stmt>& body) {
579
+ return For(Compound::create(TK_FOR, range, {targets, itrs, body}));
580
+ }
581
+ };
582
+
583
+ // TODO: supports only single comprehension for now
584
+ struct ListComp : public Expr {
585
+ explicit ListComp(const TreeRef& tree) : Expr(tree) {
586
+ tree->match(TK_LIST_COMP);
587
+ }
588
+ Expr elt() const {
589
+ return Expr(subtree(0));
590
+ }
591
+ Expr target() const {
592
+ return Expr(subtree(1));
593
+ }
594
+ Expr iter() const {
595
+ return Expr(subtree(2));
596
+ }
597
+ // TODO: no ifs for now
598
+ static ListComp create(
599
+ const SourceRange& range,
600
+ const Expr& elt,
601
+ const Expr& target,
602
+ const Expr& iter) {
603
+ return ListComp(Compound::create(TK_LIST_COMP, range, {elt, target, iter}));
604
+ }
605
+ };
606
+
607
+ // TODO: supports only single comprehension for now
608
+ struct DictComp : public Expr {
609
+ explicit DictComp(const TreeRef& tree) : Expr(tree) {
610
+ tree->match(TK_DICT_COMP);
611
+ }
612
+ Expr key() const {
613
+ return Expr(subtree(0));
614
+ }
615
+ Expr value() const {
616
+ return Expr(subtree(1));
617
+ }
618
+ Expr target() const {
619
+ return Expr(subtree(2));
620
+ }
621
+ Expr iter() const {
622
+ return Expr(subtree(3));
623
+ }
624
+ // TODO: no ifs for now
625
+ static DictComp create(
626
+ const SourceRange& range,
627
+ const Expr& key,
628
+ const Expr& value,
629
+ const Expr& target,
630
+ const Expr& iter) {
631
+ return DictComp(
632
+ Compound::create(TK_DICT_COMP, range, {key, value, target, iter}));
633
+ }
634
+ };
635
+
636
+ struct Global : public Stmt {
637
+ explicit Global(const TreeRef& tree) : Stmt(tree) {
638
+ tree_->match(TK_GLOBAL);
639
+ }
640
+ List<Ident> names() {
641
+ return List<Ident>(subtree(0));
642
+ }
643
+ static Global create(const SourceRange& range, const List<Ident>& names) {
644
+ return Global(Compound::create(TK_GLOBAL, range, {names}));
645
+ }
646
+ };
647
+
648
+ struct AugAssignKind : public TreeView {
649
+ explicit AugAssignKind(const TreeRef& tree) : TreeView(tree) {
650
+ switch (tree->kind()) {
651
+ case '+':
652
+ case '-':
653
+ case '*':
654
+ case '/':
655
+ case '%':
656
+ case '|':
657
+ case '&':
658
+ case '^':
659
+ case TK_POW:
660
+ case TK_LSHIFT:
661
+ case TK_RSHIFT:
662
+ return;
663
+ default:
664
+ throw ErrorReport(tree) << "is not a valid AugAssignKind";
665
+ }
666
+ }
667
+ };
668
+
669
+ // Augmented assignment, like "foo += bar"
670
+ struct AugAssign : public Stmt {
671
+ explicit AugAssign(const TreeRef& tree) : Stmt(tree) {
672
+ tree_->match(TK_AUG_ASSIGN);
673
+ }
674
+ static AugAssign create(
675
+ const SourceRange& range,
676
+ const Expr& lhs,
677
+ const AugAssignKind& aug_op,
678
+ const Expr& rhs) {
679
+ return AugAssign(
680
+ Compound::create(TK_AUG_ASSIGN, range, {lhs, aug_op, rhs}));
681
+ }
682
+ Expr lhs() const {
683
+ return Expr(subtree(0));
684
+ }
685
+ int aug_op() const {
686
+ return subtree(1)->kind();
687
+ }
688
+ Expr rhs() const {
689
+ return Expr(subtree(2));
690
+ }
691
+ };
692
+
693
+ struct Assign : public Stmt {
694
+ explicit Assign(const TreeRef& tree) : Stmt(tree) {
695
+ tree_->match(TK_ASSIGN);
696
+ }
697
+ static Assign create(
698
+ const SourceRange& range,
699
+ const List<Expr>& lhs,
700
+ const Maybe<Expr>& rhs,
701
+ const Maybe<Expr>& type) {
702
+ return Assign(Compound::create(TK_ASSIGN, range, {lhs, rhs, type}));
703
+ }
704
+
705
+ List<Expr> lhs_list() const {
706
+ return List<Expr>(subtree(0));
707
+ }
708
+
709
+ Expr lhs() const {
710
+ const auto& li = lhs_list();
711
+ TORCH_INTERNAL_ASSERT(li.size() == 1);
712
+ return *li.begin();
713
+ }
714
+
715
+ Maybe<Expr> rhs() const {
716
+ return Maybe<Expr>(subtree(1));
717
+ }
718
+
719
+ Maybe<Expr> type() const {
720
+ return Maybe<Expr>(subtree(2));
721
+ }
722
+ };
723
+
724
+ struct Return : public Stmt {
725
+ explicit Return(const TreeRef& tree) : Stmt(tree) {
726
+ tree_->match(TK_RETURN);
727
+ }
728
+ Expr expr() const {
729
+ return Expr(subtree(0));
730
+ }
731
+ static Return create(const SourceRange& range, const Expr& value) {
732
+ return Return(Compound::create(TK_RETURN, range, {value}));
733
+ }
734
+ };
735
+
736
+ struct Raise : public Stmt {
737
+ explicit Raise(const TreeRef& tree) : Stmt(tree) {
738
+ tree_->match(TK_RAISE);
739
+ }
740
+ Expr expr() const {
741
+ return Expr(subtree(0));
742
+ }
743
+ static Raise create(const SourceRange& range, const Expr& expr) {
744
+ return Raise(Compound::create(TK_RAISE, range, {expr}));
745
+ }
746
+ };
747
+
748
+ struct Assert : public Stmt {
749
+ explicit Assert(const TreeRef& tree) : Stmt(tree) {
750
+ tree_->match(TK_ASSERT);
751
+ }
752
+ Expr test() const {
753
+ return Expr(subtree(0));
754
+ }
755
+ Maybe<Expr> msg() const {
756
+ return Maybe<Expr>(subtree(1));
757
+ }
758
+ static Assert create(
759
+ const SourceRange& range,
760
+ const Expr& test,
761
+ const Maybe<Expr>& msg) {
762
+ return Assert(Compound::create(TK_ASSERT, range, {test, msg}));
763
+ }
764
+ };
765
+
766
+ struct Pass : public Stmt {
767
+ explicit Pass(const TreeRef& tree) : Stmt(tree) {
768
+ tree_->match(TK_PASS);
769
+ }
770
+ static Pass create(const SourceRange& range) {
771
+ return Pass(Compound::create(TK_PASS, range, {}));
772
+ }
773
+ };
774
+
775
+ struct Dots : public Expr {
776
+ explicit Dots(const TreeRef& tree) : Expr(tree) {
777
+ tree_->match(TK_DOTS);
778
+ }
779
+ static Dots create(const SourceRange& range) {
780
+ return Dots(Compound::create(TK_DOTS, range, {}));
781
+ }
782
+ };
783
+
784
+ struct Break : public Stmt {
785
+ explicit Break(const TreeRef& tree) : Stmt(tree) {
786
+ tree_->match(TK_BREAK);
787
+ }
788
+ static Break create(const SourceRange& range) {
789
+ return Break(Compound::create(TK_BREAK, range, {}));
790
+ }
791
+ };
792
+
793
+ struct Continue : public Stmt {
794
+ explicit Continue(const TreeRef& tree) : Stmt(tree) {
795
+ tree_->match(TK_CONTINUE);
796
+ }
797
+ static Continue create(const SourceRange& range) {
798
+ return Continue(Compound::create(TK_CONTINUE, range, {}));
799
+ }
800
+ };
801
+
802
+ struct ExprStmt : public Stmt {
803
+ explicit ExprStmt(const TreeRef& tree) : Stmt(tree) {
804
+ tree_->match(TK_EXPR_STMT);
805
+ }
806
+ Expr expr() {
807
+ return Expr(subtree(0));
808
+ }
809
+ static ExprStmt create(const SourceRange& range, const Expr& list) {
810
+ return ExprStmt(Compound::create(TK_EXPR_STMT, range, {list}));
811
+ }
812
+ };
813
+
814
+ ////////////////////////////////////////////////////////////////////////////////
815
+ // Expressions
816
+ ////////////////////////////////////////////////////////////////////////////////
817
+
818
+ struct BinOp : public Expr {
819
+ explicit BinOp(const TreeRef& tree) : Expr(tree) {
820
+ switch (tree->kind()) {
821
+ case TK_AND:
822
+ case TK_OR:
823
+ case '<':
824
+ case '>':
825
+ case TK_IS:
826
+ case TK_ISNOT:
827
+ case TK_EQ:
828
+ case TK_LE:
829
+ case TK_GE:
830
+ case TK_NE:
831
+ case '+':
832
+ case '*':
833
+ case '/':
834
+ case '-':
835
+ case '@':
836
+ case TK_POW:
837
+ case TK_LSHIFT:
838
+ case TK_RSHIFT:
839
+ case '%':
840
+ case '&':
841
+ case '^':
842
+ case '|':
843
+ case TK_FLOOR_DIV:
844
+ case TK_IN:
845
+ if (tree->trees().size() != 2)
846
+ throw ErrorReport(tree)
847
+ << "BinOp expected 2 subtrees, found " << tree->trees().size();
848
+ return;
849
+ default:
850
+ throw ErrorReport(tree)
851
+ << kindToString(tree->kind()) << " is not a valid BinOp";
852
+ }
853
+ }
854
+ Expr lhs() const {
855
+ return Expr(subtree(0));
856
+ }
857
+ Expr rhs() const {
858
+ return Expr(subtree(1));
859
+ }
860
+ static BinOp create(
861
+ const SourceRange& range,
862
+ int kind,
863
+ const Expr& lhs,
864
+ const Expr& rhs) {
865
+ return BinOp(Compound::create(kind, range, {lhs, rhs}));
866
+ }
867
+ };
868
+
869
+ struct UnaryOp : public Expr {
870
+ explicit UnaryOp(const TreeRef& tree) : Expr(tree) {
871
+ switch (tree->kind()) {
872
+ case TK_UNARY_MINUS:
873
+ case '~':
874
+ case TK_NOT:
875
+ if (tree->trees().size() != 1)
876
+ throw ErrorReport(tree)
877
+ << "UnaryOp expected 1 subtree, found " << tree->trees().size();
878
+ return;
879
+ default:
880
+ throw ErrorReport(tree)
881
+ << kindToString(tree->kind()) << " is not a valid UnaryOp";
882
+ }
883
+ }
884
+ static UnaryOp create(const SourceRange& range, int kind, const Expr& expr) {
885
+ return UnaryOp(Compound::create(kind, range, {expr}));
886
+ }
887
+ };
888
+
889
+ struct Const : public Expr {
890
+ explicit Const(const TreeRef& tree) : Expr(tree) {
891
+ tree_->matchNumSubtrees(TK_CONST, 1);
892
+ }
893
+ bool isFloatingPoint() const {
894
+ if (isComplex())
895
+ return false;
896
+
897
+ bool is_inf = subtree(0)->stringValue() == "inf";
898
+ return is_inf ||
899
+ subtree(0)->stringValue().find_first_of(".eE") != std::string::npos;
900
+ }
901
+ bool isIntegral() const {
902
+ return !isFloatingPoint() && !isComplex();
903
+ }
904
+ bool isComplex() const {
905
+ return subtree(0)->stringValue().find_first_of('j') != std::string::npos;
906
+ }
907
+ int64_t asIntegral() const {
908
+ try {
909
+ // NOLINTNEXTLINE(modernize-use-nullptr)
910
+ return std::stoll(subtree(0)->stringValue(), /*__idx=*/0, /*base=*/0);
911
+ } catch (const std::out_of_range&) {
912
+ throw ErrorReport(range()) << "Integral constant out of range "
913
+ "(must fit in a signed 64 bit integer)";
914
+ }
915
+ }
916
+ double asFloatingPoint() const {
917
+ // We can't pass in nullptr as the dummy pointer gets dereferenced for
918
+ // Android version of strtod_c().
919
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
920
+ char* dummy;
921
+ return torch::jit::strtod_c(subtree(0)->stringValue().c_str(), &dummy);
922
+ }
923
+ c10::complex<double> asComplex() const {
924
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
925
+ char* dummy;
926
+ auto str = subtree(0)->stringValue();
927
+ // Complex numbers (a+bj, where a is non-zero) are parsed as an addition
928
+ // between float/int a and a complex number "bj". When a is 0, a complex
929
+ // number bj is created as above. So, while parsing the string, we don't
930
+ // have to worry about the real component of the complex number.
931
+ auto imag =
932
+ torch::jit::strtod_c(str.substr(0, str.size() - 1).c_str(), &dummy);
933
+ return c10::complex<double>(0, imag);
934
+ }
935
+ const std::string& text() const {
936
+ return subtree(0)->stringValue();
937
+ }
938
+ static Const create(const SourceRange& range, const std::string& value) {
939
+ return Const(Compound::create(TK_CONST, range, {String::create(value)}));
940
+ }
941
+ };
942
+
943
+ struct StringLiteral : public Expr {
944
+ explicit StringLiteral(const TreeRef& tree) : Expr(tree) {
945
+ tree_->matchNumSubtrees(TK_STRINGLITERAL, 1);
946
+ }
947
+ const std::string& text() const {
948
+ return subtree(0)->stringValue();
949
+ }
950
+ static StringLiteral create(
951
+ const SourceRange& range,
952
+ const std::string& value) {
953
+ return StringLiteral(
954
+ Compound::create(TK_STRINGLITERAL, range, {String::create(value)}));
955
+ }
956
+ };
957
+
958
+ struct Apply : public Expr {
959
+ explicit Apply(const TreeRef& tree) : Expr(tree) {
960
+ tree_->match(TK_APPLY);
961
+ }
962
+ Expr callee() const {
963
+ return Expr(subtree(0));
964
+ }
965
+ List<Expr> inputs() const {
966
+ return List<Expr>(subtree(1));
967
+ }
968
+ List<Attribute> attributes() const {
969
+ return List<Attribute>(subtree(2));
970
+ }
971
+ static Apply create(
972
+ const SourceRange& range,
973
+ const Expr& callee,
974
+ const List<Expr>& inputs,
975
+ const List<Attribute>& attributes) {
976
+ return Apply(
977
+ Compound::create(TK_APPLY, range, {callee, inputs, attributes}));
978
+ }
979
+ };
980
+
981
+ struct Select : public Expr {
982
+ explicit Select(const TreeRef& tree) : Expr(tree) {
983
+ tree_->match('.');
984
+ }
985
+ Expr value() const {
986
+ return Expr(subtree(0));
987
+ }
988
+ Ident selector() const {
989
+ return Ident(subtree(1));
990
+ }
991
+ static Select create(
992
+ const SourceRange& range,
993
+ const Expr& value,
994
+ const Ident& selector) {
995
+ return Select(Compound::create('.', range, {value, selector}));
996
+ }
997
+ };
998
+
999
+ struct SliceExpr : public Expr {
1000
+ explicit SliceExpr(const TreeRef& tree) : Expr(tree) {
1001
+ tree_->match(TK_SLICE_EXPR);
1002
+ }
1003
+ Maybe<Expr> start() const {
1004
+ return Maybe<Expr>(subtree(0));
1005
+ }
1006
+ Maybe<Expr> end() const {
1007
+ return Maybe<Expr>(subtree(1));
1008
+ }
1009
+ Maybe<Expr> step() const {
1010
+ return Maybe<Expr>(subtree(2));
1011
+ }
1012
+ Expr startOr(int64_t alternative) const {
1013
+ const auto startOption = start();
1014
+ return startOption.present() ? startOption.get() : createInt(alternative);
1015
+ }
1016
+ Expr endOr(int64_t alternative) const {
1017
+ const auto endOption = end();
1018
+ return endOption.present() ? endOption.get() : createInt(alternative);
1019
+ }
1020
+ Expr stepOr(int64_t alternative) const {
1021
+ const auto stepOption = step();
1022
+ return stepOption.present() ? stepOption.get() : createInt(alternative);
1023
+ }
1024
+ static SliceExpr create(
1025
+ const SourceRange& range,
1026
+ const Maybe<Expr>& start,
1027
+ const Maybe<Expr>& end,
1028
+ const Maybe<Expr>& step) {
1029
+ return SliceExpr(
1030
+ Compound::create(TK_SLICE_EXPR, range, {start, end, step}));
1031
+ }
1032
+
1033
+ private:
1034
+ Expr createInt(int64_t value) const {
1035
+ return Expr(Const::create(range(), c10::to_string(value)));
1036
+ }
1037
+ };
1038
+
1039
+ struct Subscript : public Expr {
1040
+ explicit Subscript(const TreeRef& tree) : Expr(tree) {
1041
+ tree_->match(TK_SUBSCRIPT);
1042
+ }
1043
+ Expr value() const {
1044
+ return Expr(subtree(0));
1045
+ }
1046
+ List<Expr> subscript_exprs() const {
1047
+ return List<Expr>(subtree(1));
1048
+ }
1049
+ static Subscript create(
1050
+ const SourceRange& range,
1051
+ const Expr& value,
1052
+ const List<Expr>& subscript_exprs) {
1053
+ auto whole_range = SourceRange(
1054
+ range.source(), range.start(), subscript_exprs.range().end() + 1);
1055
+ return Subscript(
1056
+ Compound::create(TK_SUBSCRIPT, whole_range, {value, subscript_exprs}));
1057
+ }
1058
+ };
1059
+
1060
+ struct Var : public Expr {
1061
+ explicit Var(const TreeRef& tree) : Expr(tree) {
1062
+ tree_->match(TK_VAR);
1063
+ };
1064
+ Ident name() const {
1065
+ return Ident(subtree(0));
1066
+ }
1067
+ static Var create(const SourceRange& range, const Ident& name) {
1068
+ return Var(Compound::create(TK_VAR, range, {name}));
1069
+ }
1070
+ };
1071
+
1072
+ // WithItem represents an item used with a With statement.
1073
+ struct WithItem : public Expr {
1074
+ explicit WithItem(const TreeRef& tree) : Expr(tree) {
1075
+ tree_->match(TK_WITH_ITEM);
1076
+ }
1077
+
1078
+ Expr target() const {
1079
+ return Expr(subtree(0));
1080
+ }
1081
+
1082
+ Maybe<Var> var() const {
1083
+ return Maybe<Var>(subtree(1));
1084
+ }
1085
+
1086
+ static WithItem create(
1087
+ const SourceRange& range,
1088
+ const Expr& target,
1089
+ const Maybe<Var>& var) {
1090
+ return WithItem(Compound::create(TK_WITH_ITEM, range, {target, var}));
1091
+ }
1092
+ };
1093
+
1094
+ // With represents a with statement consisting of a list of with items and a
1095
+ // body of statements.
1096
+ struct With : public Stmt {
1097
+ explicit With(const TreeRef& tree) : Stmt(tree) {
1098
+ tree_->match(TK_WITH);
1099
+ }
1100
+
1101
+ List<WithItem> targets() const {
1102
+ return List<WithItem>(subtree(0));
1103
+ }
1104
+
1105
+ List<Stmt> body() const {
1106
+ return List<Stmt>(subtree(1));
1107
+ }
1108
+
1109
+ static With create(
1110
+ const SourceRange& range,
1111
+ const List<WithItem>& targets,
1112
+ const List<Stmt>& body) {
1113
+ return With(Compound::create(TK_WITH, range, {targets, body}));
1114
+ }
1115
+ };
1116
+
1117
+ struct TernaryIf : public Expr {
1118
+ explicit TernaryIf(const TreeRef& tree) : Expr(tree) {
1119
+ tree_->matchNumSubtrees(TK_IF_EXPR, 3);
1120
+ };
1121
+ Expr cond() const {
1122
+ return Expr(subtree(0));
1123
+ }
1124
+ Expr true_expr() const {
1125
+ return Expr(subtree(1));
1126
+ }
1127
+ Expr false_expr() const {
1128
+ return Expr(subtree(2));
1129
+ }
1130
+ static TernaryIf create(
1131
+ const SourceRange& range,
1132
+ const Expr& cond,
1133
+ const Expr& true_expr,
1134
+ const Expr& false_expr) {
1135
+ return TernaryIf(
1136
+ Compound::create(TK_IF_EXPR, range, {cond, true_expr, false_expr}));
1137
+ };
1138
+ };
1139
+
1140
+ struct ListLiteral : public Expr {
1141
+ explicit ListLiteral(const TreeRef& tree) : Expr(tree) {
1142
+ tree_->match(TK_LIST_LITERAL);
1143
+ }
1144
+ List<Expr> inputs() const {
1145
+ return subtree(0);
1146
+ }
1147
+ static ListLiteral create(
1148
+ const SourceRange& range,
1149
+ const List<Expr>& inputs) {
1150
+ return ListLiteral(Compound::create(TK_LIST_LITERAL, range, {inputs}));
1151
+ }
1152
+ };
1153
+
1154
+ struct TupleLiteral : public Expr {
1155
+ explicit TupleLiteral(const TreeRef& tree) : Expr(tree) {
1156
+ tree_->match(TK_TUPLE_LITERAL);
1157
+ }
1158
+ List<Expr> inputs() const {
1159
+ return subtree(0);
1160
+ }
1161
+ static TupleLiteral create(
1162
+ const SourceRange& range,
1163
+ const List<Expr>& inputs) {
1164
+ return TupleLiteral(Compound::create(TK_TUPLE_LITERAL, range, {inputs}));
1165
+ }
1166
+ };
1167
+
1168
+ struct DictLiteral : public Expr {
1169
+ explicit DictLiteral(const TreeRef& tree) : Expr(tree) {
1170
+ tree_->match(TK_DICT_LITERAL);
1171
+ }
1172
+ List<Expr> key_inputs() const {
1173
+ return subtree(0);
1174
+ }
1175
+ List<Expr> value_inputs() const {
1176
+ return subtree(1);
1177
+ }
1178
+ static DictLiteral create(
1179
+ const SourceRange& range,
1180
+ const List<Expr>& keys,
1181
+ const List<Expr>& values) {
1182
+ return DictLiteral(
1183
+ Compound::create(TK_DICT_LITERAL, range, {keys, values}));
1184
+ }
1185
+ };
1186
+
1187
+ struct Starred : public Expr {
1188
+ explicit Starred(const TreeRef& tree) : Expr(tree) {
1189
+ tree_->match(TK_STARRED);
1190
+ }
1191
+ Expr expr() const {
1192
+ return Expr(subtree(0));
1193
+ }
1194
+ static Starred create(const SourceRange& range, const Expr& expr) {
1195
+ return Starred(Compound::create(TK_STARRED, range, {expr}));
1196
+ }
1197
+ };
1198
+
1199
+ struct Delete : public Stmt {
1200
+ explicit Delete(const TreeRef& tree) : Stmt(tree) {
1201
+ tree_->match(TK_DELETE);
1202
+ }
1203
+ List<Expr> targets() const {
1204
+ return subtree(0);
1205
+ }
1206
+ static Delete create(const SourceRange& range, const List<Expr>& targets) {
1207
+ return Delete(Compound::create(TK_DELETE, range, {targets}));
1208
+ }
1209
+ };
1210
+
1211
+ /*
1212
+ * NOTE: transforming PEP 604 union into equivalent union type
1213
+ *
1214
+ * NOTE: Union[int, float] parses into:
1215
+ * <EXPR> expr:(subscript
1216
+ * (variable (ident Union))
1217
+ * (list
1218
+ * (variable (ident int))
1219
+ * (variable (ident float))))
1220
+ * <KIND> subscript
1221
+ *
1222
+ * NOTE: (int | float) parses into:
1223
+ * <EXPR> expr:(|
1224
+ * (variable (ident int))
1225
+ * (variable (ident float)))
1226
+ * <KIND> |
1227
+ */
1228
+
1229
+ inline void _flatten_pep604_union(
1230
+ const torch::jit::Expr& node,
1231
+ std::vector<torch::jit::Expr>* result) {
1232
+ // flatten possibly nested union expressions like (int | (float | str))
1233
+ // into a flat list of expressions like [int, float, str]
1234
+ if (node.kind() == '|') {
1235
+ auto as_binop = torch::jit::BinOp(node);
1236
+ _flatten_pep604_union(as_binop.lhs(), result);
1237
+ _flatten_pep604_union(as_binop.rhs(), result);
1238
+ } else {
1239
+ result->push_back(node);
1240
+ }
1241
+ }
1242
+
1243
+ inline std::vector<Expr> get_pep604_union_members(const Expr& node) {
1244
+ std::vector<Expr> result;
1245
+ _flatten_pep604_union(node, &result);
1246
+ return result;
1247
+ }
1248
+
1249
+ // Flattens a PEP 604 union into a classical union.
1250
+ // For example, ((x | y) | z) is transformed into Union[x, y, z].
1251
+ inline Expr pep604union_to_union(const Expr& expr) {
1252
+ // noop if not a pep604 union
1253
+ if (expr.kind() != '|')
1254
+ return expr;
1255
+
1256
+ // In order to support unions with more than 2 operands ((x|y)|z), we need to
1257
+ // recursively flatten the tree of | expressions.
1258
+ auto members = get_pep604_union_members(expr);
1259
+ auto synthesised_union = Subscript::create(
1260
+ expr.range(),
1261
+ Var::create(expr.range(), Ident::create(expr.range(), "Union")),
1262
+ List<Expr>::create(expr.range(), members));
1263
+ return std::move(synthesised_union);
1264
+ }
1265
+
1266
+ } // namespace jit
1267
+ } // namespace torch
1268
+
1269
+ namespace std {
1270
+
1271
+ template <typename T>
1272
+ struct iterator_traits<torch::jit::ListIterator<T>>
1273
+ : std::iterator_traits<torch::jit::TreeList::const_iterator> {};
1274
+
1275
+ } // namespace std
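
A minimal illustrative sketch (not part of the file above): the tree-view structs are thin typed wrappers over parser Tree nodes, and pep604union_to_union rewrites a `|` expression into a classical Union subscript. The snippet below builds `int | float` by hand with the create() factories shown above and then flattens it; the function name make_pep604_example is made up for illustration, and it assumes tree_views.h is available as packaged here.

    #include <torch/csrc/jit/frontend/tree_views.h>

    // Build the PEP 604 expression `int | float` from the tree-view factories,
    // then flatten it into the equivalent Union[int, float] subscript.
    torch::jit::Expr make_pep604_example(const torch::jit::SourceRange& r) {
      using namespace torch::jit;
      Expr lhs = Var::create(r, Ident::create(r, "int"));
      Expr rhs = Var::create(r, Ident::create(r, "float"));
      Expr pep604 = BinOp::create(r, '|', lhs, rhs);  // kind '|', two subtrees
      return pep604union_to_union(pep604);            // -> Subscript Union[int, float]
    }
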
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/versioned_symbols.h ADDED
@@ -0,0 +1,21 @@
1
+ #pragma once
2
+
3
+ #include <caffe2/serialize/versions.h>
4
+ #include <torch/csrc/Export.h>
5
+ #include <torch/csrc/jit/api/module.h>
6
+
7
+ #include <cstdint>
8
+
9
+ namespace torch {
10
+ namespace jit {
11
+ // Maps the given symbol into an implementation of its behavior at the
12
+ // given version.
13
+ // See note [Versioned Symbols]
14
+ TORCH_API Symbol
15
+ get_symbol_for_version(const Symbol name, const uint64_t version);
16
+
17
+ // Maps the given kind to the minimum version that supports it.
18
+ // See note [Dynamic Versions and torch.jit.save vs. torch.save]
19
+ TORCH_API uint64_t get_min_version_for_kind(const NodeKind& kind);
20
+ } // namespace jit
21
+ } // namespace torch
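
A hypothetical usage sketch (not part of the file above): resolving the behavior of a symbol for a model serialized at an older file-format version. The operator name and the version number below are made-up examples.

    #include <torch/csrc/jit/frontend/versioned_symbols.h>

    void versioned_symbol_example() {
      // Which implementation of aten::div should a version-3 model dispatch to?
      c10::Symbol div = c10::Symbol::fromQualString("aten::div");
      c10::Symbol versioned = torch::jit::get_symbol_for_version(div, 3);
      // Minimum file-format version that supports the resolved kind.
      uint64_t min_version = torch::jit::get_min_version_for_kind(versioned);
      (void)min_version;  // illustrative only
    }
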
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/alias_analysis.h ADDED
@@ -0,0 +1,322 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/alias_info.h>
4
+ #include <c10/util/flat_hash_map.h>
5
+ #include <torch/csrc/jit/ir/ir.h>
6
+ #include <torch/csrc/jit/ir/type_hashing.h>
7
+ #include <torch/csrc/jit/passes/create_functional_graphs.h>
8
+ #include <torch/csrc/jit/passes/utils/memory_dag.h>
9
+
10
+ namespace torch {
11
+ namespace jit {
12
+
13
+ /**
14
+ * Alias analysis pass.
15
+ *
16
+ * This pass produces an AliasDb that contains aliasing and mutation
17
+ * information about the graph. Users can use this information to determine
18
+ * whether mutations to the graph are safe, i.e. they don't reorder/change
19
+ * nodes in a way that affects output.
20
+ *
21
+ * Every value with a mutable type (Tensors, Lists, Tuples, etc.) will be
22
+ * associated with one or more "alias sets". If two values share an alias set,
23
+ * that means they may alias, implying that a mutation to one value cannot be
24
+ * reordered past a use of the other. Only reordering two reads of an alias set
25
+ * is considered safe.
26
+ *
27
+ * There is a special alias set called the "wildcard set", which indicates that
28
+ * we're not sure what this value may alias. To be conservative, we consider the
29
+ * wildcard alias set as potentially aliasing any other wildcard value within
30
+ * the same type class. Whenever a value becomes contained by another value,
31
+ * such as when a Tensor is appended to a List[Tensor], the contained element
32
+ * becomes part of the wildcard set.
33
+ *
34
+ * Values that contain other mutable types, such as List[Tensor], are
35
+ * initialized as containing the Wildcard set for all contained mutable types.
36
+ *
37
+ * The AliasDb API references the idea of "mutable" vs "immutable"
38
+ * types. "Mutable" means that the object's value can change, while
39
+ * "immutable" means that the value is fixed. (For example, `List` is
40
+ * mutable, so you can add and delete elements from it. On the other
41
+ * hand, you can't modify a Tuple once you create it, making `Tuple` an
42
+ * immutable container.)
43
+ *
44
+ * `isFrozen` - if the Module is frozen then consider attributes as freshly
45
+ * created objects. Freezing API invokes alias analysis to check if they are
46
+ * mutated internally.
47
+ *
48
+ * `descendFunctionCalls` - recursively analyze function and method calls
49
+ * instead of conservative analysis. Generally analysis should be done after
50
+ * inlining, so the implementation for recursive analysis is unoptimized.
51
+ */
52
+ class AliasDb {
53
+ public:
54
+ TORCH_API explicit AliasDb(
55
+ std::shared_ptr<Graph> graphi,
56
+ bool isFrozen = false,
57
+ bool descendFunctionCalls = false);
58
+ TORCH_API ~AliasDb();
59
+
60
+ // There are limitations to what effects the alias analysis can track. Two
61
+ // kinds of nodes may have untracked effects:
62
+ // 1. Nodes that write to a value that may alias the graph inputs (since
63
+ // the inputs can be used outside the graph).
64
+ // 2. Nodes that write to something in the wildcard set.
65
+ //
66
+ // These nodes are considered not safe to eliminate or mutate under any
67
+ // circumstances.
68
+ bool writesToWildcard(Node* n) const;
69
+
70
+ // Does `n` write to an alias of one of the values in `vs`?
71
+ // if `recurseBlocks` is true, consider writes on the nodes in `n`s sub-blocks
72
+ TORCH_API bool writesToAlias(Node* n, const ValueSet& vs) const;
73
+
74
+ // Do `a` and `b` potentially share a memory location, or does either
75
+ // hold in memory any element that exists in the other?
76
+ TORCH_API bool mayContainAlias(Value* a, Value* b) const;
77
+
78
+ TORCH_API bool mayContainAlias(Value* a, const at::ArrayRef<Value*> b) const;
79
+
80
+ // Do any values in group `a` share a memory location or hold in memory
81
+ // any element that exists in group `b`
82
+ TORCH_API bool mayContainAlias(
83
+ const at::ArrayRef<Value*> a,
84
+ const at::ArrayRef<Value*> b) const;
85
+
86
+ // Do `a` and `b` potentially share a memory location?
87
+ TORCH_API bool mayAlias(const Value* a, const Value* b) const;
88
+ // Do any values in group `a` potentially share a memory location with any
89
+ // value in group `b`? i.e. may they overlap?
90
+ TORCH_API bool mayAlias(const ValueSet& a, const ValueSet& b) const;
91
+
92
+ // Do any nodes write to an alias set input to `n`?
93
+ TORCH_API bool hasInputWriters(const Node* n) const;
94
+
95
+ // Do any nodes write to an alias set output by `n`?
96
+ TORCH_API bool hasOutputWriters(const Node* n) const;
97
+
98
+ // Do any nodes write to an alias set input to or output by `n`?
99
+ TORCH_API bool hasWriters(const Node* n) const;
100
+
101
+ // Do any nodes write to `v`s memory location?
102
+ TORCH_API bool hasWriters(const Value* v) const;
103
+
104
+ // Is the operation in-place? i.e. doesn't write anywhere but locations it
105
+ // reads from.
106
+ TORCH_API bool isMutable(Node* n) const;
107
+
108
+ TORCH_API bool escapesScope(const at::ArrayRef<Value*>& vs) const;
109
+
110
+ // Is it safe to change whether `a` and `b` alias each other ?
111
+ TORCH_API bool safeToChangeAliasingRelationship(
112
+ const at::ArrayRef<Value*>& a,
113
+ const at::ArrayRef<Value*>& b) const;
114
+
115
+ // Move `n` (already in the graph) after `movePoint` in the topological order.
116
+ //
117
+ // Tries to preserve value dependencies, so other nodes might be moved. We
118
+ // make two guarantees about the postcondition of the node list:
119
+ // - `n` is directly after `movePoint`.
120
+ // - only nodes between `n` and `movePoint` have been moved.
121
+ //
122
+ // Returns `false` if it's impossible to move `n` after `MovePoint` without
123
+ // violating dependencies, otherwise executes the move and returns `true`
124
+ TORCH_API bool moveAfterTopologicallyValid(Node* n, Node* movePoint);
125
+ TORCH_API bool moveBeforeTopologicallyValid(Node* n, Node* movePoint);
126
+
127
+ bool couldMoveAfterTopologically(Node* n, Node* movePoint);
128
+ bool couldMoveBeforeTopologically(Node* n, Node* movePoint);
129
+
130
+ // For debugging: print alias db state to stdout
131
+ TORCH_API void dump() const;
132
+ TORCH_API std::string toString() const;
133
+
134
+ // Generates a DOT (www.graphviz.org) graph representation
135
+ //
136
+ // Returns `true` if the output file was successfully generated
137
+ //
138
+ // WARNING: The output dot file path can't include shell specific notations,
139
+ // for example you can't use "~/temp/aliasdb.dot"
140
+ // (instead, use "/home/user/temp/aliasdb.dot")
141
+ //
142
+ TORCH_API bool dumpToGraphvizFile(const char* filename) const;
143
+ TORCH_API std::string toGraphviz() const;
144
+
145
+ // Returns `true` if the given element is mutable or if it is a
146
+ // container type with an internal mutable element (e.g.
147
+ // `Tuple[int, Tensor]` has an internal mutable type `Tensor`, so
148
+ // it would be considered a "mutable type" in AliasDb)
149
+ static bool isMutableType(const Value* v);
150
+ static bool isMutableType(const TypePtr& type);
151
+
152
+ /**
153
+ * Mutation API
154
+ *
155
+ * These methods allow you to update AliasDb in-place if you are performing
156
+ * graph mutation.
157
+ *
158
+ * WARNING: These methods should be considered INTERNAL. They do not perform
159
+ * very many correctness checks, the user is responsible for making sure they
160
+ * are updating AliasDb correctly. `Lint()`ing the AliasDb can help with
161
+ * this.
162
+ */
163
+ // Copy `existing`s aliasing info to `new_value`, and remove `existing`.
164
+ TORCH_API void replaceWithNewValue(Value* existing, Value* new_value);
165
+ // Copy `from`s aliasing info to `to`.
166
+ TORCH_API void copyValue(Value* from, Value* to);
167
+ // Create a new `value` that does not alias anything else.
168
+ TORCH_API void createValue(const Value* value);
169
+
170
+ // Enable more precise treatment of prim::TupleConstruct.
171
+ void enablePreciseTupleContainerAnalysis();
172
+
173
+ friend struct MutationRemover;
174
+
175
+ private:
176
+ // Helper for topologically-safe node moves.
177
+ class WorkingSet;
178
+ enum class MoveSide { BEFORE, AFTER };
179
+ bool tryMove(Node* toMove, Node* movePoint, MoveSide moveSide, bool dryRun);
180
+ void move(Node* toMove, Node* movePoint, MoveSide moveSide);
181
+ bool isBeforeOrAfter(const Node* n, MoveSide moveSide) const;
182
+
183
+ bool isMutableTypeInternal(const Value* v) const;
184
+ bool isMutableTypeInternal(const TypePtr& type) const;
185
+
186
+ /**
187
+ * Write and read internal API
188
+ */
189
+ // Get all the values that `n` writes to.
190
+ // NOTE: this only returns values directly written to, not aliases thereof
191
+ //
192
+ // if `recurseBlocks` is true, gather writes on the nodes in `n`s sub-blocks
193
+ MemoryLocations getWrites(Node* n) const;
194
+ void getWritesImpl(Node* n, MemoryLocations& ret) const;
195
+ // Register the fact that `n` writes to `v`.
196
+ void registerWrite(const Value* v, Node* n, bool writeToContained = false);
197
+ // Get all the values that `n` reads from.
198
+ // if `recurseBlocks` is true, gather reads on the nodes in `n`s sub-blocks
199
+ MemoryLocations getReads(Node* n) const;
200
+ void getReadsImpl(Node* n, MemoryLocations& ret) const;
201
+
202
+ /**
203
+ * Wildcard methods
204
+ */
205
+ // Register `v` as a wildcard value.
206
+ c10::optional<Element*> setWildcard(const Value* v);
207
+
208
+ // Is this a value which will not alias?
209
+ bool nonAliasingValue(const Value* elem) const;
210
+
211
+ /**
212
+ * Special analysis methods
213
+ */
214
+ void analyze(const std::shared_ptr<Graph>& graph);
215
+ void analyze(Block* block);
216
+ void analyze(Node* node);
217
+ void analyzeImpl(Node* node);
218
+ void analyzeIf(Node* node);
219
+ void analyzeLoop(Node* node);
220
+ void analyzeSubgraph(Node* node, std::shared_ptr<Graph> subgraph);
221
+ void analyzeSubgraph(Node* node);
222
+ void analyzeCreator(Node* node);
223
+ void analyzeExtractor(Node* node);
224
+ void analyzeChunk(Node* node);
225
+ void analyzeBroadcastingChunk(Node* node);
226
+ void analyzeFork(Node* node);
227
+ void analyzeWait(Node* node);
228
+ void analyzeAwaitable(Node* node);
229
+ void analyzeAwaitableWait(Node* node);
230
+ void analyzeRpcAsync(Node* node);
231
+ void analyzeBatchNorm(Node* node);
232
+ void analyzeInstanceNorm(Node* node);
233
+ void analyzeGradOf(Node* node);
234
+ void analyzeSetAttr(Node* node);
235
+ void analyzeConservative(Node* node);
236
+ void analyzeContainerConstruct(Node* node);
237
+ bool tryRegisteredAnalysis(Node* node);
238
+
239
+ /**
240
+ * Alias manipulation methods
241
+ */
242
+ void makeAllAlias(const std::vector<Value*>& values);
243
+ void makePointerTo(const Value* value, const Value* to);
244
+ TORCH_API void addToContainedElements(
245
+ const Value* element,
246
+ const Value* container);
247
+ void mapAliases(at::ArrayRef<Value*> to, at::ArrayRef<Value*> from);
248
+ void giveFreshAlias(
249
+ const Value* value,
250
+ bool add_wildcard_to_contained_elems = true);
251
+ Element* getOrCreateElement(const Value* value);
252
+
253
+ const AliasTypeSet* mapTypeToAliasTypeSetPtr(const TypePtr& type) const;
254
+ bool functionalNonEscapingListUse(const Use& use) const;
255
+ bool functionalNonEscapingTupleUse(const Use& use) const;
256
+
257
+ std::shared_ptr<Graph> graph_;
258
+
259
+ // If the Module is frozen then consider attributes as freshly created
260
+ // objects. Freezing API invokes alias analysis to check if they are mutated
261
+ // internally.
262
+ bool isFrozen_;
263
+
264
+ bool descend_function_calls_;
265
+ std::unordered_map<Graph*, std::vector<std::shared_ptr<Graph>>>
266
+ function_call_copies_;
267
+
268
+ // The points-to graph that stores aliasing relationships
269
+ std::unique_ptr<MemoryDAGBuilder> memoryDAGBuilder_;
270
+ std::unique_ptr<MemoryDAG> memoryDAG_;
271
+
272
+ // Mapping of values to MemoryDAG elements
273
+ ska::flat_hash_map<const Value*, Element*> elementMap_;
274
+ // All wildcard Elements (one for each unique mutable type)
275
+ ska::flat_hash_map<TypePtr, Element*, HashType, EqualType> wildcardIndex_;
276
+ Element* getWildcard(const TypePtr& type) const;
277
+ c10::optional<Element*> tryGetOrCreateWildcard(const TypePtr& type);
278
+ void addContainedTypesToFreshElement(
279
+ Element* container_elem,
280
+ const AliasTypeSet& mut_types);
281
+ void pointUnionTypeElementToAllContainedTypes(
282
+ Element* container_elem,
283
+ const AliasTypeSet& mut_types);
284
+
285
+ std::vector<Element*> getElements(at::ArrayRef<Value*> vs) const;
286
+ bool mayAliasWildcard(const Value* v) const;
287
+ bool mayAliasWildcard(const at::ArrayRef<Value*> vs) const;
288
+ bool hasWriters(const at::ArrayRef<Value*>& values) const;
289
+
290
+ // Cached mapping of type ptrs to their mutable types
291
+ mutable ska::flat_hash_map<TypePtr, AliasTypeSet> mapped_mutable_types_;
292
+
293
+ /**
294
+ * State for tracking write info.
295
+ */
296
+ // Write registry where the analysis can record the writes as it sees them.
297
+ // This information is later denormalized into various caches to improve query
298
+ // efficiency.
299
+ struct WriteRegistry;
300
+ std::unique_ptr<WriteRegistry> writeRegistry_;
301
+
302
+ // Map of nodes to the memory locations that they write to
303
+ using TWriteIndex = ska::flat_hash_map<Node*, MemoryLocations>;
304
+ c10::optional<TWriteIndex> writeIndex_;
305
+ // Collection of all memory locations that are written to.
306
+ c10::optional<MemoryLocations> writtenToLocationsIndex_;
307
+ void buildWrittenToLocationsIndex();
308
+
309
+ std::unordered_set<const Value*> wildcards_;
310
+
311
+ std::string getElementName(const Element* e) const;
312
+
313
+ friend void Lint(const AliasDb* db);
314
+ };
315
+
316
+ // Helper check that invariants over AliasDb are maintained.
317
+ // Useful if you are using the AliasDb mutation API and want to check you did
318
+ // the right thing.
319
+ TORCH_API void Lint(const AliasDb* db);
320
+
321
+ } // namespace jit
322
+ } // namespace torch
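
A minimal usage sketch (illustrative only, not part of the file above), assuming a Graph has already been constructed; the function name alias_queries_example is hypothetical. It shows the kind of read-only queries an optimization pass typically makes against an AliasDb.

    #include <torch/csrc/jit/ir/alias_analysis.h>
    #include <torch/csrc/jit/ir/ir.h>

    void alias_queries_example(const std::shared_ptr<torch::jit::Graph>& graph) {
      torch::jit::AliasDb db(graph);
      for (torch::jit::Node* n : graph->nodes()) {
        // Nodes whose outputs have no writers and that don't touch the
        // wildcard set are the usual candidates for reordering/elimination.
        bool risky = db.hasOutputWriters(n) || db.writesToWildcard(n);
        (void)risky;
      }
      if (graph->outputs().size() >= 2) {
        // May the first two graph outputs share a memory location?
        bool overlap = db.mayAlias(graph->outputs()[0], graph->outputs()[1]);
        (void)overlap;
      }
    }
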
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/attributes.h ADDED
@@ -0,0 +1,184 @@
1
+ #pragma once
2
+ #include <ATen/core/Tensor.h>
3
+ #include <string>
4
+ #include <vector>
5
+
6
+ #include <ATen/core/jit_type_base.h>
7
+ #include <ATen/core/symbol.h>
8
+
9
+ #include <torch/csrc/Export.h>
10
+
11
+ namespace torch {
12
+ namespace jit {
13
+
14
+ using ::c10::Symbol;
15
+
16
+ constexpr int max_tensor_display_size = 10;
17
+
18
+ enum class AttributeKind {
19
+ f,
20
+ fs,
21
+ c,
22
+ cs,
23
+ i,
24
+ is,
25
+ s,
26
+ ss,
27
+ t,
28
+ ts,
29
+ g,
30
+ gs,
31
+ ty,
32
+ tys,
33
+ ival
34
+ };
35
+ static inline const char* toString(AttributeKind kind) {
36
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
37
+ static const char* names[] = {
38
+ "f",
39
+ "c",
40
+ "cs",
41
+ "fs",
42
+ "i",
43
+ "is",
44
+ "s",
45
+ "ss",
46
+ "t",
47
+ "ts",
48
+ "g",
49
+ "gs",
50
+ "ty",
51
+ "tys",
52
+ "ival"};
53
+ AT_ASSERT(size_t(kind) < sizeof(names) / sizeof(*names));
54
+ return names[int(kind)];
55
+ }
56
+
57
+ struct AttributeValue {
58
+ AttributeValue(Symbol name) : name(name) {}
59
+ using Ptr = std::unique_ptr<AttributeValue>;
60
+ Symbol name;
61
+ virtual AttributeKind kind() const = 0;
62
+ virtual Ptr clone() const = 0;
63
+ virtual ~AttributeValue() = default;
64
+ };
65
+
66
+ template <typename T, AttributeKind Kind>
67
+ struct ScalarAttributeValue : public AttributeValue {
68
+ using ConstructorType = T;
69
+ using ValueType = T;
70
+ ScalarAttributeValue(Symbol name, ConstructorType value_)
71
+ : AttributeValue(name), value_(std::move(value_)) {}
72
+ ValueType& value() {
73
+ return value_;
74
+ }
75
+ Ptr clone() const override {
76
+ return Ptr(new ScalarAttributeValue(name, value_));
77
+ }
78
+ AttributeKind kind() const override {
79
+ return Kind;
80
+ }
81
+
82
+ private:
83
+ ValueType value_;
84
+ };
85
+
86
+ template <typename T, AttributeKind Kind>
87
+ struct VectorAttributeValue : public AttributeValue {
88
+ using ConstructorType = std::vector<T>;
89
+ using ValueType = std::vector<T>;
90
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
91
+ VectorAttributeValue(Symbol name, ConstructorType value_)
92
+ : AttributeValue(name), value_(std::move(value_)) {}
93
+ ValueType& value() {
94
+ return value_;
95
+ }
96
+ AttributeKind kind() const override {
97
+ return Kind;
98
+ }
99
+ std::unique_ptr<AttributeValue> clone() const override {
100
+ auto copy = value_;
101
+ return Ptr(new VectorAttributeValue(name, std::move(copy)));
102
+ }
103
+
104
+ private:
105
+ ValueType value_;
106
+ };
107
+
108
+ using ComplexAttr =
109
+ ScalarAttributeValue<c10::complex<double>, AttributeKind::c>;
110
+ using ComplexValsAttr =
111
+ VectorAttributeValue<c10::complex<double>, AttributeKind::cs>;
112
+ using FloatAttr = ScalarAttributeValue<double, AttributeKind::f>;
113
+ using FloatsAttr = VectorAttributeValue<double, AttributeKind::fs>;
114
+ using IntAttr = ScalarAttributeValue<int64_t, AttributeKind::i>;
115
+ using IntsAttr = VectorAttributeValue<int64_t, AttributeKind::is>;
116
+ using StringAttr = ScalarAttributeValue<std::string, AttributeKind::s>;
117
+ using StringsAttr = VectorAttributeValue<std::string, AttributeKind::ss>;
118
+ using TensorAttr = ScalarAttributeValue<at::Tensor, AttributeKind::t>;
119
+ using TensorsAttr = VectorAttributeValue<at::Tensor, AttributeKind::ts>;
120
+ using TypeAttr = ScalarAttributeValue<c10::TypePtr, AttributeKind::ty>;
121
+ using TypesAttr = VectorAttributeValue<c10::TypePtr, AttributeKind::tys>;
122
+ using IValueAttr = ScalarAttributeValue<at::IValue, AttributeKind::ival>;
123
+
124
+ struct Graph;
125
+
126
+ // We special case Graph attributes like this because we want to ensure that
127
+ // Graph::copy() is called when we clone() these attributes.
128
+ struct TORCH_API GraphAttr : public AttributeValue {
129
+ using ConstructorType = std::shared_ptr<Graph>;
130
+ using ValueType = std::shared_ptr<Graph>;
131
+ GraphAttr(Symbol name, ConstructorType value_)
132
+ : AttributeValue(name), value_(std::move(value_)) {}
133
+ ValueType& value() {
134
+ return value_;
135
+ }
136
+ Ptr clone() const override;
137
+ AttributeKind kind() const override {
138
+ return AttributeKind::g;
139
+ }
140
+
141
+ private:
142
+ std::shared_ptr<Graph> value_;
143
+ };
144
+
145
+ struct TORCH_API GraphsAttr : public AttributeValue {
146
+ using ConstructorType = std::vector<std::shared_ptr<Graph>>;
147
+ using ValueType = std::vector<std::shared_ptr<Graph>>;
148
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
149
+ GraphsAttr(Symbol name, ConstructorType value_)
150
+ : AttributeValue(name), value_(std::move(value_)) {}
151
+ ValueType& value() {
152
+ return value_;
153
+ }
154
+ AttributeKind kind() const override {
155
+ return AttributeKind::gs;
156
+ }
157
+ std::unique_ptr<AttributeValue> clone() const override;
158
+
159
+ private:
160
+ ValueType value_;
161
+ };
162
+
163
+ struct IRAttributeError : public std::exception {
164
+ IRAttributeError(Symbol name, bool defined) {
165
+ std::stringstream ss;
166
+ // NOLINTNEXTLINE(bugprone-branch-clone)
167
+ if (!defined) {
168
+ ss << "required keyword attribute '" << name.toUnqualString()
169
+ << "' is undefined";
170
+ } else {
171
+ ss << "required keyword attribute '" << name.toUnqualString()
172
+ << "' has the wrong type";
173
+ }
174
+ msg = ss.str();
175
+ }
176
+ const char* what() const noexcept override {
177
+ return msg.c_str();
178
+ }
179
+
180
+ private:
181
+ std::string msg;
182
+ };
183
+ } // namespace jit
184
+ } // namespace torch
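
A small illustrative sketch (not part of the file above): the AttributeValue subclasses are the storage behind Node attributes, and clone() produces an independent copy of the payload. The attribute name "value" and the function name are just examples.

    #include <torch/csrc/jit/ir/attributes.h>

    void attribute_value_example() {
      using namespace torch::jit;
      IntAttr int_attr(c10::Symbol::attr("value"), 7);   // scalar int64_t attribute
      AttributeValue::Ptr copy = int_attr.clone();       // independent copy of the payload
      // toString maps the kind tag back to its short name (here "i").
      const char* kind_name = toString(copy->kind());
      (void)kind_name;
    }
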
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/constants.h ADDED
@@ -0,0 +1,61 @@
1
+ #pragma once
2
+ #include <ATen/core/ivalue.h>
3
+ #include <ATen/core/jit_type.h>
4
+ #include <torch/csrc/Export.h>
5
+ #include <torch/csrc/jit/frontend/source_range.h>
6
+ #include <torch/csrc/jit/ir/scope.h>
7
+
8
+ // helpers for handling constants in the IR
9
+ // - create constant nodes from ints, floats, complex, intlist, Tensors, and
10
+ // other types
11
+ // - implement primitive constant ops.
12
+ namespace torch {
13
+ namespace jit {
14
+
15
+ using ::c10::IValue;
16
+
17
+ struct Graph;
18
+ struct Value;
19
+
20
+ // thrown when insertConstant cannot encode the IValue into a graph
21
+ struct TORCH_API constant_not_supported_error : public std::runtime_error {
22
+ using runtime_error::runtime_error;
23
+ };
24
+
25
+ TORCH_API Value* insertConstant(
26
+ Graph& g,
27
+ const IValue& val,
28
+ c10::optional<SourceRange> loc = c10::nullopt,
29
+ c10::optional<ScopePtr> scope = c10::nullopt);
30
+
31
+ // note: prefer g.insertConstant(val, loc), which does exactly the same thing;
32
+ // this function is only declared/defined here because its implementation is
33
+ // closely related to the implementation of prim::Constant that is also in
34
+ // constants.cpp.
35
+ //
36
+ // returns a c10::nullopt if the IValue kind cannot be inserted as a constant
37
+ TORCH_API c10::optional<Value*> tryInsertConstant(
38
+ Graph& g,
39
+ const IValue& val,
40
+ c10::optional<SourceRange> loc = c10::nullopt,
41
+ c10::optional<ScopePtr> scope = c10::nullopt);
42
+
43
+ ////////////////////////////////////////////////////////////////////////////////
44
+ // Helper for retrieving constants
45
+ ////////////////////////////////////////////////////////////////////////////////
46
+
47
+ // attempt to convert a (possibly constant) Value* into an interpreter value
48
+ // (IValue). returns c10::nullopt if the Value* was not constant
49
+ TORCH_API c10::optional<IValue> toIValue(const Value* v);
50
+
51
+ // if a value is a constant then try to turn into type T using the
52
+ // same rules as the interpreter
53
+ template <typename T>
54
+ c10::optional<T> constant_as(const Value* v) {
55
+ if (auto ivalue = toIValue(v)) {
56
+ return ivalue->to<T>();
57
+ }
58
+ return c10::nullopt;
59
+ }
60
+ } // namespace jit
61
+ } // namespace torch
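
A minimal sketch (illustrative only, not part of the file above) of inserting a constant into a graph and reading it back through the helpers declared here.

    #include <torch/csrc/jit/ir/constants.h>
    #include <torch/csrc/jit/ir/ir.h>

    void constant_roundtrip_example() {
      auto graph = std::make_shared<torch::jit::Graph>();
      // insertConstant adds a prim::Constant node and returns its output Value.
      torch::jit::Value* v = torch::jit::insertConstant(*graph, int64_t(42));
      // constant_as<T> yields a value only if `v` is a constant of that type.
      if (auto i = torch::jit::constant_as<int64_t>(v)) {
        TORCH_INTERNAL_ASSERT(*i == 42);
      }
    }
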
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/graph_node_list.h ADDED
@@ -0,0 +1,201 @@
1
+ #pragma once
2
+
3
+ #include <c10/util/Exception.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ // Intrusive doubly linked lists with sane reverse iterators.
9
+ // The header file is named generic_graph_node_list.h because it is ONLY
10
+ // used for Graph's Node lists, and if you want to use it for other
11
+ // things, you will have to do some refactoring.
12
+ //
13
+ // At the moment, the templated type T must support a few operations:
14
+ //
15
+ // - It must have a field: T* next_in_graph[2] = { nullptr, nullptr };
16
+ // which are used for the intrusive linked list pointers.
17
+ //
18
+ // - It must have a method 'destroy()', which removes T from the
19
+ // list and frees a T.
20
+ //
21
+ // In practice, we are only using it with Node and const Node. 'destroy()'
22
+ // needs to be renegotiated if you want to use this somewhere else.
23
+ //
24
+ // Regardless of the iteration direction, iterators always physically point
25
+ // to the element they logically point to, rather than
26
+ // the off-by-one behavior for all standard library reverse iterators like
27
+ // std::list.
28
+
29
+ // The list includes two sentinel nodes, one at the beginning and one at the
30
+ // end with a circular link between them. It is an error to insert nodes after
31
+ // the end sentinel node but before the beginning node:
32
+
33
+ // Visualization showing only the next() links:
34
+ // HEAD -> first -> second -> ... -> last -> TAIL
35
+ // ^------------------------------------------
36
+
37
+ // Visualization showing only the prev() links:
38
+ // HEAD <- first <- second <- ... <- last <- TAIL
39
+ // ------------------------------------------^
40
+
41
+ static constexpr int kNextDirection = 0;
42
+ static constexpr int kPrevDirection = 1;
43
+
44
+ template <typename T>
45
+ struct generic_graph_node_list;
46
+
47
+ template <typename T>
48
+ struct generic_graph_node_list_iterator;
49
+
50
+ struct Node;
51
+ using graph_node_list = generic_graph_node_list<Node>;
52
+ using const_graph_node_list = generic_graph_node_list<const Node>;
53
+ using graph_node_list_iterator = generic_graph_node_list_iterator<Node>;
54
+ using const_graph_node_list_iterator =
55
+ generic_graph_node_list_iterator<const Node>;
56
+
57
+ template <typename T>
58
+ struct generic_graph_node_list_iterator {
59
+ generic_graph_node_list_iterator() : cur(nullptr), d(kNextDirection) {}
60
+ generic_graph_node_list_iterator(T* cur, int d) : cur(cur), d(d) {}
61
+ generic_graph_node_list_iterator(
62
+ const generic_graph_node_list_iterator& rhs) = default;
63
+ generic_graph_node_list_iterator(
64
+ generic_graph_node_list_iterator&& rhs) noexcept = default;
65
+ generic_graph_node_list_iterator& operator=(
66
+ const generic_graph_node_list_iterator& rhs) = default;
67
+ generic_graph_node_list_iterator& operator=(
68
+ generic_graph_node_list_iterator&& rhs) noexcept = default;
69
+ T* operator*() const {
70
+ return cur;
71
+ }
72
+ T* operator->() const {
73
+ return cur;
74
+ }
75
+ generic_graph_node_list_iterator& operator++() {
76
+ AT_ASSERT(cur);
77
+ cur = cur->next_in_graph[d];
78
+ return *this;
79
+ }
80
+ generic_graph_node_list_iterator operator++(int) {
81
+ generic_graph_node_list_iterator old = *this;
82
+ ++(*this);
83
+ return old;
84
+ }
85
+ generic_graph_node_list_iterator& operator--() {
86
+ AT_ASSERT(cur);
87
+ cur = cur->next_in_graph[reverseDir()];
88
+ return *this;
89
+ }
90
+ generic_graph_node_list_iterator operator--(int) {
91
+ generic_graph_node_list_iterator old = *this;
92
+ --(*this);
93
+ return old;
94
+ }
95
+
96
+ // erase cur without invalidating this iterator
97
+ // named differently from destroy so that ->/. bugs do not
98
+ // silently cause the wrong one to be called.
99
+ // iterator will point to the previous entry after call
100
+ void destroyCurrent() {
101
+ T* n = cur;
102
+ cur = cur->next_in_graph[reverseDir()];
103
+ n->destroy();
104
+ }
105
+ generic_graph_node_list_iterator reverse() {
106
+ return generic_graph_node_list_iterator(cur, reverseDir());
107
+ }
108
+
109
+ private:
110
+ int reverseDir() {
111
+ return d == kNextDirection ? kPrevDirection : kNextDirection;
112
+ }
113
+ T* cur;
114
+ int d; // direction 0 is forward 1 is reverse, see next_in_graph
115
+ };
116
+
117
+ template <typename T>
118
+ struct generic_graph_node_list {
119
+ using iterator = generic_graph_node_list_iterator<T>;
120
+ using const_iterator = generic_graph_node_list_iterator<const T>;
121
+ generic_graph_node_list_iterator<T> begin() {
122
+ return generic_graph_node_list_iterator<T>(head->next_in_graph[d], d);
123
+ }
124
+ generic_graph_node_list_iterator<const T> begin() const {
125
+ return generic_graph_node_list_iterator<const T>(head->next_in_graph[d], d);
126
+ }
127
+ generic_graph_node_list_iterator<T> end() {
128
+ return generic_graph_node_list_iterator<T>(head->next_in_graph[!d], d);
129
+ }
130
+ generic_graph_node_list_iterator<const T> end() const {
131
+ return generic_graph_node_list_iterator<const T>(
132
+ head->next_in_graph[!d], d);
133
+ }
134
+ generic_graph_node_list_iterator<T> rbegin() {
135
+ return reverse().begin();
136
+ }
137
+ generic_graph_node_list_iterator<const T> rbegin() const {
138
+ return reverse().begin();
139
+ }
140
+ generic_graph_node_list_iterator<T> rend() {
141
+ return reverse().end();
142
+ }
143
+ generic_graph_node_list_iterator<const T> rend() const {
144
+ return reverse().end();
145
+ }
146
+ generic_graph_node_list reverse() {
147
+ return generic_graph_node_list(head->next_in_graph[!d], !d);
148
+ }
149
+ const generic_graph_node_list reverse() const {
150
+ return generic_graph_node_list(head->next_in_graph[!d], !d);
151
+ }
152
+ T* front() {
153
+ return head->next_in_graph[d];
154
+ }
155
+ const T* front() const {
156
+ return head->next_in_graph[d];
157
+ }
158
+ T* back() {
159
+ return head->next_in_graph[!d];
160
+ }
161
+ const T* back() const {
162
+ return head->next_in_graph[!d];
163
+ }
164
+ generic_graph_node_list(T* head, int d) : head(head), d(d) {}
165
+
166
+ private:
167
+ T* head; // both head and tail are sentinel nodes
168
+ // the first real node is head->next_in_graph[d]
169
+ // the tail sentinel is head->next_in_graph[!d]
170
+ int d;
171
+ };
172
+
173
+ template <typename T>
174
+ static inline bool operator==(
175
+ generic_graph_node_list_iterator<T> a,
176
+ generic_graph_node_list_iterator<T> b) {
177
+ return *a == *b;
178
+ }
179
+
180
+ template <typename T>
181
+ static inline bool operator!=(
182
+ generic_graph_node_list_iterator<T> a,
183
+ generic_graph_node_list_iterator<T> b) {
184
+ return *a != *b;
185
+ }
186
+
187
+ } // namespace jit
188
+ } // namespace torch
189
+
190
+ namespace std {
191
+
192
+ template <typename T>
193
+ struct iterator_traits<torch::jit::generic_graph_node_list_iterator<T>> {
194
+ using difference_type = int64_t;
195
+ using value_type = T*;
196
+ using pointer = T**;
197
+ using reference = T*&;
198
+ using iterator_category = bidirectional_iterator_tag;
199
+ };
200
+
201
+ } // namespace std
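
A short illustrative sketch (not part of the file above): Graph::nodes() hands back the intrusive list defined here, so forward and reverse traversal look like this. The function name traverse_example is hypothetical.

    #include <torch/csrc/jit/ir/ir.h>

    void traverse_example(torch::jit::Graph& graph) {
      // Forward, in topological order.
      for (torch::jit::Node* n : graph.nodes()) {
        (void)n;
      }
      // Reverse iterators point directly at the element (no off-by-one), and
      // destroyCurrent() allows erasing the current node while iterating.
      for (auto it = graph.nodes().rbegin(); it != graph.nodes().rend(); ++it) {
        torch::jit::Node* n = *it;
        (void)n;
      }
    }
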