applied-ai-018 commited on
Commit
49cb9b3
·
verified ·
1 Parent(s): b3504fe

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. ckpts/universal/global_step20/zero/14.mlp.dense_4h_to_h.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step20/zero/18.mlp.dense_4h_to_h.weight/exp_avg_sq.pt +3 -0
  3. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/init.h +9 -0
  4. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/module_python.h +35 -0
  5. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/pybind.h +213 -0
  6. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/pybind_utils.h +1158 -0
  7. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_arg_flatten.h +119 -0
  8. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_custom_class.h +20 -0
  9. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_dict.h +126 -0
  10. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_ir.h +50 -0
  11. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_ivalue.h +97 -0
  12. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_list.h +228 -0
  13. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_tracer.h +45 -0
  14. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_tree_views.h +9 -0
  15. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/script_init.h +7 -0
  16. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/update_graph_executor_opt.h +6 -0
  17. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/utf8_decoding_ignore.h +6 -0
  18. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/analysis.h +406 -0
  19. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/block_codegen.h +150 -0
  20. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/bounds_inference.h +80 -0
  21. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/bounds_overlap.h +128 -0
  22. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/codegen.h +283 -0
  23. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/cpp_codegen.h +102 -0
  24. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/cpp_intrinsics.h +36 -0
  25. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/cuda_codegen.h +295 -0
  26. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/cuda_random.h +104 -0
  27. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/eval.h +346 -0
  28. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/exceptions.h +91 -0
  29. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/expr.h +499 -0
  30. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/external_functions.h +115 -0
  31. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/external_functions_core.h +29 -0
  32. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/external_functions_registry.h +61 -0
  33. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/fwd_decls.h +129 -0
  34. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/graph_opt.h +115 -0
  35. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/half_support.h +217 -0
  36. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/hash_provider.h +304 -0
  37. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/intrinsic_symbols.h +22 -0
  38. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir.h +934 -0
  39. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir_cloner.h +65 -0
  40. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir_mutator.h +66 -0
  41. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir_printer.h +130 -0
  42. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir_simplifier.h +554 -0
  43. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir_verifier.h +58 -0
  44. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir_visitor.h +64 -0
  45. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/kernel.h +382 -0
  46. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/llvm_codegen.h +143 -0
  47. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/llvm_jit.h +77 -0
  48. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/loopnest.h +606 -0
  49. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/loopnest_randomization.h +13 -0
  50. venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/lowerings.h +49 -0
ckpts/universal/global_step20/zero/14.mlp.dense_4h_to_h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2b02933cf8d72b33232bdea1819375ba11fd1c881b6d08f552ccde9d82b5a954
3
+ size 33555612
ckpts/universal/global_step20/zero/18.mlp.dense_4h_to_h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:300476c759a9584056de01692721be4fb0c87c24610b227124cb5a950e8e36a9
3
+ size 33555627
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/init.h ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/utils/pybind.h>
4
+
5
+ namespace torch::jit {
6
+
7
+ void initJITBindings(PyObject* module);
8
+
9
+ } // namespace torch::jit
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/module_python.h ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <pybind11/pybind11.h>
3
+ #include <pybind11/stl.h>
4
+ #include <torch/csrc/jit/api/module.h>
5
+ #include <torch/csrc/utils/pybind.h>
6
+
7
+ namespace py = pybind11;
8
+
9
+ namespace torch::jit {
10
+
11
+ inline c10::optional<Module> as_module(py::handle obj) {
12
+ static py::handle ScriptModule =
13
+ py::module::import("torch.jit").attr("ScriptModule");
14
+ if (py::isinstance(obj, ScriptModule)) {
15
+ return py::cast<Module>(obj.attr("_c"));
16
+ }
17
+ return c10::nullopt;
18
+ }
19
+
20
+ inline c10::optional<Object> as_object(py::handle obj) {
21
+ static py::handle ScriptObject =
22
+ py::module::import("torch").attr("ScriptObject");
23
+ if (py::isinstance(obj, ScriptObject)) {
24
+ return py::cast<Object>(obj);
25
+ }
26
+
27
+ static py::handle RecursiveScriptClass =
28
+ py::module::import("torch.jit").attr("RecursiveScriptClass");
29
+ if (py::isinstance(obj, RecursiveScriptClass)) {
30
+ return py::cast<Object>(obj.attr("_c"));
31
+ }
32
+ return c10::nullopt;
33
+ }
34
+
35
+ } // namespace torch::jit
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/pybind.h ADDED
@@ -0,0 +1,213 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/python_headers.h>
4
+
5
+ #include <ATen/core/ivalue.h>
6
+ #include <ATen/core/symbol.h>
7
+ #include <c10/util/irange.h>
8
+ #include <torch/csrc/DynamicTypes.h>
9
+ #include <torch/csrc/THP.h>
10
+ #include <torch/csrc/autograd/variable.h>
11
+ #include <torch/csrc/jit/frontend/tracer.h>
12
+ #include <torch/csrc/jit/python/pybind_utils.h>
13
+ #include <torch/csrc/utils/pybind.h>
14
+
15
+ #include <pybind11/functional.h>
16
+ #include <pybind11/pybind11.h>
17
+ #include <pybind11/stl.h>
18
+
19
+ namespace py = pybind11;
20
+
21
+ namespace torch::jit {
22
+
23
+ // This is a variant of shared_ptr that "sees through" a wrapper.
24
+ // We use it to convert Value, Node, Block and node to "wrapped" Python
25
+ // values. When we destruct the C++ object, the wrapper's pointer will
26
+ // be set to 0 and any future dereferencing will throw. We need this
27
+ // because the Python objects may hang around after the C++ object
28
+ // has already been destroyed.
29
+ // This also needs the magic type_caster below, which is from the
30
+ // workaround offered in https://github.com/pybind/pybind11/issues/2751
31
+ template <typename T>
32
+ class unwrapping_shared_ptr {
33
+ static_assert(
34
+ std::is_same<T, torch::jit::Value>::value ||
35
+ std::is_same<T, torch::jit::Node>::value ||
36
+ std::is_same<T, torch::jit::Block>::value,
37
+ "unwrapping type only defined for Graph object types");
38
+
39
+ private:
40
+ std::shared_ptr<torch::jit::Wrap<T>> impl;
41
+
42
+ public:
43
+ unwrapping_shared_ptr() : impl({}) {}
44
+ explicit unwrapping_shared_ptr(T* p) : impl(p->wrap()) {
45
+ impl->clear_cb = &clear_registered_instances;
46
+ }
47
+ T* get() const {
48
+ if (!impl->elem) {
49
+ throw std::logic_error("has been invalidated");
50
+ }
51
+ return impl->elem;
52
+ }
53
+ // we need to disable the overloaded & for PyBind11 < 2.3 due.
54
+ // see https://github.com/pybind/pybind11/pull/1435
55
+ #if (PYBIND11_VERSION_MAJOR > 2) || \
56
+ ((PYBIND11_VERSION_MAJOR == 2) && (PYBIND11_VERSION_MINOR >= 3))
57
+ T** operator&() {
58
+ if (!impl->elem) {
59
+ throw std::logic_error("has been invalidated");
60
+ }
61
+ return &(impl->elem);
62
+ }
63
+ #endif
64
+ };
65
+
66
+ } // namespace torch::jit
67
+
68
+ PYBIND11_DECLARE_HOLDER_TYPE(T, torch::jit::unwrapping_shared_ptr<T>, true);
69
+
70
+ namespace pybind11::detail {
71
+
72
+ #define CREATE_UNWRAPPING_CASTER(Class) \
73
+ template <> \
74
+ struct type_caster<Class> : public type_caster_base<Class> { \
75
+ public: \
76
+ using type = Class; \
77
+ using holder_type = torch::jit::unwrapping_shared_ptr<Class>; \
78
+ \
79
+ bool load(handle src, bool convert) { \
80
+ return load_impl<type_caster<Class>>(src, convert); \
81
+ } \
82
+ \
83
+ explicit operator type*() { \
84
+ return static_cast<type*>(value); \
85
+ } \
86
+ explicit operator type&() { \
87
+ return *static_cast<type*>(value); \
88
+ } \
89
+ \
90
+ protected: \
91
+ friend class type_caster_generic; \
92
+ \
93
+ bool load_value(value_and_holder&& v_h) { \
94
+ if (v_h.holder_constructed()) { \
95
+ value = v_h.template holder<holder_type>().get(); \
96
+ return true; \
97
+ } else { \
98
+ throw cast_error( \
99
+ "Unable to cast from non-held to held instance (#Class& to Holder<#Class>)"); \
100
+ } \
101
+ } \
102
+ }
103
+
104
+ CREATE_UNWRAPPING_CASTER(torch::jit::Node);
105
+ CREATE_UNWRAPPING_CASTER(torch::jit::Value);
106
+ CREATE_UNWRAPPING_CASTER(torch::jit::Block);
107
+
108
+ #undef CREATE_UNWRAPPING_CASTER
109
+
110
+ template <>
111
+ struct type_caster<torch::jit::IValue> {
112
+ public:
113
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
114
+ PYBIND11_TYPE_CASTER(torch::jit::IValue, _("IValue"));
115
+
116
+ bool load(handle src, bool) {
117
+ try {
118
+ value = torch::jit::toTypeInferredIValue(src);
119
+ return true;
120
+ } catch (std::exception& e) {
121
+ return false;
122
+ }
123
+ }
124
+
125
+ static handle cast(
126
+ torch::jit::IValue src,
127
+ return_value_policy /* policy */,
128
+ handle /* parent */) {
129
+ return torch::jit::toPyObject(std::move(src)).release();
130
+ }
131
+ };
132
+
133
+ template <>
134
+ struct type_caster<torch::jit::Symbol> {
135
+ public:
136
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
137
+ PYBIND11_TYPE_CASTER(torch::jit::Symbol, _("Symbol"));
138
+
139
+ bool load(handle src, bool) {
140
+ // TODO: Is there a way to py::cast that doesn't raise an exception on
141
+ // failure? Can we catch pybind11::cast_error here instead?
142
+ std::string src_str;
143
+ try {
144
+ src_str = py::cast<std::string>(src);
145
+ } catch (std::exception& e) {
146
+ return false;
147
+ }
148
+ value = torch::jit::Symbol::fromQualString(src_str);
149
+ return true;
150
+ }
151
+
152
+ static handle cast(
153
+ torch::jit::Symbol src,
154
+ return_value_policy /* policy */,
155
+ handle /* parent */) {
156
+ return py::cast(std::string(src.toQualString()), return_value_policy::copy)
157
+ .release();
158
+ }
159
+ };
160
+
161
+ template <>
162
+ struct type_caster<torch::jit::AttributeKind> {
163
+ public:
164
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
165
+ PYBIND11_TYPE_CASTER(torch::jit::AttributeKind, _("AttributeKind"));
166
+
167
+ bool load(handle src, bool) {
168
+ return false;
169
+ }
170
+
171
+ static handle cast(
172
+ torch::jit::AttributeKind src,
173
+ return_value_policy /* policy */,
174
+ handle /* parent */) {
175
+ return py::cast(
176
+ std::string(torch::jit::toString(src)),
177
+ return_value_policy::copy)
178
+ .release();
179
+ }
180
+ };
181
+
182
+ // See https://github.com/pybind/pybind11/issues/637
183
+ using ListCasterBase = pybind11::detail::
184
+ list_caster<std::vector<torch::jit::Node*>, torch::jit::Node*>;
185
+ template <>
186
+ struct type_caster<std::vector<torch::jit::Node*>> : ListCasterBase {
187
+ static handle cast(
188
+ const std::vector<torch::jit::Node*>& src,
189
+ return_value_policy,
190
+ handle parent) {
191
+ return ListCasterBase::cast(src, return_value_policy::reference, parent);
192
+ }
193
+ static handle cast(
194
+ const std::vector<torch::jit::Node*>* src,
195
+ return_value_policy pol,
196
+ handle parent) {
197
+ return cast(*src, pol, parent);
198
+ }
199
+ };
200
+
201
+ } // namespace pybind11::detail
202
+
203
+ namespace torch::jit {
204
+
205
+ static inline py::tuple tuple_tail(const py::tuple& tup) {
206
+ py::tuple r(tup.size() - 1);
207
+ for (const auto i : c10::irange(1, tup.size())) {
208
+ r[i - 1] = tup[i];
209
+ }
210
+ return r;
211
+ }
212
+
213
+ } // namespace torch::jit
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/pybind_utils.h ADDED
@@ -0,0 +1,1158 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/ivalue.h>
4
+ #include <ATen/core/jit_type.h>
5
+ #include <ATen/core/qualified_name.h>
6
+ #include <ATen/core/stack.h>
7
+ #include <pybind11/complex.h>
8
+ #include <pybind11/pybind11.h>
9
+ #include <pybind11/pytypes.h>
10
+ #include <torch/csrc/Device.h>
11
+ #include <torch/csrc/Dtype.h>
12
+ #include <torch/csrc/Export.h>
13
+ #include <torch/csrc/Layout.h>
14
+ #include <torch/csrc/QScheme.h>
15
+ #include <torch/csrc/Stream.h>
16
+ #include <torch/csrc/jit/api/module.h>
17
+ #include <torch/csrc/jit/frontend/schema_matching.h>
18
+ #include <torch/csrc/jit/frontend/tracer.h>
19
+ #include <torch/csrc/jit/python/module_python.h>
20
+ #include <torch/csrc/jit/python/python_custom_class.h>
21
+ #include <torch/csrc/jit/python/python_tracer.h>
22
+ #include <torch/csrc/jit/resource_guard.h>
23
+ #include <torch/csrc/jit/runtime/operator.h>
24
+ #include <torch/csrc/utils/pybind.h>
25
+ #include <torch/csrc/utils/python_arg_parser.h>
26
+ #include <torch/csrc/utils/six.h>
27
+ #ifdef USE_DISTRIBUTED
28
+ #include <torch/csrc/distributed/rpc/py_rref.h>
29
+ #include <torch/csrc/distributed/rpc/rref_impl.h>
30
+ #endif
31
+
32
+ #include <ATen/core/function_schema.h>
33
+ #include <c10/core/Stream.h>
34
+ #ifdef USE_C10D_NCCL
35
+ #include <c10/cuda/CUDACachingAllocator.h>
36
+ #include <c10/cuda/CUDAStream.h>
37
+ #endif
38
+ #include <c10/util/Exception.h>
39
+ #include <c10/util/Optional.h>
40
+ #include <c10/util/irange.h>
41
+
42
+ #include <algorithm>
43
+ #include <cstddef>
44
+ #include <string>
45
+ #include <utility>
46
+ #include <vector>
47
+
48
+ // The visibility attribute is to avoid a warning about storing a field in the
49
+ // struct that has a different visibility (from pybind) than the struct.
50
+ #ifdef _WIN32
51
+ #define VISIBILITY_HIDDEN
52
+ #else
53
+ #define VISIBILITY_HIDDEN __attribute__((visibility("hidden")))
54
+ #endif
55
+
56
+ namespace torch::jit {
57
+
58
+ using ResolutionCallback = std::function<py::object(std::string)>;
59
+
60
+ void clear_registered_instances(void* ptr);
61
+
62
+ TORCH_PYTHON_API IValue toIValue(
63
+ py::handle obj,
64
+ const TypePtr& type,
65
+ c10::optional<int32_t> N = c10::nullopt);
66
+
67
+ TORCH_PYTHON_API py::object toPyObject(IValue ivalue);
68
+
69
+ // Hack to overload the behavior of toIValue to accept Python
70
+ // numbers in places where a Tensor is expected
71
+ // See also torch::should_allow_numbers_as_tensors
72
+ class ToIValueAllowNumbersAsTensors {
73
+ bool old_;
74
+
75
+ public:
76
+ ToIValueAllowNumbersAsTensors(bool enable);
77
+ ~ToIValueAllowNumbersAsTensors();
78
+ };
79
+
80
+ // Wrap Python function to guard deref
81
+ // NB: Need VISIBILITY_HIDDEN for silencing compiler error,
82
+ // 'torch::jit::PythonFunctionGuard' declared with greater visibility than the
83
+ // type of its field 'torch::jit::PythonFunctionGuard::func_'
84
+ struct VISIBILITY_HIDDEN PythonFunctionGuard {
85
+ explicit PythonFunctionGuard(py::function func) : func_(std::move(func)) {}
86
+
87
+ ~PythonFunctionGuard() {
88
+ pybind11::gil_scoped_acquire ag;
89
+ func_.dec_ref();
90
+ // explicitly setting PyObject* to nullptr to prevent py::object's dtor to
91
+ // decref on the PyObject again.
92
+ // See Note [Destructing py::object] in python_ivalue.h
93
+ func_.ptr() = nullptr;
94
+ }
95
+
96
+ py::function func_;
97
+ };
98
+
99
+ // The PythonFutureWrapper for ivalue::Future
100
+ //
101
+ // NB: VISIBILITY_HIDDEN is for silencing compiling error,
102
+ // "error: 'torch::jit::PythonFutureWrapper' declared with greater visibility
103
+ // than the type of its field 'torch::jit::PythonFutureWrapper::unwrap_func'
104
+ // [-Werror=attributes]"
105
+ //
106
+ // NB: inherit from enable_shared_from_this because then(py::function) needs to
107
+ // get a shared_ptr from this pointer.
108
+ struct VISIBILITY_HIDDEN PythonFutureWrapper
109
+ : std::enable_shared_from_this<PythonFutureWrapper> {
110
+ using UnwrapFunc = std::function<void(py::object)>;
111
+
112
+ explicit PythonFutureWrapper(
113
+ c10::intrusive_ptr<c10::ivalue::Future> fut,
114
+ c10::optional<UnwrapFunc> unwrap_func = c10::nullopt)
115
+ : fut(std::move(fut)), unwrap_func(std::move(unwrap_func)) {}
116
+
117
+ explicit PythonFutureWrapper(const PythonFutureWrapper&) = delete;
118
+ PythonFutureWrapper& operator=(const PythonFutureWrapper&) = delete;
119
+
120
+ bool done() {
121
+ return fut->completed();
122
+ }
123
+
124
+ py::object value() {
125
+ // acquiring GIL as toPyObject creates new py::object
126
+ // without grabbing the GIL.
127
+ py::gil_scoped_acquire acquire;
128
+ py::object py_obj = toPyObject(fut->value());
129
+ // unwrap_func is a general compositional function that takes in a
130
+ // py::object and executes some python function. It is currently mostly used
131
+ // to throw python exceptions.
132
+ if (unwrap_func) {
133
+ (*unwrap_func)(py_obj);
134
+ }
135
+ return py_obj;
136
+ }
137
+
138
+ py::object wait() {
139
+ fut->wait();
140
+ if (jit::tracer::isTracing()) {
141
+ auto graph = jit::tracer::getTracingState()->graph;
142
+
143
+ Value* fut_val = jit::tracer::getValueTrace(fut);
144
+ auto output = graph->insert(aten::wait, {fut_val});
145
+ jit::tracer::setValueTrace(fut->value(), output);
146
+ }
147
+ return value();
148
+ }
149
+
150
+ // The py::function cb arg must take a std::shared_ptr<PythonFutureWrapper>
151
+ // (i.e., torch._C.Future) as the only argument. If the type mismatches, an
152
+ // error will be thrown when waiting for the value of this returned Future.
153
+ std::shared_ptr<PythonFutureWrapper> then(py::function cb) {
154
+ // We need this an additional layer of wrapper here to guard the
155
+ // destruction of the py::function object. Because, the
156
+ // Future owns a reference to the py::function in its callback
157
+ // vector, but Future does not acquire GIL on destruction.
158
+ auto pf = std::make_shared<PythonFunctionGuard>(std::move(cb));
159
+
160
+ return std::make_shared<jit::PythonFutureWrapper>(fut->then(
161
+ // Capture a copy of the ivalue::Future instead of the `this` pointer
162
+ // because the PythonFutureWrapper object could have been deleted
163
+ // when the callbacks are fired. For example, RPC only captures the
164
+ // ivalue::Future instead of PythonFutureWrapper in JitFuture's
165
+ // callback functions. Hence, if user code does not hold a reference to
166
+ // this PythonFutureWrapper object, there is no guarantee that the
167
+ // PythonFutureWrapper is still valid when running the callback.
168
+ [pyFut(this->getPtr()),
169
+ pf(std::move(pf))](c10::ivalue::Future& /* unused */) -> IValue {
170
+ try {
171
+ pybind11::gil_scoped_acquire ag;
172
+ return toIValue(pf->func_(pyFut), PyObjectType::get());
173
+ } catch (py::error_already_set& e) {
174
+ auto err = std::runtime_error(c10::str(
175
+ "Got the following error when running the callback: ",
176
+ e.what()));
177
+ {
178
+ pybind11::gil_scoped_acquire ag;
179
+ // Release ownership on py::objects and also restore Python
180
+ // Error Indicator.
181
+ e.restore();
182
+ // Clear the Python Error Indicator as we has recorded the
183
+ // exception in the response message.
184
+ PyErr_Clear();
185
+ }
186
+
187
+ throw err;
188
+ }
189
+ },
190
+ PyObjectType::get()));
191
+ }
192
+
193
+ void add_done_callback(py::function cb) {
194
+ auto pf = std::make_shared<PythonFunctionGuard>(std::move(cb));
195
+ // NOLINTNEXTLINE(modernize-avoid-bind)
196
+ fut->addCallback(std::bind(
197
+ [pyFut(this->getPtr())](std::shared_ptr<PythonFunctionGuard> pf) {
198
+ try {
199
+ pybind11::gil_scoped_acquire ag;
200
+ pf->func_(pyFut);
201
+ } catch (py::error_already_set& e) {
202
+ {
203
+ pybind11::gil_scoped_acquire ag;
204
+ // Release ownership on py::objects and also restore Python
205
+ // Error Indicator.
206
+ e.restore();
207
+ // Clear the Python Error Indicator as we has recorded the
208
+ // exception in the response message.
209
+ PyErr_Clear();
210
+ }
211
+ // Log and ignore exceptions raised through the callback
212
+ LOG(ERROR) << "Got the following error when running the callback: "
213
+ << e.what();
214
+
215
+ } catch (const std::exception& e) {
216
+ // Log and ignore exceptions raised through the callback
217
+ LOG(ERROR) << "Got the following error when running the callback: "
218
+ << e.what();
219
+ }
220
+ },
221
+ std::move(pf)));
222
+ }
223
+
224
+ void markCompleted(const py::object& pyValue) {
225
+ DCHECK(PyGILState_Check());
226
+ IValue value = toIValue(pyValue, PyObjectType::get());
227
+
228
+ py::gil_scoped_release release;
229
+ fut->markCompleted(std::move(value));
230
+ }
231
+
232
+ c10::intrusive_ptr<c10::ivalue::Future> fut;
233
+ // unwrap_func works like a callback for the value returned by
234
+ // PythonFutureWrapper::wait().
235
+ c10::optional<UnwrapFunc> unwrap_func;
236
+
237
+ private:
238
+ std::shared_ptr<PythonFutureWrapper> getPtr() {
239
+ return shared_from_this();
240
+ }
241
+ };
242
+
243
+ // The PythonAwaitWrapper for ivalue::Await
244
+ //
245
+ // Expresses delayed function execution with Lazy semantic.
246
+ // i.e. Await[W] in eager mode can be used as W.
247
+ // When the attribute of W type is requested, Await[W] will return the
248
+ // attribute of W, transparently calling wait() beforehand.
249
+ // No Lazy semantic for script, explicit wait(Await[W]) -> W must be called to
250
+ // convert to type W.
251
+ //
252
+ // The Await object takes shared ownership of specified function and the
253
+ // arguments. After first call for wait() it owns the result. Deliberately no
254
+ // type inference for eager mode.
255
+ struct VISIBILITY_HIDDEN PythonAwaitWrapper
256
+ : std::enable_shared_from_this<PythonAwaitWrapper> {
257
+ explicit PythonAwaitWrapper(c10::intrusive_ptr<c10::ivalue::Await> aw)
258
+ : aw_(std::move(aw)) {}
259
+ explicit PythonAwaitWrapper(py::handle input) {
260
+ args_ = py::tuple(1u);
261
+ args_[0] = input;
262
+ auto type = PyObjectType::get();
263
+ aw_ = c10::make_intrusive<c10::ivalue::Await>(type);
264
+ aw_->markCompleted(toIValue(input, type));
265
+ }
266
+
267
+ explicit PythonAwaitWrapper(py::function pf, py::tuple args) {
268
+ pyfg_ = std::make_shared<torch::jit::PythonFunctionGuard>(std::move(pf));
269
+ args_ = std::move(args);
270
+ std::function<IValue()> f = [fg(pyfg_), &args(args_)]() {
271
+ pybind11::gil_scoped_acquire ag;
272
+ return toIValue(fg->func_(*args), PyObjectType::get());
273
+ };
274
+ aw_ = c10::make_intrusive<c10::ivalue::Await>(
275
+ PyObjectType::get(), std::move(f));
276
+ }
277
+
278
+ explicit PythonAwaitWrapper(const PythonAwaitWrapper&) = delete;
279
+ PythonAwaitWrapper& operator=(const PythonAwaitWrapper&) = delete;
280
+
281
+ py::object wait() {
282
+ py::gil_scoped_acquire acquire;
283
+ return toPyObject(aw_->wait());
284
+ }
285
+
286
+ // Nowait semantic means trivial case when Await is constructed from the
287
+ // result
288
+ bool is_nowait() {
289
+ return pyfg_ == nullptr;
290
+ }
291
+
292
+ const py::function fn() {
293
+ TORCH_CHECK(
294
+ pyfg_, "Await constructed as awaitable_nowait does not have fn");
295
+ return pyfg_->func_;
296
+ }
297
+
298
+ const py::tuple args() {
299
+ return args_;
300
+ }
301
+
302
+ TypePtr type() {
303
+ return aw_->type();
304
+ }
305
+
306
+ c10::intrusive_ptr<c10::ivalue::Await> aw_;
307
+ std::shared_ptr<torch::jit::PythonFunctionGuard> pyfg_;
308
+ py::tuple args_;
309
+
310
+ private:
311
+ std::shared_ptr<PythonAwaitWrapper> getPtr() {
312
+ return shared_from_this();
313
+ }
314
+ };
315
+
316
+ // error reporting: when reporting user-caused errors, these functions should
317
+ // not use AT_ERROR macros, since these macros add stack trace information
318
+ // that is confusing to display to the end user since it always reports
319
+ // locations in libtorch code rather than user code.
320
+
321
+ inline std::shared_ptr<CompilationUnit> get_python_cu() {
322
+ return py::module::import("torch.jit._state")
323
+ .attr("_python_cu")
324
+ .cast<std::shared_ptr<CompilationUnit>>();
325
+ }
326
+
327
+ struct TypedIValue : public std::pair<IValue, TypePtr> {
328
+ using pair::pair;
329
+
330
+ IValue& ivalue() {
331
+ return this->first;
332
+ }
333
+ TypePtr& type() {
334
+ return this->second;
335
+ }
336
+ };
337
+
338
+ inline TypedIValue toDictKeyIValue(py::handle key) {
339
+ if (py::isinstance<py::str>(key)) {
340
+ return TypedIValue(
341
+ ConstantString::create(py::cast<std::string>(key)), StringType::get());
342
+ } else if (py::isinstance<py::int_>(key)) {
343
+ return TypedIValue(py::cast<int64_t>(key), IntType::get());
344
+ } else if (py::isinstance<py::float_>(key)) {
345
+ return TypedIValue(py::cast<double>(key), FloatType::get());
346
+ } else {
347
+ AT_ERROR("Dictionary inputs may only have string, int, or float keys");
348
+ }
349
+ }
350
+
351
+ inline c10::optional<TypePtr> unifyOrInitializeType(
352
+ const TypePtr& accum,
353
+ const TypePtr& unify) {
354
+ if (!accum) {
355
+ return unify;
356
+ }
357
+ return unifyTypes(accum, unify);
358
+ }
359
+
360
+ using InferredType = c10::InferredType;
361
+
362
+ InferredType tryToInferContainerType(py::handle input, bool primitiveTypeOnly);
363
+
364
+ // Try to infer the type of a Python object
365
+ // The type cannot be inferred if:
366
+ // input is an empty container (list, dict)
367
+ // input is an list with element types that cannot be unified
368
+ // input is an dict with key or value types that cannot be unified
369
+ inline InferredType tryToInferType(py::handle input) {
370
+ // Try tensor types
371
+ if (THPVariable_Check(input.ptr())) {
372
+ return InferredType(TensorType::get());
373
+ }
374
+
375
+ if (input.is_none()) {
376
+ return InferredType(NoneType::get());
377
+ }
378
+
379
+ if (py::isinstance<StrongFunctionPtr>(input)) {
380
+ auto fn = py::cast<StrongFunctionPtr>(input).function_;
381
+ return InferredType(FunctionType::create(fn));
382
+ }
383
+
384
+ // Try basic types first
385
+ if (py::isinstance<py::bool_>(input)) {
386
+ return InferredType(BoolType::get());
387
+ // NOLINTNEXTLINE(bugprone-branch-clone)
388
+ } else if (py::isinstance<py::int_>(input)) {
389
+ return InferredType(IntType::get());
390
+ } else if (py::isinstance<py::float_>(input)) {
391
+ return InferredType(FloatType::get());
392
+ } else if (PyComplex_CheckExact(input.ptr())) {
393
+ return InferredType(ComplexType::get());
394
+ } else if (py::isinstance<py::str>(input)) {
395
+ return InferredType(StringType::get());
396
+ } else if (THPLayout_Check(input.ptr())) {
397
+ return InferredType(IntType::get());
398
+ } else if (THPDevice_Check(input.ptr())) {
399
+ return InferredType(DeviceObjType::get());
400
+ } else if (THPGenerator_Check(input.ptr())) {
401
+ return InferredType(GeneratorType::get());
402
+ } else if (THPStream_Check(input.ptr())) {
403
+ return InferredType(StreamObjType::get());
404
+ } else if (THPDtype_Check(input.ptr())) {
405
+ return InferredType(IntType::get());
406
+ } else if (THPQScheme_Check(input.ptr())) {
407
+ return InferredType(IntType::get());
408
+ } else if (THPLayout_Check(input.ptr())) {
409
+ return InferredType(IntType::get());
410
+ }
411
+
412
+ auto enum_type = py::module::import("enum").attr("Enum");
413
+ py::bool_ isEnumValue = py::isinstance(input, enum_type);
414
+ if (py::cast<bool>(isEnumValue)) {
415
+ auto enum_class = input.attr("__class__");
416
+ auto enum_type = py::cast<TypePtr>(
417
+ py::module::import("torch.jit.annotations")
418
+ .attr("try_ann_to_type")(enum_class, SourceRange()));
419
+ return InferredType(std::move(enum_type));
420
+ }
421
+
422
+ py::bool_ isClass =
423
+ py::module::import("inspect").attr("isclass")(input.get_type());
424
+ if (py::cast<bool>(isClass)) {
425
+ // Assume that the class is compiled already or will compile. Invalidate
426
+ // this later if needed.
427
+ bool class_compiled = true;
428
+
429
+ // Check if the type is already compiled.
430
+ py::object existing_ty = py::module::import("torch.jit._state")
431
+ .attr("_get_script_class")(input.get_type());
432
+
433
+ if (existing_ty.is_none()) {
434
+ // If not, try to compile it.
435
+ py::bool_ can_compile = py::module::import("torch._jit_internal")
436
+ .attr("can_compile_class")(input.get_type());
437
+
438
+ if (py::cast<bool>(can_compile)) {
439
+ // Try to compile the class. This is wrapped in a try-catch because
440
+ // compilation of class types can raise an Exception and in that case,
441
+ // we want to defer to other attempts at type inference below rather
442
+ // than fail compilation altogether.
443
+ try {
444
+ py::module::import("torch.jit._script")
445
+ .attr("_recursive_compile_class")(
446
+ input.get_type(), SourceRange());
447
+ } catch (...) {
448
+ // Invalidate the assumption that the class compiled so that we don't
449
+ // look up and return its JIT type as the type for the input.
450
+ class_compiled = false;
451
+ }
452
+ }
453
+ }
454
+
455
+ // If the class compiled successfully, look up the existing JIT type by
456
+ // qualified name and return it.
457
+ if (class_compiled) {
458
+ auto script_class = py::module::import("torch.jit._state")
459
+ .attr("_get_script_class")(input.get_type());
460
+
461
+ if (!script_class.is_none()) {
462
+ auto class_type = py::cast<ClassTypePtr>(script_class);
463
+
464
+ if (class_type && !class_type->is_module()) {
465
+ return InferredType(std::move(class_type));
466
+ }
467
+ }
468
+ }
469
+ }
470
+
471
+ if (py::isinstance<Object>(input)) {
472
+ auto object = py::cast<Object>(input);
473
+ return InferredType(object.type());
474
+ #ifdef USE_RPC
475
+ } else if (py::isinstance<torch::distributed::rpc::PyRRef>(input)) {
476
+ auto rref_ivalue = input.cast<torch::distributed::rpc::PyRRef>().toIValue();
477
+ return InferredType(rref_ivalue.type());
478
+ #endif
479
+ }
480
+
481
+ auto await_type = py::module::import("torch._awaits").attr("_Await");
482
+ py::bool_ is_await = py::isinstance(input, await_type);
483
+ if (py::cast<bool>(is_await)) {
484
+ auto awptr = input.cast<std::shared_ptr<PythonAwaitWrapper>>();
485
+ return InferredType(AwaitType::create(awptr->aw_->elementType()));
486
+ }
487
+
488
+ if (as_module(py::cast<py::object>(input))) {
489
+ return InferredType("Cannot infer type of ScriptModule");
490
+ }
491
+
492
+ auto module_type = py::module::import("torch.nn").attr("Module");
493
+ py::bool_ is_module = py::isinstance(input, module_type);
494
+ if (py::cast<bool>(is_module)) {
495
+ return InferredType("Cannot infer concrete type of torch.nn.Module");
496
+ }
497
+
498
+ // Try container types
499
+ return tryToInferContainerType(input, false);
500
+ }
501
+
502
+ // This function is similar to tryToInferType, but it only tries to infer
503
+ // primitive types (int, float, bool, complex) or nested container of primitive
504
+ // types.
505
+ inline InferredType tryToInferPrimitiveType(py::handle input) {
506
+ if (input.is_none()) {
507
+ return InferredType(NoneType::get());
508
+ }
509
+
510
+ // Only primitive data type
511
+ if (py::isinstance<py::bool_>(input)) {
512
+ return InferredType(BoolType::get());
513
+ // NOLINTNEXTLINE(bugprone-branch-clone)
514
+ } else if (py::isinstance<py::int_>(input)) {
515
+ return InferredType(IntType::get());
516
+ } else if (py::isinstance<py::float_>(input)) {
517
+ return InferredType(FloatType::get());
518
+ } else if (PyComplex_CheckExact(input.ptr())) {
519
+ return InferredType(ComplexType::get());
520
+ }
521
+
522
+ // Try container types
523
+ return tryToInferContainerType(input, true);
524
+ }
525
+
526
+ inline InferredType tryToInferContainerType(
527
+ py::handle input,
528
+ bool primitiveTypeOnly = false) {
529
+ if (six::isTuple(input)) {
530
+ py::tuple tuple = py::cast<py::tuple>(input);
531
+ std::vector<TypePtr> element_types;
532
+ element_types.reserve(tuple.size());
533
+
534
+ for (py::handle elem : tuple) {
535
+ auto type_match = primitiveTypeOnly ? tryToInferPrimitiveType(elem)
536
+ : tryToInferType(elem);
537
+ if (type_match.success()) {
538
+ element_types.push_back(type_match.type());
539
+ } else {
540
+ // Forward error message along
541
+ return type_match.reason();
542
+ }
543
+ }
544
+ return InferredType(TupleType::create(std::move(element_types)));
545
+ } else if (PyDict_Check(input.ptr())) {
546
+ // Check to make sure we can generate useful input/output types
547
+ auto dict = py::cast<py::dict>(input);
548
+ size_t len = py::len(dict);
549
+ if (!len) {
550
+ return InferredType("Dictionary inputs must have entries");
551
+ }
552
+
553
+ TypePtr key_type = nullptr;
554
+ TypePtr value_type = nullptr;
555
+
556
+ for (auto entry : dict) {
557
+ // Try to infer the key type and unify it with the existing one
558
+ auto entry_key_type_match = primitiveTypeOnly
559
+ ? tryToInferPrimitiveType(entry.first)
560
+ : tryToInferType(entry.first);
561
+ if (!entry_key_type_match.success()) {
562
+ return entry_key_type_match.reason();
563
+ }
564
+ auto unified_key =
565
+ unifyOrInitializeType(key_type, entry_key_type_match.type());
566
+ if (!unified_key) {
567
+ return InferredType(c10::str(
568
+ "Dictionary inputs to traced functions must have consistent type. Found ",
569
+ key_type->repr_str(),
570
+ " and ",
571
+ (entry_key_type_match.type())->repr_str()));
572
+ }
573
+
574
+ // Try to infer the value type and unify it with the existing one
575
+ auto entry_value_type_match = primitiveTypeOnly
576
+ ? tryToInferPrimitiveType(entry.second)
577
+ : tryToInferType(entry.second);
578
+ if (!entry_value_type_match.success()) {
579
+ return entry_value_type_match.reason();
580
+ }
581
+ auto unified_value =
582
+ unifyOrInitializeType(value_type, entry_value_type_match.type());
583
+ if (!unified_value) {
584
+ return InferredType(c10::str(
585
+ "Dictionary inputs to traced functions must have consistent type. Found ",
586
+ value_type->repr_str(),
587
+ " and ",
588
+ (entry_value_type_match.type())->repr_str()));
589
+ }
590
+
591
+ key_type = *unified_key;
592
+ value_type = *unified_value;
593
+ }
594
+ return InferredType(
595
+ DictType::create(std::move(key_type), std::move(value_type)));
596
+ } else if (PyList_Check(input.ptr())) {
597
+ auto list = py::cast<py::list>(input);
598
+ size_t len = py::len(list);
599
+ if (!len) {
600
+ return InferredType("List trace inputs must have elements");
601
+ }
602
+
603
+ TypePtr element_type = nullptr;
604
+ for (auto elem : list) {
605
+ auto element_type_match = primitiveTypeOnly
606
+ ? tryToInferPrimitiveType(elem)
607
+ : tryToInferType(elem);
608
+ if (!element_type_match.success()) {
609
+ return InferredType(c10::str(
610
+ "Could not infer type of list element: ",
611
+ element_type_match.reason()));
612
+ }
613
+ auto unified_type =
614
+ unifyOrInitializeType(element_type, element_type_match.type());
615
+ if (!unified_type) {
616
+ return InferredType(c10::str(
617
+ "List inputs to traced functions must have consistent element type. Found ",
618
+ element_type->repr_str(),
619
+ " and ",
620
+ (element_type_match.type())->repr_str()));
621
+ }
622
+ element_type = *unified_type;
623
+ }
624
+ return InferredType(ListType::create(element_type));
625
+ } else {
626
+ if (primitiveTypeOnly) {
627
+ return InferredType(c10::str(
628
+ "Only tuple, list, or dict (possibly nested) of primitive types (bool, float, int, complex)",
629
+ "are supported ",
630
+ "as inputs or outputs of traced functions",
631
+ ", but instead got value of type ",
632
+ py::str(input.get_type().attr("__name__")),
633
+ "."));
634
+ } else {
635
+ // TODO: this message is not correct anymore, since this InferredType is
636
+ // used from a bunch of circumstances unrelated to tracing. We can re-use
637
+ // this instead of the attribute_failure stuff in concreteType
638
+ return InferredType(c10::str(
639
+ "Only tensors and (possibly nested) tuples of tensors, lists, or dicts",
640
+ "are supported ",
641
+ "as inputs or outputs of traced functions",
642
+ ", but instead got value of type ",
643
+ py::str(input.get_type().attr("__name__")),
644
+ "."));
645
+ }
646
+ }
647
+ }
648
+
649
+ inline bool isTraceableType(const TypePtr& type) {
650
+ if (type->isSubtypeOf(*TensorType::get())) {
651
+ return true;
652
+ }
653
+
654
+ if (auto list_type = type->cast<ListType>()) {
655
+ return isTraceableType(list_type->getElementType());
656
+ }
657
+
658
+ if (auto tuple_type = type->cast<TupleType>()) {
659
+ return std::all_of(
660
+ tuple_type->elements().begin(),
661
+ tuple_type->elements().end(),
662
+ [](const TypePtr& element_type) {
663
+ return isTraceableType(element_type);
664
+ });
665
+ }
666
+
667
+ if (auto dict_type = type->cast<DictType>()) {
668
+ return isTraceableType(dict_type->getValueType());
669
+ }
670
+
671
+ return false;
672
+ }
673
+
674
+ inline IValue toTypeInferredIValue(py::handle input) {
675
+ auto match = tryToInferType(input);
676
+ if (!match.success()) {
677
+ auto object = py::cast<py::object>(input);
678
+ if (auto mod = as_module(object)) {
679
+ // if obj is already a ScriptModule, just return its ivalue
680
+ auto ptr = mod.value()._ivalue();
681
+ // explict copy semantics for strong ownership of the resource.
682
+ return c10::intrusive_ptr<c10::ivalue::Object>::reclaim_copy(
683
+ ptr.release());
684
+ }
685
+
686
+ // Check if the obj is a ScriptObject.
687
+ if (auto script_obj = as_object(object)) {
688
+ auto ptr = script_obj.value()._ivalue();
689
+ return c10::intrusive_ptr<c10::ivalue::Object>::reclaim_copy(
690
+ ptr.release());
691
+ }
692
+ AT_ERROR(
693
+ "Tracer cannot infer type of ", py::str(input), "\n:", match.reason());
694
+ }
695
+ return toIValue(input, match.type());
696
+ }
697
+
698
+ inline Stack toTraceableStack(const py::tuple& inputs) {
699
+ auto info = toTypeInferredIValue(inputs);
700
+ TORCH_CHECK(
701
+ isTraceableType(info.type()),
702
+ "Type '",
703
+ info.type()->repr_str(),
704
+ "' cannot be traced. Only Tensors and (possibly nested) Lists, Dicts, and"
705
+ " Tuples of Tensors can be traced");
706
+ return info.toTupleRef().elements().vec();
707
+ }
708
+
709
+ // Serialize the python dictionary into a traceable stack.
710
+ inline Stack toTraceableStack(const py::dict& inputs) {
711
+ Stack res;
712
+ for (auto it = inputs.begin(); it != inputs.end(); it++) {
713
+ if (THPVariable_Check(it->second.ptr())) {
714
+ res.push_back(toIValue(it->second, tryToInferType(it->second).type()));
715
+ }
716
+ }
717
+ return res;
718
+ }
719
+
720
+ inline IValue createGenericList(py::handle obj, const TypePtr& elem_type) {
721
+ auto elems = c10::impl::GenericList(elem_type);
722
+ for (auto elem : obj) {
723
+ elems.push_back(toIValue(elem, elem_type));
724
+ }
725
+ return IValue(elems);
726
+ }
727
+
728
+ inline IValue createGenericDict(
729
+ const py::dict& obj,
730
+ const TypePtr& key_type,
731
+ const TypePtr& value_type) {
732
+ c10::impl::GenericDict elems(key_type, value_type);
733
+ elems.reserve(py::len(obj));
734
+ for (auto& entry : obj) {
735
+ elems.insert(
736
+ toIValue(entry.first, key_type), toIValue(entry.second, value_type));
737
+ }
738
+ return IValue(elems);
739
+ }
740
+
741
+ template <class T>
742
+ inline void guardAgainstNamedTensor(const T& var) {
743
+ TORCH_CHECK(
744
+ !var.has_names(),
745
+ "NYI: Named tensors are currently unsupported in TorchScript. As a "
746
+ "workaround please drop names via `tensor = tensor.rename(None)`.");
747
+ }
748
+
749
+ // Extract custom class registered with torchbind
750
+ template <typename T>
751
+ c10::intrusive_ptr<T> toCustomClass(py::handle obj) {
752
+ static_assert(
753
+ std::is_base_of<CustomClassHolder, T>::value, "T is not a CustomClass");
754
+ const auto& type = c10::getCustomClassType<c10::intrusive_ptr<T>>();
755
+ c10::IValue ivalue = toIValue(obj, type);
756
+ return std::move(ivalue).toCustomClass<T>();
757
+ }
758
+
759
+ // Small wrapper around getting the type name string from Python to make
760
+ // types easier to interpret, e.g. give the structural type for a NamedTuple
761
+ inline std::string friendlyTypeName(py::handle obj) {
762
+ if (py::isinstance<py::tuple>(obj) && py::hasattr(obj, "_fields")) {
763
+ auto field_names =
764
+ py::cast<std::vector<std::string>>(py::getattr(obj, "_fields"));
765
+ std::stringstream ss;
766
+ ss << py::str(obj.get_type().attr("__name__"));
767
+ ss << " (aka NamedTuple(";
768
+ bool first = true;
769
+ for (auto& field_name : field_names) {
770
+ if (!first) {
771
+ ss << ", ";
772
+ }
773
+ ss << field_name;
774
+ first = false;
775
+ }
776
+ ss << "))";
777
+ return ss.str();
778
+ } else {
779
+ return py::str(obj.get_type().attr("__name__"));
780
+ }
781
+ }
782
+
783
+ // Thrown when trying to create a schema for a list of python
784
+ // arguments that cannot be converted.
785
+ // Can be caught by the caller to attempt to use other schema
786
+ // when there is an overloaded operator.
787
+ struct schema_match_error : public std::runtime_error {
788
+ using std::runtime_error::runtime_error;
789
+ };
790
+
791
+ inline IValue argumentToIValue(
792
+ const FunctionSchema& schema,
793
+ size_t argumentPosition,
794
+ py::handle object) {
795
+ const auto& argument = schema.arguments().at(argumentPosition);
796
+ try {
797
+ return toIValue(object, argument.real_type(), argument.N());
798
+ } catch (const py::cast_error& error) {
799
+ throw schema_match_error(c10::str(
800
+ schema.formatTypeMismatchMsg(
801
+ argument,
802
+ friendlyTypeName(object),
803
+ argumentPosition,
804
+ py::repr(object)),
805
+ "\nCast error details: ",
806
+ error.what()));
807
+ } catch (const py::error_already_set& error) {
808
+ throw schema_match_error(c10::str(
809
+ schema.formatTypeMismatchMsg(
810
+ argument,
811
+ friendlyTypeName(object),
812
+ argumentPosition,
813
+ py::repr(object)),
814
+ "\n Python error details: ",
815
+ error.what()));
816
+ }
817
+ }
818
+
819
+ inline IValue returnToIValue(const TypePtr& type, py::handle object) {
820
+ try {
821
+ return toIValue(object, type);
822
+ } catch (const py::cast_error& error) {
823
+ throw std::runtime_error(c10::str(
824
+ " expected value of type ",
825
+ type->str(),
826
+ " for return value but instead got value of type ",
827
+ py::str(object.get_type().attr("__name__")),
828
+ ".",
829
+ "\nValue: ",
830
+ py::repr(object),
831
+ "\nCast error details: ",
832
+ error.what()));
833
+ }
834
+ }
835
+
836
+ inline py::object getScriptedClassOrError(const c10::NamedTypePtr& classType) {
837
+ auto py_class =
838
+ py::module::import("torch.jit._state")
839
+ .attr("_get_python_class")(classType->name()->qualifiedName());
840
+ if (py_class.is_none()) {
841
+ std::stringstream err;
842
+ err << "Unknown reference to ScriptClass ";
843
+ err << classType->name()->qualifiedName();
844
+ err << ". (Did you forget to import it?)";
845
+ throw std::runtime_error(err.str());
846
+ }
847
+ return py_class;
848
+ }
849
+
850
+ struct VISIBILITY_HIDDEN tuple_slice {
851
+ /*implicit*/ tuple_slice(py::tuple tup_)
852
+ : tup(std::move(tup_)), b(0), e(tup.size()) {}
853
+ tuple_slice(py::tuple tup_, int64_t b_)
854
+ : tup(std::move(tup_)), b(b_), e(tup.size()) {}
855
+ tuple_slice(py::tuple tup_, int64_t b_, int64_t e_)
856
+ : tup(std::move(tup_)), b(b_), e(e_) {}
857
+ py::detail::tuple_iterator begin() const {
858
+ return {tup, static_cast<pybind11::ssize_t>(b)};
859
+ }
860
+ py::detail::tuple_iterator end() const {
861
+ return {tup, static_cast<pybind11::ssize_t>(e)};
862
+ }
863
+ size_t size() const {
864
+ return e - b;
865
+ }
866
+ py::detail::tuple_accessor operator[](size_t index) const {
867
+ return {tup, static_cast<size_t>(b + index)};
868
+ }
869
+
870
+ private:
871
+ py::tuple tup;
872
+ int64_t b;
873
+ int64_t e;
874
+ };
875
+
876
+ inline Stack createStackForSchema(
877
+ const FunctionSchema& schema,
878
+ const tuple_slice& args,
879
+ const py::kwargs& kwargs,
880
+ c10::optional<IValue> self) {
881
+ size_t all_arguments = (self ? 1 : 0) + args.size() + kwargs.size();
882
+ if (all_arguments > schema.arguments().size()) {
883
+ throw schema_match_error(c10::str(
884
+ schema.name(),
885
+ "() expected at most ",
886
+ schema.arguments().size(),
887
+ " argument(s) but received ",
888
+ all_arguments,
889
+ " argument(s). Declaration: ",
890
+ schema));
891
+ }
892
+ Stack stack;
893
+ stack.reserve(schema.arguments().size());
894
+
895
+ int64_t arg_idx = 0;
896
+ if (self) {
897
+ push(stack, std::move(*self));
898
+ arg_idx++;
899
+ }
900
+ // First push all positional args.
901
+ for (const auto& arg : args) {
902
+ // ...but refuse to do it if the schema says that this was supposed
903
+ // to be keyword only
904
+ if (schema.arguments()[arg_idx].kwarg_only()) {
905
+ throw schema_match_error(c10::str(
906
+ schema.name(),
907
+ "() takes ",
908
+ arg_idx,
909
+ " positional argument(s) but ",
910
+ self ? 1 + args.size() : args.size(),
911
+ " was/were given. Declaration: ",
912
+ schema));
913
+ }
914
+ // Use the type information from the schema to convert the PyObject.
915
+ push(stack, argumentToIValue(schema, stack.size(), arg));
916
+ arg_idx++;
917
+ }
918
+
919
+ // Now for every remaining non-positional argument in the schema, look for it
920
+ // in the kwargs dict and push it if found, or use its default value if it
921
+ // has one.
922
+ size_t consumed_kwargs = 0;
923
+ for (size_t i = stack.size(); i < schema.arguments().size(); ++i) {
924
+ const auto& arg = schema.arguments()[i];
925
+ if (kwargs.contains(arg.name().c_str())) {
926
+ push(stack, argumentToIValue(schema, i, kwargs[arg.name().c_str()]));
927
+ consumed_kwargs += 1;
928
+ } else if (arg.default_value()) {
929
+ push(stack, *arg.default_value());
930
+ } else {
931
+ throw schema_match_error(c10::str(
932
+ schema.name(),
933
+ "() is missing value for argument '",
934
+ arg.name(),
935
+ "'. Declaration: ",
936
+ schema));
937
+ }
938
+ }
939
+
940
+ if (consumed_kwargs != kwargs.size()) {
941
+ std::vector<std::string> names;
942
+ for (const auto& kwarg : kwargs) {
943
+ names.emplace_back(py::cast<std::string>(kwarg.first));
944
+ }
945
+ throw schema_match_error(schema.findErrorInKwargs(names));
946
+ }
947
+
948
+ return stack;
949
+ }
950
+
951
+ inline py::object createPyObjectForStack(Stack&& stack) {
952
+ if (stack.empty()) {
953
+ return py::none();
954
+ }
955
+
956
+ // Return a simple value and not a single-element tuple if there is only one
957
+ // return value.
958
+ if (stack.size() == 1) {
959
+ return toPyObject(std::move(stack[0]));
960
+ }
961
+
962
+ // If there is more than one return value, pop them into a py::tuple.
963
+ py::tuple return_values(stack.size());
964
+ for (const auto ret : c10::irange(return_values.size())) {
965
+ return_values[ret] = toPyObject(std::move(stack[ret]));
966
+ }
967
+
968
+ return std::move(return_values);
969
+ }
970
+
971
+ // TODO: Remove once we clean up the GraphExecutor usage.
972
+ inline Stack evilDeprecatedBadCreateStackDoNotUse(
973
+ const py::tuple& tuple,
974
+ at::ArrayRef<Value*> inputs,
975
+ size_t reserve_extra_space = 0) {
976
+ if (tuple.size() != inputs.size()) {
977
+ AT_ERROR(
978
+ "expected " + std::to_string(inputs.size()) + " inputs, but got " +
979
+ std::to_string(tuple.size()));
980
+ }
981
+ Stack result;
982
+ result.reserve(tuple.size() + reserve_extra_space);
983
+ for (const auto i : c10::irange(inputs.size())) {
984
+ result.push_back(toIValue(std::move(tuple[i]), inputs[i]->type()));
985
+ }
986
+ return result;
987
+ }
988
+
989
+ // Run `callee`, potentially inserting a CallFunction/CallMethod node into the
990
+ // tracing graph.
991
+ inline py::object runAndInsertCall(
992
+ Function& callee,
993
+ const tuple_slice& args,
994
+ const py::kwargs& kwargs,
995
+ c10::optional<IValue> self,
996
+ // Lambda that tells this function how to insert `callee` into the graph if
997
+ // we're tracing.
998
+ const std::function<Value*(Graph&, const MatchedSchema& match)>&
999
+ callInserter) {
1000
+ auto stack =
1001
+ createStackForSchema(callee.getSchema(), args, kwargs, std::move(self));
1002
+ const auto& tracing_state = tracer::getTracingState();
1003
+ if (!tracing_state) {
1004
+ pybind11::gil_scoped_release no_gil_guard;
1005
+ // If we're not tracing, just run the callee as normal.
1006
+ callee.run(stack);
1007
+ } else {
1008
+ // If we are tracing, insert the appropriate CallFunction or CallMethod node
1009
+ // and then run the callee with tracing disabled.
1010
+
1011
+ // Get the graph `Value`s that represent the input IValues
1012
+ auto inputs = last(stack, callee.num_inputs());
1013
+ auto input_values =
1014
+ fmap(inputs, [](const IValue& v) { return tracer::getValueTrace(v); });
1015
+ TORCH_INTERNAL_ASSERT(callee.getSchema().returns().size() == 1)
1016
+ auto return_type = callee.getSchema().returns().at(0).type();
1017
+ auto graph = tracing_state->graph;
1018
+ std::vector<NamedValue> named_values;
1019
+ named_values.reserve(input_values.size());
1020
+ for (Value* v : input_values) {
1021
+ named_values.emplace_back(v);
1022
+ }
1023
+
1024
+ // Add a call node.
1025
+ MatchedSchema match = matchSchema(
1026
+ callee.getSchema(),
1027
+ tracer::getPythonInterpreterSourceRange(),
1028
+ *graph,
1029
+ named_values,
1030
+ {});
1031
+ auto output_value = callInserter(*graph, match);
1032
+
1033
+ // Actually run the callee. Pause the tracer so that we don't double-add the
1034
+ // callee nodes.
1035
+ {
1036
+ pybind11::gil_scoped_release no_gil_guard;
1037
+ ResourceGuard guard(tracer::pauseTracing());
1038
+ callee.run(stack);
1039
+ }
1040
+
1041
+ // Associate the output IValues with the output `Value`s in the graph
1042
+ tracer::setValueTrace(stack.back(), output_value);
1043
+ }
1044
+
1045
+ TORCH_CHECK(
1046
+ !stack.empty(),
1047
+ "Expected values in the stack after execution but found none");
1048
+ return toPyObject(std::move(stack.back()));
1049
+ }
1050
+
1051
+ inline c10::optional<py::object> maybeTorchFunctionDispatch(
1052
+ const py::object& callee,
1053
+ const tuple_slice& args_no_self,
1054
+ const py::kwargs& kwargs,
1055
+ const c10::QualifiedName qualname) {
1056
+ std::vector<py::handle> args_vec;
1057
+ for (const auto& arg : args_no_self) {
1058
+ args_vec.push_back(arg);
1059
+ }
1060
+ py::tuple args = py::cast(args_vec);
1061
+
1062
+ // Handle __torch_function__ dispatch
1063
+ std::vector<PyObject*> overloaded_args;
1064
+ size_t total_arg_num = args.size() + kwargs.size();
1065
+ for (const auto& arg : args) {
1066
+ is_tensor_and_append_overloaded(arg.ptr(), &overloaded_args);
1067
+ is_tensor_list_and_append_overloaded(
1068
+ arg.ptr(),
1069
+ &overloaded_args,
1070
+ static_cast<int>(total_arg_num),
1071
+ false /* throw_error */);
1072
+ }
1073
+ // NB: for kwargs, we cannot guarantee the order of appending
1074
+ // is the same as the argument order in operator's schema.
1075
+ // This is suboptimal, but should be fine. Later when we have
1076
+ // better schema matching and argument parsing, we could
1077
+ // match the operator in `operations` first, then the order will
1078
+ // be guaranteed.
1079
+ for (auto item : kwargs) {
1080
+ is_tensor_and_append_overloaded(item.second.ptr(), &overloaded_args);
1081
+ is_tensor_list_and_append_overloaded(
1082
+ item.second.ptr(),
1083
+ &overloaded_args,
1084
+ total_arg_num,
1085
+ false /* throw_error */);
1086
+ }
1087
+ if (!overloaded_args.empty()) {
1088
+ return pybind11::reinterpret_steal<py::object>(
1089
+ handle_torch_function_no_python_arg_parser(
1090
+ /*overloaded_args=*/overloaded_args,
1091
+ /*args=*/args.ptr(),
1092
+ /*kwargs=*/kwargs.ptr(),
1093
+ /*func_name=*/qualname.name().c_str(),
1094
+ /*torch_api_function=*/callee.ptr(),
1095
+ /*module_name=*/qualname.prefix().c_str()));
1096
+ }
1097
+
1098
+ return c10::nullopt;
1099
+ }
1100
+
1101
+ inline py::object invokeScriptFunctionFromPython(
1102
+ Function& callee,
1103
+ const tuple_slice& args,
1104
+ const py::kwargs& kwargs) {
1105
+ // TODO: we could add __torch_function__ dispatch here but I don't know
1106
+ // the implications of doing so
1107
+
1108
+ return runAndInsertCall(
1109
+ callee,
1110
+ args,
1111
+ kwargs,
1112
+ /*self=*/c10::nullopt,
1113
+ [&](Graph& graph, const MatchedSchema& match) {
1114
+ return graph.insertFunctionCall(&callee, match);
1115
+ });
1116
+ }
1117
+
1118
+ inline py::object invokeScriptMethodFromPython(
1119
+ Method& callee,
1120
+ const tuple_slice& args,
1121
+ const py::kwargs& kwargs) {
1122
+ auto self = callee.owner()._ivalue();
1123
+
1124
+ if (auto torch_fn_result = maybeTorchFunctionDispatch(
1125
+ py::cast(callee), args, kwargs, callee.name())) {
1126
+ return *torch_fn_result;
1127
+ }
1128
+
1129
+ return runAndInsertCall(
1130
+ callee.function(),
1131
+ args,
1132
+ kwargs,
1133
+ self,
1134
+ [&](Graph& graph, const MatchedSchema& match) {
1135
+ return graph.insertMethodCall(callee.name(), match);
1136
+ });
1137
+ }
1138
+
1139
+ TORCH_PYTHON_API std::pair<std::shared_ptr<Operator>, Stack> getOpWithStack(
1140
+ const std::vector<std::shared_ptr<Operator>>& operations,
1141
+ py::args args,
1142
+ const py::kwargs& kwargs);
1143
+
1144
+ TORCH_PYTHON_API py::object invokeOperatorFromPython(
1145
+ const std::vector<std::shared_ptr<Operator>>& operations,
1146
+ py::args args,
1147
+ const py::kwargs& kwargs,
1148
+ c10::optional<c10::DispatchKey> dk = c10::nullopt);
1149
+
1150
+ TORCH_PYTHON_API py::object _get_operation_for_overload_or_packet(
1151
+ const std::vector<std::shared_ptr<Operator>>& operations,
1152
+ Symbol symbol,
1153
+ py::args args,
1154
+ const py::kwargs& kwargs,
1155
+ bool is_overload,
1156
+ c10::optional<c10::DispatchKey> dk = c10::nullopt);
1157
+
1158
+ } // namespace torch::jit
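
The block above records a single CallFunction/CallMethod node when tracing and then pauses the tracer while actually executing the callee, so the callee's internal ops are not traced a second time. A minimal standalone sketch of that pause-while-executing pattern follows; the tracing flag, guard, and names here are hypothetical stand-ins, not the PyTorch tracer API.

#include <functional>
#include <iostream>
#include <string>
#include <vector>

// Hypothetical stand-ins for the tracing state and the recorded graph.
thread_local bool g_tracing = false;
std::vector<std::string> g_recorded_nodes;

// RAII guard that pauses "tracing" for the current scope, mirroring
// ResourceGuard(tracer::pauseTracing()) in the snippet above.
struct PauseTracing {
  bool prev_;
  PauseTracing() : prev_(g_tracing) { g_tracing = false; }
  ~PauseTracing() { g_tracing = prev_; }
};

// Run `callee`; when tracing, record one call node and run the callee with
// tracing paused so its internal operations are not recorded twice.
int runAndRecord(const std::string& name, const std::function<int()>& callee) {
  if (!g_tracing) {
    return callee();
  }
  g_recorded_nodes.push_back("CallFunction " + name);
  PauseTracing guard;
  return callee();
}

int main() {
  g_tracing = true;
  int out = runAndRecord("foo", [] { return 2 + 3; });
  std::cout << out << " " << g_recorded_nodes.size() << "\n"; // prints "5 1"
}
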
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_arg_flatten.h ADDED
@@ -0,0 +1,119 @@
1
+ #pragma once
2
+
3
+ #include <c10/util/hash.h>
4
+ #include <c10/util/irange.h>
5
+ #include <torch/csrc/autograd/variable.h>
6
+ #include <torch/csrc/jit/python/pybind.h>
7
+
8
+ #include <ATen/ATen.h>
9
+ #include <functional>
10
+ #include <tuple>
11
+ #include <vector>
12
+
13
+ namespace torch::jit::python {
14
+
15
+ struct IODescriptor {
16
+ struct VariableMetadata {
17
+ VariableMetadata(const autograd::Variable& var)
18
+ : sizes(var.sizes().vec()),
19
+ type(var.scalar_type()),
20
+ device(var.device()),
21
+ requires_grad(var.requires_grad()) {}
22
+
23
+ bool operator==(const VariableMetadata& o) const {
24
+ return std::tie(device, requires_grad, type, sizes) ==
25
+ std::tie(o.device, o.requires_grad, o.type, o.sizes);
26
+ }
27
+
28
+ static size_t hash(const VariableMetadata& m) {
29
+ return c10::get_hash(m.sizes, m.device, m.requires_grad, m.type);
30
+ }
31
+
32
+ std::vector<int64_t> sizes;
33
+ at::ScalarType type;
34
+ at::Device device;
35
+ bool requires_grad;
36
+ };
37
+
38
+ bool operator==(const IODescriptor& o) const {
39
+ return std::tie(structure, metadata, grad_enabled) ==
40
+ std::tie(o.structure, o.metadata, o.grad_enabled);
41
+ }
42
+
43
+ static size_t hash(const IODescriptor& o) {
44
+ return c10::get_hash(o.structure, o.metadata, o.grad_enabled);
45
+ }
46
+
47
+ void extend(const autograd::variable_list& list) {
48
+ metadata.reserve(metadata.size() + list.size());
49
+ for (auto& var : list)
50
+ metadata.emplace_back(var);
51
+ }
52
+
53
+ // Description of argument structure. Variables are replaced with
54
+ // different characters, depending on their flags; beginnings and
55
+ // ends of tuples and lists are denoted by a pair of parentheses
56
+ // of their corresponding kind. They should always be paired.
57
+ // Example desc: (vv[v(v)v])
58
+ // NOTE: if extend() was ever called then metadata.size() can be
59
+ // different than the number of 'v's in structure.
60
+ std::string structure;
61
+ std::vector<std::string> strings;
62
+ std::vector<VariableMetadata> metadata;
63
+ bool grad_enabled = false;
64
+ };
65
+
66
+ static inline std::ostream& operator<<(
67
+ std::ostream& out,
68
+ const IODescriptor::VariableMetadata& meta) {
69
+ at::Device meta_device = meta.device;
70
+ auto& t = at::getDeprecatedTypeProperties(
71
+ meta_device.is_cpu() ? at::Backend::CPU : at::Backend::CUDA, meta.type);
72
+ out << t << "(requires_grad=" << meta.requires_grad;
73
+ if (meta_device.is_cuda()) {
74
+ out << ", device=" << meta_device.index();
75
+ }
76
+ out << ") {";
77
+ for (const auto i : c10::irange(meta.sizes.size())) {
78
+ if (i > 0)
79
+ out << ", ";
80
+ out << meta.sizes[i];
81
+ }
82
+ out << "}";
83
+ return out;
84
+ }
85
+
86
+ static inline std::ostream& operator<<(
87
+ std::ostream& out,
88
+ const IODescriptor& desc) {
89
+ out << desc.structure << "\n";
90
+ out << " with grad_enabled=" << desc.grad_enabled << "\n";
91
+ for (const auto i : c10::irange(desc.metadata.size())) {
92
+ out << " with v" << i << " having type " << desc.metadata[i] << "\n";
93
+ }
94
+ return out;
95
+ }
96
+
97
+ struct ParsedArgs {
98
+ // Flat vector of Variables found in arguments
99
+ autograd::variable_list vars;
100
+ // Metadata describing nesting of objects received from Python and
101
+ // metadata of vars and whether grad is enabled.
102
+ IODescriptor desc;
103
+
104
+ void extend(const autograd::variable_list& list) {
105
+ if (list.empty())
106
+ return;
107
+ vars.reserve(vars.size() + list.size());
108
+ for (auto& var : list)
109
+ vars.emplace_back(var);
110
+ desc.extend(list);
111
+ }
112
+ };
113
+
114
+ ParsedArgs flatten(py::handle obj);
115
+ PyObject* unflatten(
116
+ at::ArrayRef<autograd::Variable> vars,
117
+ const IODescriptor& structure);
118
+
119
+ } // namespace torch::jit::python
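
IODescriptor::structure encodes argument nesting as a string such as "(vv[v(v)v])": one 'v' per variable and a bracket pair of the corresponding kind around each tuple or list. A small self-contained sketch of that encoding; the Node type and helper functions are invented for illustration and are not the real flatten() implementation.

#include <iostream>
#include <string>
#include <vector>

// 'v' marks a variable, 't' a tuple, 'l' a list.
struct Node {
  char kind;
  std::vector<Node> children;
};

Node v() { return Node{'v', {}}; }
Node tup(std::vector<Node> c) { return Node{'t', std::move(c)}; }
Node lst(std::vector<Node> c) { return Node{'l', std::move(c)}; }

// Emit the structure string: one character per variable, a bracket pair of
// the corresponding kind around each tuple or list.
void encode(const Node& n, std::string& out) {
  if (n.kind == 'v') {
    out += 'v';
    return;
  }
  out += (n.kind == 't') ? '(' : '[';
  for (const auto& c : n.children) {
    encode(c, out);
  }
  out += (n.kind == 't') ? ')' : ']';
}

int main() {
  Node desc = tup({v(), v(), lst({v(), tup({v()}), v()})});
  std::string s;
  encode(desc, s);
  std::cout << s << "\n"; // prints (vv[v(v)v])
}
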
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_custom_class.h ADDED
@@ -0,0 +1,20 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/python/pybind_utils.h>
4
+ #include <torch/csrc/utils/pybind.h>
5
+ #include <torch/custom_class.h>
6
+
7
+ namespace torch::jit {
8
+
9
+ void initPythonCustomClassBindings(PyObject* module);
10
+
11
+ struct ScriptClass {
12
+ ScriptClass(c10::StrongTypePtr class_type)
13
+ : class_type_(std::move(class_type)) {}
14
+
15
+ py::object __call__(py::args args, py::kwargs kwargs);
16
+
17
+ c10::StrongTypePtr class_type_;
18
+ };
19
+
20
+ } // namespace torch::jit
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_dict.h ADDED
@@ -0,0 +1,126 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/Dict.h>
4
+ #include <ATen/core/ivalue.h>
5
+ #include <ATen/core/jit_type.h>
6
+ #include <torch/csrc/utils/pybind.h>
7
+
8
+ namespace torch::jit {
9
+
10
+ void initScriptDictBindings(PyObject* module);
11
+
12
+ /// An iterator over the keys of ScriptDict. This is used to support
13
+ /// .keys() and iteration.
14
+ class ScriptDictKeyIterator final {
15
+ public:
16
+ ScriptDictKeyIterator(
17
+ c10::impl::GenericDict::iterator iter,
18
+ c10::impl::GenericDict::iterator end)
19
+ : iter_(std::move(iter)), end_(std::move(end)) {}
20
+ IValue next();
21
+
22
+ private:
23
+ c10::impl::GenericDict::iterator iter_;
24
+ c10::impl::GenericDict::iterator end_;
25
+ };
26
+
27
+ /// An iterator over the key-value pairs of ScriptDict. This is used to support
28
+ /// .items().
29
+ class ScriptDictIterator final {
30
+ public:
31
+ ScriptDictIterator(
32
+ c10::impl::GenericDict::iterator iter,
33
+ c10::impl::GenericDict::iterator end)
34
+ : iter_(std::move(iter)), end_(std::move(end)) {}
35
+ IValue next();
36
+
37
+ private:
38
+ c10::impl::GenericDict::iterator iter_;
39
+ c10::impl::GenericDict::iterator end_;
40
+ };
41
+
42
+ /// A wrapper around c10::Dict that can be exposed in Python via pybind
43
+ /// with an API identical to the Python dictionary class. This allows
44
+ /// dictionaries to have reference semantics across the Python/TorchScript
45
+ /// boundary.
46
+ class ScriptDict final {
47
+ public:
48
+ // Constructor.
49
+ ScriptDict(IValue data) : dict_(AnyType::get(), AnyType::get()) {
50
+ TORCH_INTERNAL_ASSERT(data.isGenericDict());
51
+ dict_ = data.toGenericDict();
52
+ }
53
+
54
+ // Get the type of the dictionary.
55
+ DictTypePtr type() const {
56
+ return DictType::create(dict_.keyType(), dict_.valueType());
57
+ }
58
+
59
+ // Return a string representation that can be used
60
+ // to reconstruct the instance.
61
+ std::string repr() const {
62
+ std::ostringstream s;
63
+ s << '{';
64
+ bool f = false;
65
+ for (auto const& kv : dict_) {
66
+ if (f) {
67
+ s << ", ";
68
+ }
69
+ s << kv.key() << ": " << kv.value();
70
+ f = true;
71
+ }
72
+ s << '}';
73
+ return s.str();
74
+ }
75
+
76
+ // Return an iterator over the keys of the dictionary.
77
+ ScriptDictKeyIterator iter() const {
78
+ auto begin = dict_.begin();
79
+ auto end = dict_.end();
80
+ return ScriptDictKeyIterator(begin, end);
81
+ }
82
+
83
+ // Return an iterator over the key-value pairs of the dictionary.
84
+ ScriptDictIterator items() const {
85
+ auto begin = dict_.begin();
86
+ auto end = dict_.end();
87
+ return ScriptDictIterator(begin, end);
88
+ }
89
+
90
+ // Interpret the dictionary as a boolean; empty means false, non-empty means
91
+ // true.
92
+ bool toBool() const {
93
+ return !(dict_.empty());
94
+ }
95
+
96
+ // Get the value for the given key. Throws std::out_of_range if the key does
97
+ // not exist.
98
+ IValue getItem(const IValue& key) {
99
+ return dict_.at(key);
100
+ };
101
+
102
+ // Set the value for the given key.
103
+ void setItem(const IValue& key, const IValue& value) {
104
+ dict_.insert_or_assign(key, value);
105
+ };
106
+
107
+ // Check whether the dictionary contains the given key.
108
+ bool contains(const IValue& key) {
109
+ return dict_.contains(key);
110
+ }
111
+
112
+ // Delete the given key from the dictionary.
113
+ bool delItem(const IValue& key) {
114
+ return dict_.erase(key);
115
+ }
116
+
117
+ // Get the size of the dictionary.
118
+ int64_t len() const {
119
+ return dict_.size();
120
+ }
121
+
122
+ // A c10::Dict instance that holds the actual data.
123
+ c10::impl::GenericDict dict_;
124
+ };
125
+
126
+ } // namespace torch::jit
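
ScriptDict wraps the underlying c10::Dict rather than copying it, which is what gives dictionaries reference semantics across the Python/TorchScript boundary. A toy illustration of that design choice follows; MiniDict and its std::map storage are invented for the sketch and only mimic the aliasing behavior.

#include <cassert>
#include <cstddef>
#include <map>
#include <memory>
#include <string>

// The wrapper holds a handle to shared storage, so two wrappers built from
// the same data alias it and writes through one are visible through the other.
struct MiniDict {
  std::shared_ptr<std::map<std::string, int>> data =
      std::make_shared<std::map<std::string, int>>();

  void setItem(const std::string& k, int v) { (*data)[k] = v; }
  int getItem(const std::string& k) const { return data->at(k); }
  bool contains(const std::string& k) const { return data->count(k) > 0; }
  std::size_t len() const { return data->size(); }
};

int main() {
  MiniDict a;
  MiniDict b = a; // shares the same underlying storage (reference semantics)
  a.setItem("x", 1);
  assert(b.contains("x") && b.getItem("x") == 1);
  assert(b.len() == 1);
}
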
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_ir.h ADDED
@@ -0,0 +1,50 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+ #include <torch/csrc/utils/object_ptr.h>
5
+
6
+ namespace torch::jit {
7
+
8
+ void initPythonIRBindings(PyObject* module);
9
+
10
+ // execute a Python function, used for Ops we can't optimize but that we want to
11
+ // optimize around
12
+ struct ConcretePythonOp : public PythonOp {
13
+ static Symbol Kind;
14
+
15
+ ConcretePythonOp(Graph* graph) : PythonOp(graph, ::c10::prim::PythonOp) {}
16
+ ConcretePythonOp* init(
17
+ THPObjectPtr&& pyobj,
18
+ const std::string& cconv,
19
+ pyobj_list&& scalar_args) {
20
+ this->pyobj = std::move(pyobj);
21
+ this->scalar_args = std::move(scalar_args);
22
+ this->cconv = cconv;
23
+ return this;
24
+ }
25
+ // The Python object which contains the implementation of this function.
26
+ // This is either a class (non-legacy) or an object (legacy). See
27
+ // TraceInterpreterState for execution semantics.
28
+ THPObjectPtr pyobj;
29
+ // The calling convention for the Python function.
30
+ // 'c' -- constant argument
31
+ // 'd' -- dynamic argument
32
+ std::string cconv;
33
+ // Scalar arguments to the Python function. Not necessarily passed to
34
+ // the function in this order; see cconv for the correct order.
35
+ std::vector<THPObjectPtr> scalar_args;
36
+
37
+ std::string name() const override;
38
+ void cloneFrom(Node* other_) override;
39
+ Node* allocNewInstance(Graph* g) override {
40
+ return new ConcretePythonOp(g);
41
+ }
42
+ // recover the autograd.Function instance, if this PythonOp's function
43
+ // was originally SomeFunction.apply
44
+ // used in ONNX for discovering symbolics
45
+ c10::optional<THPObjectPtr> autogradFunction() const override;
46
+ void writeScalars(std::ostream& out) const override;
47
+ void lint_python() const override;
48
+ };
49
+
50
+ } // namespace torch::jit
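
The cconv string above records, per argument position, whether the Python function receives a constant scalar ('c') or a dynamic input ('d'). A hedged sketch of how such a string could be consumed to rebuild the call's argument order; assembleArgs and the string payloads are hypothetical, used only to show the interleaving.

#include <cassert>
#include <string>
#include <vector>

// Walk the calling-convention string: 'c' takes the next scalar (constant)
// argument, 'd' the next dynamic input, yielding the final argument order.
std::vector<std::string> assembleArgs(
    const std::string& cconv,
    const std::vector<std::string>& scalars,
    const std::vector<std::string>& dynamics) {
  std::vector<std::string> out;
  size_t si = 0, di = 0;
  for (char c : cconv) {
    if (c == 'c') {
      out.push_back(scalars.at(si++));
    } else if (c == 'd') {
      out.push_back(dynamics.at(di++));
    }
  }
  return out;
}

int main() {
  auto args = assembleArgs("dcd", {"alpha"}, {"x", "y"});
  assert((args == std::vector<std::string>{"x", "alpha", "y"}));
}
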
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_ivalue.h ADDED
@@ -0,0 +1,97 @@
1
+ #pragma once
2
+ #include <ATen/core/ivalue.h>
3
+ #include <pybind11/pybind11.h>
4
+ #include <torch/csrc/jit/python/pybind_utils.h>
5
+ #include <torch/csrc/python_headers.h>
6
+ #include <torch/csrc/utils/pybind.h>
7
+
8
+ namespace py = pybind11;
9
+
10
+ namespace c10::ivalue {
11
+
12
+ // concrete ivalue Holder that hold a py::object
13
+ struct C10_EXPORT ConcretePyObjectHolder final : PyObjectHolder {
14
+ public:
15
+ static c10::intrusive_ptr<PyObjectHolder> create(py::object py_obj) {
16
+ return c10::make_intrusive<ConcretePyObjectHolder>(std::move(py_obj));
17
+ }
18
+
19
+ static c10::intrusive_ptr<PyObjectHolder> create(const py::handle& handle) {
20
+ py::gil_scoped_acquire ag;
21
+ return c10::make_intrusive<ConcretePyObjectHolder>(
22
+ handle.cast<py::object>());
23
+ }
24
+
25
+ PyObject* getPyObject() override {
26
+ return py_obj_.ptr();
27
+ }
28
+
29
+ InferredType tryToInferType() override {
30
+ pybind11::gil_scoped_acquire ag;
31
+ return torch::jit::tryToInferType(py_obj_);
32
+ }
33
+
34
+ IValue toIValue(const TypePtr& type, c10::optional<int32_t> N = c10::nullopt)
35
+ override {
36
+ pybind11::gil_scoped_acquire ag;
37
+ return torch::jit::toIValue(py_obj_, type, N);
38
+ }
39
+
40
+ std::string toStr() override {
41
+ pybind11::gil_scoped_acquire ag;
42
+ return py::str(py_obj_);
43
+ }
44
+
45
+ std::vector<at::Tensor> extractTensors() override {
46
+ // We could implement this entirely in C++ via pybind11 but it turns out to
47
+ // be substantially slower. Namely, the total time taken by markCompleted on
48
+ // a CUDAFuture is 21.5us with this implementation, but goes up to 58.7us
49
+ // when using C++. The reason is unclear.
50
+ try {
51
+ pybind11::gil_scoped_acquire ag;
52
+ static py::object& extractorFn = *new py::object(
53
+ py::module::import("torch._jit_internal").attr("_extract_tensors"));
54
+ return extractorFn(py_obj_).cast<std::vector<at::Tensor>>();
55
+ } catch (py::error_already_set& e) {
56
+ auto err = std::runtime_error(
57
+ c10::str("Cannot extract tensors from value: ", e.what()));
58
+ {
59
+ pybind11::gil_scoped_acquire ag;
60
+ e.restore();
61
+ PyErr_Clear();
62
+ }
63
+ throw err;
64
+ }
65
+ }
66
+
67
+ // Note [Destructing py::object]
68
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~
69
+ //
70
+ // (1) Why py_obj_ = py::none(); does not work. Because we also need to
71
+ // acquire GIL when destructing py::object of None that de-references None.
72
+ // https://docs.python.org/3/c-api/none.html#c.Py_RETURN_NONE
73
+ //
74
+ // https://stackoverflow.com/questions/15287590/why-should-py-increfpy-none-be-required-before-returning-py-none-in-c
75
+ //
76
+ // (2) Why we need to call dec_ref() explicitly. Because py::object of
77
+ // nullptr, on destruction, effectively does nothing because of it calls
78
+ // Py_XDECREF(NULL) underlying.
79
+ // https://docs.python.org/3/c-api/refcounting.html#c.Py_XDECREF
80
+ ~ConcretePyObjectHolder() override {
81
+ pybind11::gil_scoped_acquire ag;
82
+ py_obj_.dec_ref();
83
+ // explicitly setting PyObject* to nullptr to prevent py::object's dtor to
84
+ // decref on the PyObject again.
85
+ py_obj_.ptr() = nullptr;
86
+ }
87
+
88
+ // explicit construction to avoid errornous implicit conversion and
89
+ // copy-initialization
90
+ explicit ConcretePyObjectHolder(py::object py_obj)
91
+ : py_obj_(std::move(py_obj)) {}
92
+
93
+ private:
94
+ py::object py_obj_;
95
+ };
96
+
97
+ } // namespace c10::ivalue
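
The destructor above follows the rule from Note [Destructing py::object]: a py::object owned by C++ must be released while holding the GIL, and the raw pointer nulled so the base destructor does not decref a second time. Below is a compact sketch of that pattern using pybind11's embedding API; the Holder type is hypothetical, and this is only an illustration of the pattern, not the PyTorch holder.

#include <pybind11/embed.h>
namespace py = pybind11;

struct Holder {
  py::object obj;
  explicit Holder(py::object o) : obj(std::move(o)) {}
  ~Holder() {
    py::gil_scoped_acquire gil; // destruction may run on a non-Python thread
    obj.dec_ref();              // drop our reference while the GIL is held
    obj.ptr() = nullptr;        // keep ~object() from decref-ing a second time
  }
};

int main() {
  py::scoped_interpreter guard;   // embedded interpreter just for the demo
  { Holder h(py::str("hello")); } // ~Holder runs here, under the GIL
}
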
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_list.h ADDED
@@ -0,0 +1,228 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/Dict.h>
4
+ #include <ATen/core/List.h>
5
+ #include <ATen/core/ivalue.h>
6
+ #include <ATen/core/jit_type.h>
7
+ #include <c10/util/Optional.h>
8
+ #include <pybind11/detail/common.h>
9
+ #include <torch/csrc/utils/pybind.h>
10
+ #include <cstddef>
11
+ #include <stdexcept>
12
+
13
+ namespace torch::jit {
14
+
15
+ void initScriptListBindings(PyObject* module);
16
+
17
+ /// An iterator over the elements of ScriptList. This is used to support
18
+ /// __iter__().
19
+ class ScriptListIterator final {
20
+ public:
21
+ ScriptListIterator(
22
+ c10::impl::GenericList::iterator iter,
23
+ c10::impl::GenericList::iterator end)
24
+ : iter_(iter), end_(end) {}
25
+ IValue next();
26
+ bool done() const;
27
+
28
+ private:
29
+ c10::impl::GenericList::iterator iter_;
30
+ c10::impl::GenericList::iterator end_;
31
+ };
32
+
33
+ /// A wrapper around c10::List that can be exposed in Python via pybind
34
+ /// with an API identical to the Python list class. This allows
35
+ /// lists to have reference semantics across the Python/TorchScript
36
+ /// boundary.
37
+ class ScriptList final {
38
+ public:
39
+ // TODO: Do these make sense?
40
+ using size_type = size_t;
41
+ using diff_type = ptrdiff_t;
42
+ using ssize_t = Py_ssize_t;
43
+
44
+ // Constructor for empty lists created during slicing, extending, etc.
45
+ ScriptList(const TypePtr& type) : list_(AnyType::get()) {
46
+ auto list_type = type->expect<ListType>();
47
+ list_ = c10::impl::GenericList(list_type);
48
+ }
49
+
50
+ // Constructor for instances based on existing lists (e.g. a
51
+ // Python instance or a list nested inside another).
52
+ ScriptList(IValue data) : list_(AnyType::get()) {
53
+ TORCH_INTERNAL_ASSERT(data.isList());
54
+ list_ = data.toList();
55
+ }
56
+
57
+ ListTypePtr type() const {
58
+ return ListType::create(list_.elementType());
59
+ }
60
+
61
+ // Return a string representation that can be used
62
+ // to reconstruct the instance.
63
+ std::string repr() const {
64
+ std::ostringstream s;
65
+ s << '[';
66
+ bool f = false;
67
+ for (auto const& elem : list_) {
68
+ if (f) {
69
+ s << ", ";
70
+ }
71
+ s << IValue(elem);
72
+ f = true;
73
+ }
74
+ s << ']';
75
+ return s.str();
76
+ }
77
+
78
+ // Return an iterator over the elements of the list.
79
+ ScriptListIterator iter() const {
80
+ auto begin = list_.begin();
81
+ auto end = list_.end();
82
+ return ScriptListIterator(begin, end);
83
+ }
84
+
85
+ // Interpret the list as a boolean; empty means false, non-empty means
86
+ // true.
87
+ bool toBool() const {
88
+ return !(list_.empty());
89
+ }
90
+
91
+ // Get the value for the given index.
92
+ IValue getItem(diff_type idx) {
93
+ idx = wrap_index(idx);
94
+ return list_.get(idx);
95
+ };
96
+
97
+ // Set the value corresponding to the given index.
98
+ void setItem(diff_type idx, const IValue& value) {
99
+ idx = wrap_index(idx);
100
+ return list_.set(idx, value);
101
+ }
102
+
103
+ // Check whether the list contains the given value.
104
+ bool contains(const IValue& value) {
105
+ for (const auto& elem : list_) {
106
+ if (elem == value) {
107
+ return true;
108
+ }
109
+ }
110
+
111
+ return false;
112
+ }
113
+
114
+ // Delete the item at the given index from the list.
115
+ void delItem(diff_type idx) {
116
+ idx = wrap_index(idx);
117
+ auto iter = list_.begin() + idx;
118
+ list_.erase(iter);
119
+ }
120
+
121
+ // Get the size of the list.
122
+ ssize_t len() const {
123
+ return list_.size();
124
+ }
125
+
126
+ // Count the number of times a value appears in the list.
127
+ ssize_t count(const IValue& value) const {
128
+ ssize_t total = 0;
129
+
130
+ for (const auto& elem : list_) {
131
+ if (elem == value) {
132
+ ++total;
133
+ }
134
+ }
135
+
136
+ return total;
137
+ }
138
+
139
+ // Remove the first occurrence of a value from the list.
140
+ void remove(const IValue& value) {
141
+ auto list = list_;
142
+
143
+ int64_t idx = -1, i = 0;
144
+
145
+ for (const auto& elem : list) {
146
+ if (elem == value) {
147
+ idx = i;
148
+ break;
149
+ }
150
+
151
+ ++i;
152
+ }
153
+
154
+ if (idx == -1) {
155
+ throw py::value_error();
156
+ }
157
+
158
+ list.erase(list.begin() + idx);
159
+ }
160
+
161
+ // Append a value to the end of the list.
162
+ void append(const IValue& value) {
163
+ list_.emplace_back(value);
164
+ }
165
+
166
+ // Clear the contents of the list.
167
+ void clear() {
168
+ list_.clear();
169
+ }
170
+
171
+ // Append the contents of an iterable to the list.
172
+ void extend(const IValue& iterable) {
173
+ list_.append(iterable.toList());
174
+ }
175
+
176
+ // Remove and return the element at the specified index from the list. If no
177
+ // index is passed, the last element is removed and returned.
178
+ IValue pop(c10::optional<size_type> idx = c10::nullopt) {
179
+ IValue ret;
180
+
181
+ if (idx) {
182
+ idx = wrap_index(*idx);
183
+ ret = list_.get(*idx);
184
+ list_.erase(list_.begin() + *idx);
185
+ } else {
186
+ ret = list_.get(list_.size() - 1);
187
+ list_.pop_back();
188
+ }
189
+
190
+ return ret;
191
+ }
192
+
193
+ // Insert a value before the given index.
194
+ void insert(const IValue& value, diff_type idx) {
195
+ // wrap_index cannot be used; idx == len() is allowed
196
+ if (idx < 0) {
197
+ idx += len();
198
+ }
199
+
200
+ if (idx < 0 || idx > len()) {
201
+ throw std::out_of_range("list index out of range");
202
+ }
203
+
204
+ list_.insert(list_.begin() + idx, value);
205
+ }
206
+
207
+ // A c10::List instance that holds the actual data.
208
+ c10::impl::GenericList list_;
209
+
210
+ private:
211
+ // Wrap an index so that it can safely be used to access
212
+ // the list. For list of size sz, this function can successfully
213
+ // wrap indices in the range [-sz, sz-1]
214
+ diff_type wrap_index(diff_type idx) {
215
+ auto sz = len();
216
+ if (idx < 0) {
217
+ idx += sz;
218
+ }
219
+
220
+ if (idx < 0 || idx >= sz) {
221
+ throw std::out_of_range("list index out of range");
222
+ }
223
+
224
+ return idx;
225
+ }
226
+ };
227
+
228
+ } // namespace torch::jit
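
wrap_index() implements Python-style indexing: for a list of size sz it accepts indices in [-sz, sz-1] and maps negative values to offsets from the end. A standalone version of just that helper, written as a free function for illustration:

#include <cassert>
#include <cstddef>
#include <stdexcept>

// Python-style index wrapping: negatives count from the end, anything
// outside [-sz, sz-1] raises, matching the class method above.
std::ptrdiff_t wrap_index(std::ptrdiff_t idx, std::ptrdiff_t sz) {
  if (idx < 0) {
    idx += sz;
  }
  if (idx < 0 || idx >= sz) {
    throw std::out_of_range("list index out of range");
  }
  return idx;
}

int main() {
  assert(wrap_index(-1, 4) == 3);
  assert(wrap_index(0, 4) == 0);
  try {
    wrap_index(-5, 4);
    assert(false);
  } catch (const std::out_of_range&) {
  }
}
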
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_tracer.h ADDED
@@ -0,0 +1,45 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/frontend/source_range.h>
4
+ #include <torch/csrc/jit/frontend/tracer.h>
5
+ #include <torch/csrc/python_headers.h>
6
+ #include <torch/csrc/utils/pybind.h>
7
+
8
+ #include <memory>
9
+ #include <string>
10
+
11
+ namespace torch::jit {
12
+
13
+ struct Module;
14
+
15
+ namespace tracer {
16
+ void initPythonTracerBindings(PyObject* module);
17
+
18
+ SourceRange getPythonInterpreterSourceRange();
19
+
20
+ Node* preRecordPythonTrace(
21
+ THPObjectPtr pyobj,
22
+ const std::string& arg_types,
23
+ at::ArrayRef<autograd::Variable> inputs,
24
+ std::vector<THPObjectPtr> scalar_args);
25
+
26
+ std::pair<std::shared_ptr<Graph>, Stack> createGraphByTracingWithDict(
27
+ const py::function& func,
28
+ const py::dict& inputs_dict,
29
+ Stack inputs,
30
+ const py::function& var_name_lookup_fn,
31
+ bool strict,
32
+ bool force_outplace,
33
+ Module* self = nullptr,
34
+ const std::vector<std::string>& argument_names = {});
35
+
36
+ std::pair<std::shared_ptr<Graph>, Stack> createGraphByTracing(
37
+ const py::function& func,
38
+ Stack inputs,
39
+ const py::function& var_name_lookup_fn,
40
+ bool strict,
41
+ bool force_outplace,
42
+ Module* self = nullptr,
43
+ const std::vector<std::string>& argument_names = {});
44
+ } // namespace tracer
45
+ } // namespace torch::jit
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_tree_views.h ADDED
@@ -0,0 +1,9 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/python_headers.h>
4
+
5
+ namespace torch::jit {
6
+
7
+ void initTreeViewBindings(PyObject* module);
8
+
9
+ } // namespace torch::jit
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/script_init.h ADDED
@@ -0,0 +1,7 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/python/pybind.h>
4
+
5
+ namespace torch::jit {
6
+ void initJitScriptBindings(PyObject* module);
7
+ } // namespace torch::jit
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/update_graph_executor_opt.h ADDED
@@ -0,0 +1,6 @@
1
+ #pragma once
2
+ #include <torch/csrc/Export.h>
3
+ namespace torch::jit {
4
+ TORCH_API void setGraphExecutorOptimize(bool o);
5
+ TORCH_API bool getGraphExecutorOptimize();
6
+ } // namespace torch::jit
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/utf8_decoding_ignore.h ADDED
@@ -0,0 +1,6 @@
1
+ #pragma once
2
+ #include <torch/csrc/Export.h>
3
+ namespace torch::jit {
4
+ TORCH_API void setUTF8DecodingIgnore(bool o);
5
+ TORCH_API bool getUTF8DecodingIgnore();
6
+ } // namespace torch::jit
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/analysis.h ADDED
@@ -0,0 +1,406 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/tensorexpr/ir.h>
4
+ #include <torch/csrc/jit/tensorexpr/ir_visitor.h>
5
+ #include <torch/csrc/jit/tensorexpr/stmt.h>
6
+ #include <torch/csrc/jit/tensorexpr/tensor.h>
7
+
8
+ #include <utility>
9
+
10
+ namespace torch {
11
+ namespace jit {
12
+ namespace tensorexpr {
13
+ class HasRand : public IRVisitor {
14
+ public:
15
+ HasRand(StmtPtr stmt) : stmt_(std::move(stmt)) {
16
+ stmt_->accept(this);
17
+ }
18
+
19
+ bool has_rand() const {
20
+ return has_rand_;
21
+ }
22
+
23
+ private:
24
+ void visit(IntrinsicsPtr v) override {
25
+ if (v->op_type() == IntrinsicsOp::kRand) {
26
+ has_rand_ = true;
27
+ } else {
28
+ IRVisitor::visit(std::move(v));
29
+ }
30
+ }
31
+ StmtPtr stmt_;
32
+ bool has_rand_ = false;
33
+ };
34
+
35
+ template <typename Op>
36
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
37
+ class NodeFinder : public IRVisitor {
38
+ public:
39
+ void visit(NodePtr<Op> v) override {
40
+ nodes.push_back((NodePtr<Op>)v);
41
+ IRVisitor::visit(v);
42
+ }
43
+
44
+ static std::vector<NodePtr<Op>> find(StmtPtr s) {
45
+ NodeFinder<Op> nf;
46
+ s->accept(&nf);
47
+ return nf.nodes;
48
+ }
49
+
50
+ static std::vector<NodePtr<Op>> find(ExprPtr e) {
51
+ NodeFinder<Op> nf;
52
+ e->accept(&nf);
53
+ return nf.nodes;
54
+ }
55
+
56
+ std::vector<NodePtr<Op>> nodes;
57
+ };
58
+
59
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
60
+ class VarFinder : public IRVisitor {
61
+ public:
62
+ void visit(VarPtr v) override {
63
+ vars_.insert(v);
64
+ IRVisitor::visit(std::move(v));
65
+ }
66
+
67
+ static std::unordered_set<VarPtr> find(StmtPtr s) {
68
+ VarFinder nf;
69
+ s->accept(&nf);
70
+ return nf.vars();
71
+ }
72
+
73
+ static std::unordered_set<VarPtr> find(ExprPtr e) {
74
+ VarFinder nf;
75
+ e->accept(&nf);
76
+ return nf.vars();
77
+ }
78
+
79
+ const std::unordered_set<VarPtr>& vars() {
80
+ return vars_;
81
+ }
82
+
83
+ private:
84
+ std::unordered_set<VarPtr> vars_;
85
+ };
86
+
87
+ class BufFinder : public IRVisitor {
88
+ public:
89
+ void visit(BufPtr v) override {
90
+ bufs_.insert(v);
91
+ IRVisitor::visit(std::move(v));
92
+ }
93
+
94
+ static std::unordered_set<BufPtr> find(StmtPtr s) {
95
+ BufFinder nf;
96
+ s->accept(&nf);
97
+ return nf.bufs();
98
+ }
99
+
100
+ static std::unordered_set<BufPtr> find(ExprPtr e) {
101
+ BufFinder nf;
102
+ e->accept(&nf);
103
+ return nf.bufs();
104
+ }
105
+
106
+ const std::unordered_set<BufPtr>& bufs() {
107
+ return bufs_;
108
+ }
109
+
110
+ private:
111
+ std::unordered_set<BufPtr> bufs_;
112
+ };
113
+
114
+ // Finds all kinds of write operations to the provided Buf.
115
+ class WritesToBuf : public IRVisitor {
116
+ public:
117
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
118
+ WritesToBuf(BufPtr target) : target_(std::move(target)) {}
119
+
120
+ std::vector<StmtPtr> writes() {
121
+ return writes_;
122
+ }
123
+
124
+ static std::vector<StmtPtr> find(StmtPtr s, BufPtr b) {
125
+ WritesToBuf finder(std::move(b));
126
+ s->accept(&finder);
127
+ return finder.writes();
128
+ }
129
+
130
+ private:
131
+ void visit(StorePtr v) override {
132
+ if (v->buf() == target_) {
133
+ writes_.push_back(v);
134
+ }
135
+ }
136
+
137
+ void visit(AtomicAddPtr v) override {
138
+ if (v->buf() == target_) {
139
+ writes_.push_back(v);
140
+ }
141
+ }
142
+
143
+ BufPtr target_;
144
+ std::vector<StmtPtr> writes_;
145
+ };
146
+
147
+ class StmtsReadingBuf : public IRVisitor {
148
+ public:
149
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
150
+ StmtsReadingBuf(BufPtr target) : target_(std::move(target)) {}
151
+
152
+ std::vector<StmtPtr> reads() {
153
+ return reads_;
154
+ }
155
+
156
+ static std::vector<StmtPtr> find(StmtPtr s, BufPtr b) {
157
+ StmtsReadingBuf finder(std::move(b));
158
+ s->accept(&finder);
159
+ return finder.reads();
160
+ }
161
+
162
+ private:
163
+ bool readsBuffer(StmtPtr s) {
164
+ auto loads = NodeFinder<Load>::find(std::move(s));
165
+ for (const auto& l : loads) {
166
+ if (l->buf() == target_) {
167
+ return true;
168
+ }
169
+ }
170
+ return false;
171
+ }
172
+
173
+ void visit(StorePtr v) override {
174
+ if (readsBuffer(v)) {
175
+ reads_.push_back(v);
176
+ }
177
+ }
178
+
179
+ void visit(LetPtr v) override {
180
+ if (readsBuffer(v)) {
181
+ reads_.push_back(v);
182
+ }
183
+ }
184
+
185
+ void visit(CondPtr v) override {
186
+ if (readsBuffer(v)) {
187
+ reads_.push_back(v);
188
+ }
189
+ }
190
+
191
+ void visit(AtomicAddPtr v) override {
192
+ if (readsBuffer(v)) {
193
+ reads_.push_back(v);
194
+ }
195
+ }
196
+
197
+ BufPtr target_;
198
+ std::vector<StmtPtr> reads_;
199
+ };
200
+
201
+ class ExternalAllocBufFinder : public IRVisitor {
202
+ public:
203
+ void visit(ExternalCallWithAllocPtr v) override {
204
+ const auto& bufs_out = v->buf_out_args();
205
+ bufs_.insert(bufs_out.begin(), bufs_out.end());
206
+ IRVisitor::visit(std::move(v));
207
+ }
208
+
209
+ static std::unordered_set<BufPtr> find(StmtPtr s) {
210
+ ExternalAllocBufFinder f;
211
+ s->accept(&f);
212
+ return f.bufs();
213
+ }
214
+
215
+ static std::unordered_set<BufPtr> find(ExprPtr e) {
216
+ ExternalAllocBufFinder f;
217
+ e->accept(&f);
218
+ return f.bufs();
219
+ }
220
+
221
+ const std::unordered_set<BufPtr>& bufs() {
222
+ return bufs_;
223
+ }
224
+
225
+ private:
226
+ std::unordered_set<BufPtr> bufs_;
227
+ };
228
+
229
+ // Traverses the IR to determine if a particular Var is modified within it.
230
+ class ModifiesVarChecker : public IRVisitor {
231
+ public:
232
+ ModifiesVarChecker(VarPtr v) : var_(std::move(v)) {}
233
+
234
+ static bool check(StmtPtr s, VarPtr v) {
235
+ ModifiesVarChecker checker(std::move(v));
236
+ s->accept(&checker);
237
+ return checker.found();
238
+ }
239
+
240
+ bool found() {
241
+ return found_;
242
+ }
243
+
244
+ private:
245
+ void visit(StorePtr v) override {
246
+ if (v->buf()->base_handle() == var_) {
247
+ found_ = true;
248
+ return;
249
+ }
250
+ IRVisitor::visit(std::move(v));
251
+ }
252
+
253
+ void visit(AtomicAddPtr v) override {
254
+ if (v->buf()->base_handle() == var_) {
255
+ found_ = true;
256
+ return;
257
+ }
258
+ IRVisitor::visit(std::move(v));
259
+ }
260
+
261
+ void visit(LetPtr v) override {
262
+ if (v->var() == var_) {
263
+ found_ = true;
264
+ return;
265
+ }
266
+ IRVisitor::visit(std::move(v));
267
+ }
268
+
269
+ void visit(ForPtr v) override {
270
+ if (v->var() == var_) {
271
+ found_ = true;
272
+ return;
273
+ }
274
+ IRVisitor::visit(std::move(v));
275
+ }
276
+
277
+ VarPtr var_;
278
+ bool found_{false};
279
+ };
280
+
281
+ // Traverse the Block stmt to identify the live range of the specified buf. The
282
+ // live range, indicated by a pair of integers, specifies the first and last
283
+ // stmt in block stmts that access to the buf.
284
+ class BufLiveRange : public IRVisitor {
285
+ public:
286
+ BufLiveRange(BufPtr b) : buf_(std::move(b)) {}
287
+
288
+ static std::tuple<int32_t, int32_t> liveRange(StmtPtr s, BufPtr b) {
289
+ BlockPtr block = to<Block>(std::move(s));
290
+ // We Only analyze buffer live ranges for block stmts.
291
+ if (!block) {
292
+ return std::make_tuple(0, 0);
293
+ }
294
+
295
+ BufLiveRange analyzer(std::move(b));
296
+ block->accept(&analyzer);
297
+ return analyzer.getLiveRange();
298
+ }
299
+
300
+ private:
301
+ std::tuple<int32_t, int32_t> getLiveRange() {
302
+ return std::make_tuple(begin_, end_);
303
+ }
304
+
305
+ bool hasBufReads(StmtPtr s) {
306
+ auto loads1 = NodeFinder<Load>::find(s);
307
+ for (const auto& l : loads1) {
308
+ if (l->buf() == buf_) {
309
+ return true;
310
+ }
311
+ }
312
+ auto loads2 = NodeFinder<ExternalCall>::find(s);
313
+ for (const auto& l : loads2) {
314
+ for (const auto& lb : l->buf_args()) {
315
+ if (lb == buf_) {
316
+ return true;
317
+ }
318
+ }
319
+ }
320
+ auto loads3 = NodeFinder<ExternalCallWithAlloc>::find(std::move(s));
321
+ for (const auto& l : loads3) {
322
+ for (const auto& lb : l->buf_args()) {
323
+ if (lb == buf_) {
324
+ return true;
325
+ }
326
+ }
327
+ }
328
+ return false;
329
+ }
330
+
331
+ bool hasBufWrites(StmtPtr s) {
332
+ auto writes1 = NodeFinder<Store>::find(s);
333
+ for (const auto& w : writes1) {
334
+ if (w->buf() == buf_) {
335
+ return true;
336
+ }
337
+ }
338
+ auto writes2 = NodeFinder<ExternalCall>::find(s);
339
+ for (const auto& w : writes2) {
340
+ if (w->buf() == buf_) {
341
+ return true;
342
+ }
343
+ }
344
+ auto writes3 = NodeFinder<ExternalCallWithAlloc>::find(std::move(s));
345
+ for (const auto& w : writes3) {
346
+ for (const auto& wb : w->buf_out_args()) {
347
+ if (wb == buf_) {
348
+ return true;
349
+ }
350
+ }
351
+ }
352
+ return false;
353
+ }
354
+
355
+ void findAccAndUpdateLiveRange(StmtPtr s) {
356
+ bool has_reads = hasBufReads(s), has_writes = hasBufWrites(std::move(s));
357
+ if (has_reads || has_writes) {
358
+ if (begin_ == -1) {
359
+ begin_ = curr_index_;
360
+ };
361
+ end_ = curr_index_;
362
+ }
363
+ }
364
+
365
+ void visit(BlockPtr v) override {
366
+ for (const StmtPtr& s : *v) {
367
+ curr_index_ += 1;
368
+ findAccAndUpdateLiveRange(s);
369
+ }
370
+ }
371
+
372
+ BufPtr buf_;
373
+ int32_t begin_ = -1;
374
+ int32_t end_ = -1;
375
+ int32_t curr_index_ = -1;
376
+ };
377
+
378
+ // A class that analyzes the given program relevant for Block backend
379
+ // It creates a map of multi dim buffers and their flat versions
380
+ class CreateBufferMap : public IRVisitor {
381
+ public:
382
+ const std::unordered_map<std::string, BufPtr>& getBufferMap() const {
383
+ return map_input_to_tensor_bufs_;
384
+ }
385
+
386
+ private:
387
+ void visit(StorePtr v) override {
388
+ auto load_node = to<Load>(v->value());
389
+ if (load_node) {
390
+ auto t_buf = load_node->buf();
391
+ map_input_to_tensor_bufs_.emplace(t_buf->name_hint(), v->buf());
392
+ } else {
393
+ auto add_node = to<Add>(v->value());
394
+ auto mul_node = to<Mul>(v->value());
395
+ // This means for now, v->value() can be Add or Mul
396
+ TORCH_INTERNAL_ASSERT(add_node || mul_node, buildErrorMessage());
397
+ map_input_to_tensor_bufs_.emplace(v->buf()->name_hint(), v->buf());
398
+ }
399
+ v->value()->accept(this);
400
+ }
401
+ std::unordered_map<std::string, BufPtr> map_input_to_tensor_bufs_;
402
+ };
403
+
404
+ } // namespace tensorexpr
405
+ } // namespace jit
406
+ } // namespace torch
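
Classes such as NodeFinder and VarFinder all follow the same shape: subclass IRVisitor, override visit() for the node kind of interest, collect matches, and let the base class handle traversal. A self-contained miniature of that visitor/finder pattern; the Expr/Var/Add hierarchy here is invented and is not the tensorexpr IR.

#include <iostream>
#include <memory>
#include <string>
#include <vector>

struct Var;
struct Add;

// Base visitor with default traversal, playing the role of IRVisitor.
struct Visitor {
  virtual ~Visitor() = default;
  virtual void visit(const Var&) {}
  virtual void visit(const Add& a);
};

struct Expr {
  virtual ~Expr() = default;
  virtual void accept(Visitor& v) const = 0;
};

struct Var : Expr {
  std::string name;
  explicit Var(std::string n) : name(std::move(n)) {}
  void accept(Visitor& v) const override { v.visit(*this); }
};

struct Add : Expr {
  std::shared_ptr<Expr> lhs, rhs;
  Add(std::shared_ptr<Expr> l, std::shared_ptr<Expr> r)
      : lhs(std::move(l)), rhs(std::move(r)) {}
  void accept(Visitor& v) const override { v.visit(*this); }
};

void Visitor::visit(const Add& a) { // default: recurse into children
  a.lhs->accept(*this);
  a.rhs->accept(*this);
}

// The "finder": override one visit() and collect every match.
struct VarFinder : Visitor {
  std::vector<const Var*> vars;
  void visit(const Var& v) override { vars.push_back(&v); }
};

int main() {
  auto e = std::make_shared<Add>(
      std::make_shared<Var>("a"),
      std::make_shared<Add>(std::make_shared<Var>("b"), std::make_shared<Var>("a")));
  VarFinder f;
  e->accept(f);
  std::cout << f.vars.size() << "\n"; // prints 3
}
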
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/block_codegen.h ADDED
@@ -0,0 +1,150 @@
1
+ #pragma once
2
+
3
+ #include <string>
4
+ #include <unordered_map>
5
+ #include <unordered_set>
6
+ #include <utility>
7
+
8
+ #include <ATen/ATen.h>
9
+ #include <torch/csrc/jit/resource_guard.h>
10
+ #include <torch/csrc/jit/tensorexpr/analysis.h>
11
+ #include <torch/csrc/jit/tensorexpr/codegen.h>
12
+ #include <torch/csrc/jit/tensorexpr/ir.h>
13
+ #include <torch/csrc/jit/tensorexpr/ir_printer.h>
14
+ #include <torch/csrc/jit/tensorexpr/ir_visitor.h>
15
+ #include <torch/csrc/jit/tensorexpr/unique_name_manager.h>
16
+
17
+ namespace torch {
18
+ namespace jit {
19
+ namespace tensorexpr {
20
+
21
+ // A class that analyzes the given program relevant for Block backend.
22
+ class BlockAnalysis : public IRVisitor {
23
+ public:
24
+ bool is_buf_store_target(BufPtr buf) const {
25
+ return store_targets_.count(buf) > 0;
26
+ }
27
+
28
+ const std::unordered_set<BufPtr>& loads() const {
29
+ return loads_;
30
+ }
31
+
32
+ const std::unordered_set<BufPtr>& stores() const {
33
+ return store_targets_;
34
+ }
35
+
36
+ int block_size() const {
37
+ return block_size_;
38
+ }
39
+
40
+ bool areBufsInMap(const std::unordered_set<BufPtr>& bufs) const;
41
+
42
+ BufPtr getMultiDimBuf(BufPtr buf) const;
43
+
44
+ std::string getInputName(BufPtr buf) const;
45
+
46
+ std::string getFlatInputName(BufPtr buf) const {
47
+ return getInputName(std::move(buf)) + "_flat";
48
+ }
49
+
50
+ std::unordered_map<std::string, BufPtr> getBufferMap() const {
51
+ return map_input_to_tensor_bufs_;
52
+ }
53
+
54
+ private:
55
+ void visit(StorePtr v) override;
56
+ void visit(LoadPtr v) override;
57
+ void visit(ForPtr v) override;
58
+
59
+ std::unordered_map<std::string, BufPtr> map_input_to_tensor_bufs_;
60
+ std::unordered_set<BufPtr> store_targets_;
61
+ std::unordered_set<BufPtr> loads_;
62
+ int block_size_ = 32;
63
+ };
64
+
65
+ // A class that overrides the underlying IRPrinter to produce Block.
66
+ class BlockPrinter : public IRPrinter {
67
+ public:
68
+ BlockPrinter(std::ostream* os, BlockAnalysis* block_analysis)
69
+ : IRPrinter(*os), block_analysis_(block_analysis) {}
70
+
71
+ using IRPrinter::name_manager;
72
+ using IRPrinter::visit;
73
+
74
+ private:
75
+ BlockAnalysis* block_analysis_;
76
+ std::unordered_map<std::string, int> dim_values_map;
77
+ std::vector<std::string> dim_names = {"N", "H", "W", "C"};
78
+ std::vector<std::string> flat_dim_names = {"N", "NH", "NHW", "NHWC"};
79
+ void PrintTensorInfo(const std::unordered_set<BufPtr>& bufs);
80
+ void PrintArguments(const std::unordered_set<BufPtr>& bufs);
81
+ void PrintBufferInfo(const std::unordered_set<BufPtr>& bufs);
82
+ void PrintDistribution(const std::unordered_set<BufPtr>& bufs);
83
+ void PrintLoop(const std::unordered_set<BufPtr>& bufs, bool block_idx = true);
84
+ void PrintReshapeInfo(
85
+ const std::unordered_set<BufPtr>& bufs,
86
+ bool reverse = false);
87
+ void PrintDMAs(const std::unordered_set<BufPtr>& bufs);
88
+ void PrintAdjustBuffers(const std::unordered_set<BufPtr>& bufs);
89
+
90
+ void visit(ForPtr v) override;
91
+ void visit(LoadPtr v) override;
92
+ void visit(StorePtr v) override;
93
+ void visit(BlockPtr v) override;
94
+ void visit(AddPtr v) override;
95
+ void visit(MulPtr v) override;
96
+ };
97
+
98
+ class TORCH_API BlockCodeGen : public CodeGen {
99
+ public:
100
+ template <typename... Ts>
101
+ /* implicit */
102
+ BlockCodeGen(StmtPtr stmt, Ts... ts)
103
+ : CodeGen(
104
+ stmt,
105
+ std::vector<BufferArg>({BufferArg(ts)...}),
106
+ at::Device(at::kCPU)) {
107
+ Initialize();
108
+ }
109
+
110
+ BlockCodeGen(
111
+ StmtPtr stmt,
112
+ const std::vector<BufferArg>& buffer_args,
113
+ at::Device device = at::Device(at::kCPU),
114
+ const std::string& kernel_func_name = "func")
115
+ : CodeGen(stmt, buffer_args, device, kernel_func_name) {
116
+ Initialize();
117
+ }
118
+
119
+ ~BlockCodeGen() override;
120
+
121
+ void call(const std::vector<CallArg>& args) override;
122
+ void call_raw(const std::vector<void*>& args) override;
123
+
124
+ void Initialize();
125
+
126
+ std::string getCodeText(const std::string& attr = "") override {
127
+ return oss_.str();
128
+ }
129
+
130
+ private:
131
+ UniqueNameManager* name_manager() {
132
+ if (!printer_) {
133
+ throw std::runtime_error("Null IRPrinter is not expected");
134
+ }
135
+ return printer_->name_manager();
136
+ }
137
+
138
+ std::ostream& os() {
139
+ return printer_->os();
140
+ }
141
+
142
+ std::ostringstream oss_;
143
+ std::unique_ptr<BlockPrinter> printer_;
144
+ std::unique_ptr<BlockAnalysis> block_analysis_;
145
+
146
+ std::string GetUniqueFuncName(const std::string& func_prefix);
147
+ };
148
+ } // namespace tensorexpr
149
+ } // namespace jit
150
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/bounds_inference.h ADDED
@@ -0,0 +1,80 @@
1
+ #pragma once
2
+
3
+ #include <map>
4
+ #include <unordered_map>
5
+ #include <vector>
6
+
7
+ #include <torch/csrc/Export.h>
8
+ #include <torch/csrc/jit/tensorexpr/mem_dependency_checker.h>
9
+
10
+ namespace torch {
11
+ namespace jit {
12
+ namespace tensorexpr {
13
+
14
+ class Expr;
15
+ class Buf;
16
+ class Stmt;
17
+
18
+ enum C10_API_ENUM TensorAccessKind { kLoad, kStore, kMutate };
19
+
20
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
21
+ struct TORCH_API TensorAccessBoundsInfo {
22
+ TensorAccessKind kind;
23
+ std::vector<ExprPtr> start;
24
+ std::vector<ExprPtr> stop;
25
+ };
26
+
27
+ using BoundsInfo =
28
+ std::unordered_map<BufPtr, std::vector<TensorAccessBoundsInfo>>;
29
+
30
+ TORCH_API BoundsInfo inferBounds(StmtPtr s, bool distinctAccessKinds = true);
31
+
32
+ // Bounds inference caching the analysis. The MemDependencyChecker must already
33
+ // have been run.
34
+ TORCH_API BoundsInfo getInferredBounds(
35
+ analysis::MemDependencyChecker& analyzer,
36
+ StmtPtr s,
37
+ bool distinctAccessKinds = true);
38
+ TORCH_API BoundsInfo getInferredBounds(
39
+ analysis::MemDependencyChecker& analyzer,
40
+ ExprPtr e,
41
+ bool distinctAccessKinds = true);
42
+
43
+ TORCH_API void printBoundsInfo(const BoundsInfo& v);
44
+
45
+ TORCH_API std::vector<ExprPtr> getBoundExtents(
46
+ const std::vector<TensorAccessBoundsInfo>& infos);
47
+
48
+ // The kind of dependency found, in increasing order of exclusivity.
49
+ enum class HazardKind {
50
+ ReadAfterWrite,
51
+ WriteAfterRead,
52
+ WriteAfterWrite,
53
+ NoDependency,
54
+ };
55
+ TORCH_API HazardKind getPotentialHazards(
56
+ analysis::MemDependencyChecker& analyzer,
57
+ StmtPtr A,
58
+ StmtPtr B);
59
+
60
+ // Returns true if there is a conflicting overlap between accesses in
61
+ // statements A and B. A conflicting overlap is an overlap in buffer accesses
62
+ // where at least one of the accesses is a Store.
63
+ TORCH_API bool hasConflictingOverlap(
64
+ analysis::MemDependencyChecker& analyzer,
65
+ StmtPtr A,
66
+ StmtPtr B);
67
+ // Same as above, between accesses in stores S1 and S2.
68
+ TORCH_API bool isOverlapping(
69
+ analysis::MemDependencyChecker& analyzer,
70
+ StorePtr S1,
71
+ StorePtr S2);
72
+ // Same as above, between accesses in store S and load L.
73
+ TORCH_API bool isOverlapping(
74
+ analysis::MemDependencyChecker& analyzer,
75
+ StorePtr S,
76
+ LoadPtr L);
77
+
78
+ } // namespace tensorexpr
79
+ } // namespace jit
80
+ } // namespace torch
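
HazardKind names the three classic dependence types between an earlier statement A and a later statement B. A toy classifier showing how those categories relate to which of the two overlapping accesses writes; this illustrates only the taxonomy, not the MemDependencyChecker analysis.

#include <cassert>

enum class HazardKind { ReadAfterWrite, WriteAfterRead, WriteAfterWrite, NoDependency };

// A runs before B. Pick the dependency kind from which access writes.
HazardKind classify(bool a_writes, bool b_writes, bool overlapping) {
  if (!overlapping || (!a_writes && !b_writes)) {
    return HazardKind::NoDependency; // disjoint accesses or read/read
  }
  if (a_writes && b_writes) {
    return HazardKind::WriteAfterWrite;
  }
  return a_writes ? HazardKind::ReadAfterWrite   // B reads what A wrote
                  : HazardKind::WriteAfterRead;  // B overwrites what A read
}

int main() {
  assert(classify(true, false, true) == HazardKind::ReadAfterWrite);
  assert(classify(false, true, true) == HazardKind::WriteAfterRead);
  assert(classify(true, true, true) == HazardKind::WriteAfterWrite);
  assert(classify(true, true, false) == HazardKind::NoDependency);
}
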
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/bounds_overlap.h ADDED
@@ -0,0 +1,128 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/tensorexpr/expr.h>
4
+ #include <torch/csrc/jit/tensorexpr/ir.h>
5
+
6
+ #include <deque>
7
+ #include <utility>
8
+ #include <vector>
9
+
10
+ namespace torch {
11
+ namespace jit {
12
+ namespace tensorexpr {
13
+ namespace analysis {
14
+
15
+ // A simple class containing the start and end of a range in a single dimension.
16
+ struct TORCH_API Bound {
17
+ ExprPtr start{nullptr};
18
+ ExprPtr end{nullptr};
19
+
20
+ // This stores whether or not the start and end of this Bound have previously
21
+ // been swapped. This occurs when the bound is in a loop with a negative
22
+ // stride.
23
+ bool swapped{false};
24
+
25
+ Bound() = default;
26
+ Bound(ExprPtr s, ExprPtr e) : start(std::move(s)), end(std::move(e)) {}
27
+
28
+ void print() const;
29
+ bool equals(const Bound& other) const;
30
+
31
+ // The comparison operators are conservative. If the compare operator returns
32
+ // true, it means that all the elements satisfy the logical expression. But
33
+ // a false result does not mean the opposite comparison is satisfied. It might
34
+ // be, but not always.
35
+ bool operator==(const Bound& other) const;
36
+ bool operator!=(const Bound& other) const;
37
+ bool operator<(const Bound& other) const;
38
+ bool operator<=(const Bound& other) const;
39
+ bool operator>(const Bound& other) const;
40
+ bool operator>=(const Bound& other) const;
41
+
42
+ void swap() {
43
+ std::swap(start, end);
44
+ swapped = !swapped;
45
+ }
46
+ };
47
+
48
+ struct BoundHash {
49
+ size_t operator()(const Bound& b) const {
50
+ return std::hash<ExprPtr>()(b.start) ^ std::hash<ExprPtr>()(b.end);
51
+ }
52
+ };
53
+
54
+ // The type of overlap found. Each condition is true only if none of the
55
+ // previous conditions hold.
56
+ // ContainedOrEqual: All elements in the Bound A are in the Bound B (this
57
+ // includes the case where the bounds are equal).
58
+ // Contains: All elements in the Bound B are in the Bound A.
59
+ // PartialOverlap: Any elements in the Bound B are in the Bound A.
60
+ // NoOverlap: No elements in the Bound A are in the bound B.
61
+ enum class OverlapKind {
62
+ ContainedOrEqual,
63
+ Contains,
64
+ PartialOverlap,
65
+ NoOverlap
66
+ };
67
+
68
+ // The Bound comparison result.
69
+ // True: Every Bound element always satisfies the given comparison operator
70
+ // False: Every Bound element always does NOT satisfy the given comparison
71
+ // operator
72
+ // NotDetermined: Some elements satisfy the given comparison operator and
73
+ // some elements not
74
+ enum class CmpEvalResult { True, False, NotDetermined };
75
+
76
+ // Returns the kind of overlap between Bound A and Bound B in a single
77
+ // dimension.
78
+ OverlapKind TORCH_API boundOverlap(Bound A, Bound B);
79
+
80
+ // The comparison is conservative and the compare result is deterministic.
81
+ // It means that every element of the Bound to be compared needs to satisfy
82
+ // the given comparison operator.
83
+ CmpEvalResult TORCH_API compareBound(
84
+ const Bound& a,
85
+ const Bound& b,
86
+ const CompareSelectOperation& cmp_op);
87
+
88
+ // A multi dimensional bound representing the bound of a set of indices.
89
+ using IndexBounds = std::vector<Bound>;
90
+
91
+ // Returns true if two IndexBounds are equivalent.
92
+ bool TORCH_API indexBoundsEquals(const IndexBounds& A, const IndexBounds& B);
93
+
94
+ // Flattens a multi dimensional bound to a single dimension. The IndexBounds "a"
95
+ // *must* encapsulate the entire range of the buffer.
96
+ Bound TORCH_API flattenBounds(const IndexBounds& a);
97
+
98
+ // Determines the kind of overlap in X dimensions.
99
+ OverlapKind TORCH_API overlaps(const IndexBounds& a, const IndexBounds& b);
100
+
101
+ // Returns the Bound slices created by subtracting bound B from bound A.
102
+ // Multiple Bounds can be returned in the case where B slices A into two
103
+ // distinct regions with no overlap.
104
+ //
105
+ // For example:
106
+ // subtractBound((0, 10), (2, 4)) => [(0, 1), (5, 10)]
107
+ // bound A: (0, 10)
108
+ // bound B: (2, 4)
109
+ // If we remove slice (2, 4) from the slice (0, 10), we will be left
110
+ // with 2 slices, one at the start (0, 1), and one at the end (5, 10).
111
+ // So, the result of this subtraction is [(0, 1), (5, 10)].
112
+ //
113
+ // Note: this doesn't use IndexBounds because the Bounds returned do not
114
+ // represent multiple different dimensions.
115
+ std::vector<Bound> TORCH_API subtractBound(Bound a, Bound b);
116
+
117
+ // Returns the bound slices created by subtracting the IndexBounds B from A.
118
+ std::vector<IndexBounds> TORCH_API subtractIndicesBounds(
119
+ const IndexBounds& A,
120
+ const IndexBounds& B,
121
+ OverlapKind overlap);
122
+ std::vector<IndexBounds> TORCH_API
123
+ subtractIndicesBounds(const IndexBounds& A, const IndexBounds& B);
124
+
125
+ } // namespace analysis
126
+ } // namespace tensorexpr
127
+ } // namespace jit
128
+ } // namespace torch
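
The subtractBound example documented above ((0, 10) minus (2, 4) leaving (0, 1) and (5, 10)) can be reproduced with plain integer bounds. A small standalone version over inclusive integer ranges; IntBound and this function are illustrative only, since the real routine works on symbolic ExprPtr bounds.

#include <cassert>
#include <utility>
#include <vector>

using IntBound = std::pair<long, long>; // inclusive [start, end]

// Remove the range b from the range a, returning the surviving slices.
std::vector<IntBound> subtractBound(IntBound a, IntBound b) {
  std::vector<IntBound> out;
  if (b.second < a.first || b.first > a.second) {
    return {a}; // no overlap: nothing removed
  }
  if (b.first > a.first) {
    out.push_back({a.first, b.first - 1}); // slice to the left of b
  }
  if (b.second < a.second) {
    out.push_back({b.second + 1, a.second}); // slice to the right of b
  }
  return out; // empty when b fully covers a
}

int main() {
  auto r = subtractBound({0, 10}, {2, 4});
  assert(r.size() == 2);
  assert(r[0] == IntBound(0, 1) && r[1] == IntBound(5, 10));
}
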
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/codegen.h ADDED
@@ -0,0 +1,283 @@
1
+ #pragma once
2
+
3
+ #include <ATen/ATen.h>
4
+ #include <torch/csrc/jit/tensorexpr/ir.h>
5
+ #include <torch/csrc/jit/tensorexpr/tensor.h>
6
+
7
+ #include <utility>
8
+
9
+ namespace torch {
10
+ namespace jit {
11
+ namespace tensorexpr {
12
+
13
+ template <typename T>
14
+ class PaddedBuffer;
15
+
16
+ class TORCH_API CodeGen {
17
+ public:
18
+ class BufferArg;
19
+ class CallArg;
20
+
21
+ template <typename... Ts>
22
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
23
+ CodeGen(StmtPtr stmt, Ts... ts)
24
+ : stmt_(std::move(stmt)), buffer_args_({BufferArg(ts)...}) {}
25
+
26
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
27
+ CodeGen(
28
+ StmtPtr stmt,
29
+ std::vector<BufferArg> buffer_args,
30
+ at::Device device = at::kCPU,
31
+ std::string kernel_func_name = "func");
32
+
33
+ virtual ~CodeGen() = default;
34
+
35
+ StmtPtr stmt() const {
36
+ return stmt_;
37
+ }
38
+
39
+ void set_stmt(StmtPtr s) {
40
+ stmt_ = s;
41
+ }
42
+
43
+ void apply_mutator(IRMutator* mutator) {
44
+ stmt_ = stmt_->accept_mutator(mutator);
45
+ }
46
+
47
+ void apply_visitor(IRVisitor* visitor) {
48
+ stmt_->accept(visitor);
49
+ }
50
+
51
+ std::vector<BufferArg>& buffer_args() {
52
+ return buffer_args_;
53
+ }
54
+
55
+ const std::vector<BufferArg>& buffer_args() const {
56
+ return buffer_args_;
57
+ }
58
+
59
+ at::Device device() {
60
+ return device_;
61
+ }
62
+
63
+ // This function returns the generated code as
64
+ // a string.
65
+ virtual std::string getCodeText(const std::string& attr = "") {
66
+ return ("");
67
+ }
68
+
69
+ // TODO: Figure out how to unify these call interfaces.
70
+
71
+ /// Call a function with a vector of CallArgs, which are tagged
72
+ /// unions that properly type the arguments.
73
+ virtual void call(const std::vector<CallArg>& args) = 0;
74
+
75
+ /// Call a function faster than a regular `call` by assuming that
76
+ /// the generated kernel already knows the type of the arguments, so
77
+ /// they can be type-punned with `void*`s.
78
+ virtual void call_raw(const std::vector<void*>& args) = 0;
79
+
80
+ /// Call a function even faster than a regular call, by assuming
81
+ /// that the number of thread blocks can be derived from `numel` via
82
+ /// a simple division, rather than evaluating an expression.
83
+ virtual void call_with_numel(void** args, int64_t numel);
84
+
85
+ virtual at::Tensor empty_strided(
86
+ c10::IntArrayRef size,
87
+ c10::IntArrayRef stride,
88
+ c10::optional<c10::ScalarType> dtype_opt,
89
+ c10::optional<c10::Layout> layout_opt,
90
+ c10::optional<c10::Device> device_opt,
91
+ c10::optional<bool> pin_memory_opt) {
92
+ return at::empty_strided(
93
+ size, stride, dtype_opt, layout_opt, device_opt, pin_memory_opt);
94
+ }
95
+
96
+ const std::string& kernel_func_name() const {
97
+ return kernel_func_name_;
98
+ }
99
+
100
+ void allocIntermediateBufs();
101
+
102
+ protected:
103
+ static void* argToPtr(const BufferArg& bufferArg, const CallArg& callArg);
104
+
105
+ private:
106
+ StmtPtr stmt_;
107
+ std::vector<BufferArg> buffer_args_;
108
+ at::Device device_ = at::kCPU;
109
+ std::string kernel_func_name_ = "func";
110
+ };
111
+
112
+ class TORCH_API ExtCallMemoryReuse : public IRMutator {
113
+ static std::unordered_map<std::string, std::string> makeExtCallFuncNameMap();
114
+ static const std::unordered_map<std::string, std::string> extCallFuncNameMap_;
115
+
116
+ public:
117
+ explicit ExtCallMemoryReuse(
118
+ const std::vector<CodeGen::BufferArg>& bufferArgs);
119
+ ~ExtCallMemoryReuse() override = default;
120
+ StmtPtr mutate(ExternalCallPtr v) override;
121
+
122
+ private:
123
+ std::unordered_set<BufPtr> bufferArgs_;
124
+ };
125
+
126
+ class CodeGen::BufferArg {
127
+ public:
128
+ BufferArg(const Tensor& tensor) : buf_(tensor.buf()) {}
129
+ BufferArg(const VarHandle& var) : var_(var.node()), isVar_(true) {}
130
+ BufferArg(const BufHandle& buf) : buf_(buf.node()) {}
131
+ BufferArg(BufPtr buf) : buf_(std::move(buf)) {}
132
+
133
+ VarPtr var() const {
134
+ return isVar_ ? var_ : buf_->base_handle();
135
+ }
136
+
137
+ BufPtr buf() const {
138
+ return buf_;
139
+ }
140
+
141
+ bool isVar() const {
142
+ return isVar_;
143
+ }
144
+
145
+ Dtype dtype() const {
146
+ return isVar_ ? var_->dtype() : buf_->dtype();
147
+ }
148
+
149
+ private:
150
+ VarPtr var_ = nullptr;
151
+ BufPtr buf_ = nullptr;
152
+ bool isVar_ = false;
153
+ };
154
+
155
+ class CodeGen::CallArg {
156
+ public:
157
+ template <typename T>
158
+ CallArg(const PaddedBuffer<T>& buffer);
159
+
160
+ template <typename T>
161
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init,cppcoreguidelines-pro-type-const-cast)
162
+ CallArg(const std::vector<T>& buffer)
163
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
164
+ : data_(const_cast<T*>(buffer.data())) {}
165
+
166
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
167
+ CallArg(void* ptr) : data_(ptr) {}
168
+
169
+ #define ARG_TYPE_CTOR(Type, Name) \
170
+ CallArg(Type v) { \
171
+ memcpy(buffer_, &v, sizeof(Type)); \
172
+ data_ = (void*)buffer_; \
173
+ }
174
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
175
+ AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, ARG_TYPE_CTOR);
176
+ #undef ARG_TYPE_CTOR
177
+
178
+ void* data() const {
179
+ return data_;
180
+ }
181
+
182
+ CallArg(const CallArg& rhs) {
183
+ if (rhs.data_ == rhs.buffer_) {
184
+ memcpy(this->buffer_, rhs.buffer_, sizeof(rhs.buffer_));
185
+ this->data_ = (void*)(this->buffer_);
186
+ } else {
187
+ this->data_ = rhs.data_;
188
+ }
189
+ }
190
+
191
+ CallArg& operator=(const CallArg& rhs) {
192
+ if (rhs.data_ == rhs.buffer_) {
193
+ memcpy(this->buffer_, rhs.buffer_, sizeof(rhs.buffer_));
194
+ this->data_ = (void*)(this->buffer_);
195
+ } else {
196
+ this->data_ = rhs.data_;
197
+ }
198
+ return *this;
199
+ }
200
+
201
+ #define ARG_PTR_DEFINE(Type, Name) \
202
+ Type* Name##Ptr() const { \
203
+ TORCH_INTERNAL_ASSERT(data_ == (void*)buffer_); \
204
+ return (Type*)data_; \
205
+ }
206
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
207
+ AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, ARG_PTR_DEFINE);
208
+ #undef ARG_PTR_DEFINE
209
+
210
+ private:
211
+ void* data_;
212
+ // For a scalar argument, CallArg stores the value itself rather than a
+ // pointer to it. A pointer is only 32 bits wide on a 32-bit platform, so it
+ // could not hold a scalar wider than 32 bits (such as double or int64_t).
+ // Hence we keep a dedicated 8-byte buffer that can hold the scalar value
+ // regardless of its bit width, and point data_ at that buffer.
+ char buffer_[8] = {0}; // 64bits
218
+ };
219
+
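As an aside, here is a minimal hedged sketch of how the scalar buffer in CallArg behaves, assuming a build against these NNC headers; the function and variable names below are illustrative and not part of the header.

    #include <torch/csrc/jit/tensorexpr/codegen.h>
    #include <vector>

    using torch::jit::tensorexpr::CodeGen;

    void callarg_scalar_sketch() {
      // A double is copied into the 8-byte buffer_ and data_ is pointed at it,
      // so the value fits even where a pointer is only 32 bits wide.
      CodeGen::CallArg scalar_arg(3.14);
      double stored = *scalar_arg.DoublePtr(); // reads back 3.14

      // A vector argument is passed by pointer instead: data_ simply aliases
      // the vector's storage and the internal buffer is left unused.
      std::vector<float> values(16, 1.0f);
      CodeGen::CallArg vector_arg(values);
      void* raw = vector_arg.data(); // same address as values.data()
      (void)stored;
      (void)raw;
    }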
220
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
221
+ class RegisterCodeGenList {
222
+ public:
223
+ TORCH_API static RegisterCodeGenList& GetInstance() {
224
+ static RegisterCodeGenList codegen_list;
225
+ return codegen_list;
226
+ }
227
+
228
+ using StmtFactoryMethod = std::function<std::unique_ptr<CodeGen>(
229
+ StmtPtr stmt,
230
+ const std::vector<CodeGen::BufferArg>&,
231
+ at::Device device,
232
+ const std::string& kernel_func_name)>;
233
+
234
+ TORCH_API StmtFactoryMethod FindStmtFactoryMethod(const std::string& name);
235
+ RegisterCodeGenList(const RegisterCodeGenList&) = delete;
236
+ RegisterCodeGenList& operator=(const RegisterCodeGenList&) = delete;
237
+
238
+ private:
239
+ template <class CodeGenType>
240
+ friend class RegisterCodeGen;
241
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
242
+ RegisterCodeGenList() = default;
243
+ TORCH_API void AddStmtFactoryMethod(
244
+ const std::string& name,
245
+ const StmtFactoryMethod& stmt_factory_method);
246
+
247
+ std::unordered_map<std::string, StmtFactoryMethod> stmt_factory_methods_;
248
+ };
249
+
250
+ template <class CodeGenType>
251
+ class RegisterCodeGen {
252
+ public:
253
+ explicit RegisterCodeGen(const std::string& name) {
254
+ RegisterCodeGenList& codegen_list = RegisterCodeGenList::GetInstance();
255
+ codegen_list.AddStmtFactoryMethod(
256
+ name,
257
+ [](StmtPtr stmt,
258
+ const std::vector<CodeGen::BufferArg>& params,
259
+ at::Device device,
260
+ const std::string& kernel_func_name) {
261
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
262
+ std::unique_ptr<CodeGen> method(
263
+ new CodeGenType(stmt, params, device, kernel_func_name));
264
+ return method;
265
+ });
266
+ }
267
+ };
268
+
269
+ TORCH_API std::unique_ptr<CodeGen> CreateCodeGen(
270
+ const std::string& name,
271
+ StmtPtr stmt,
272
+ const std::vector<CodeGen::BufferArg>& params,
273
+ at::Device device = at::kCPU,
274
+ const std::string& kernel_func_name = "func");
275
+
276
+ class TORCH_API GenericIntrinsicsExpander : public IRMutator {
277
+ protected:
278
+ ExprPtr mutate(IntrinsicsPtr v) override;
279
+ };
280
+
281
+ } // namespace tensorexpr
282
+ } // namespace jit
283
+ } // namespace torch
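To illustrate the registry machinery above, here is a hedged sketch of how a backend registers itself and how a caller constructs one by name. The registered name "simple_ir_eval" and the kernel name are assumptions used for illustration; the actual registrations live in the individual codegen translation units.

    #include <torch/csrc/jit/tensorexpr/codegen.h>
    #include <memory>
    #include <vector>

    namespace te = torch::jit::tensorexpr;

    // A backend registers itself once, typically at static-init time; the
    // factory lambda built by RegisterCodeGen forwards stmt, buffer args,
    // device and kernel name to the backend's constructor, e.g.
    //   static te::RegisterCodeGen<MyCodeGen> reg("my_codegen"); // hypothetical

    std::unique_ptr<te::CodeGen> make_backend(
        te::StmtPtr stmt,
        const std::vector<te::CodeGen::BufferArg>& args) {
      // CreateCodeGen looks the name up in RegisterCodeGenList and invokes the
      // stored StmtFactoryMethod; an unknown name is reported as an error.
      return te::CreateCodeGen("simple_ir_eval", stmt, args, at::kCPU, "my_kernel");
    }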
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/cpp_codegen.h ADDED
@@ -0,0 +1,102 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/tensorexpr/codegen.h>
4
+ #include <torch/csrc/jit/tensorexpr/ir_printer.h>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+ namespace tensorexpr {
9
+
10
+ class CppVarNameRewriter;
11
+
12
+ // Generates C++ code from the IR.
13
+ //
14
+ // Vector operations are unrolled.
15
+ // For example:
16
+ // C[Ramp(0, 1, 3)] = A[Ramp(0, 2, 3)] + B[Ramp(0, 3, 3)];
17
+ // is unrolled into:
18
+ // C[0] = A[0] + B[0];
19
+ // C[1] = A[2] + B[3];
20
+ // C[2] = A[4] + B[6];
21
+ class TORCH_API CppPrinter : public IRPrinter {
22
+ public:
23
+ explicit CppPrinter(std::ostream* os);
24
+ ~CppPrinter() override;
25
+
26
+ void printPrologue();
27
+
28
+ using IRPrinter::visit;
29
+
30
+ // Binary expressions.
31
+ void visit(ModPtr) override;
32
+ void visit(MaxPtr) override;
33
+ void visit(MinPtr) override;
34
+
35
+ // Conditional expressions.
36
+ void visit(CompareSelectPtr) override;
37
+ void visit(IfThenElsePtr) override;
38
+
39
+ // Tensor operations.
40
+ void visit(AllocatePtr) override;
41
+ void visit(FreePtr) override;
42
+ void visit(LoadPtr) override;
43
+ void visit(StorePtr) override;
44
+
45
+ // Casts.
46
+ void visit(CastPtr) override;
47
+ void visit(BitCastPtr) override;
48
+
49
+ // Calls.
50
+ void visit(IntrinsicsPtr) override;
51
+ void visit(ExternalCallPtr) override;
52
+
53
+ // Vars.
54
+ void visit(LetPtr) override;
55
+ void visit(VarPtr) override;
56
+
57
+ // Vector data types.
58
+ void visit(RampPtr) override;
59
+ void visit(BroadcastPtr) override;
60
+
61
+ private:
62
+ int lane_;
63
+ std::unordered_map<VarPtr, ExprPtr> vector_vars_;
64
+ };
65
+
66
+ class TORCH_API CppCodeGen : public CodeGen {
67
+ public:
68
+ CppCodeGen(
69
+ StmtPtr stmt,
70
+ const std::vector<BufferArg>& buffer_args,
71
+ at::Device device = at::kCPU,
72
+ const std::string& kernel_func_name = "func");
73
+
74
+ ~CppCodeGen() override;
75
+
76
+ void call(const std::vector<CallArg>& args) override;
77
+ void call_raw(const std::vector<void*>& args) override;
78
+
79
+ template <typename... Ts>
80
+ void operator()(const Ts&... ts) {
81
+ call(std::vector<CallArg>({CallArg(ts)...}));
82
+ }
83
+
84
+ std::string getCodeText(const std::string& attr = "") override {
85
+ return oss_.str();
86
+ }
87
+
88
+ private:
89
+ void init();
90
+
91
+ std::ostream& os() {
92
+ return printer_->os();
93
+ }
94
+
95
+ std::ostringstream oss_;
96
+ std::unique_ptr<CppPrinter> printer_;
97
+ std::unique_ptr<CppVarNameRewriter> var_name_rewriter_;
98
+ };
99
+
100
+ } // namespace tensorexpr
101
+ } // namespace jit
102
+ } // namespace torch
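A hedged sketch of driving CppCodeGen, assuming the IR builders from ir.h/expr.h that appear later in this commit; the buffer size and the kernel name "fill_kernel" are illustrative. getCodeText() returns whatever C++ the printer has emitted for the statement.

    #include <torch/csrc/jit/tensorexpr/cpp_codegen.h>
    #include <torch/csrc/jit/tensorexpr/ir.h>
    #include <string>

    namespace te = torch::jit::tensorexpr;

    std::string emit_cpp_for_fill() {
      // Build the loop `for i in [0, 64): C[i] = 1.f` directly from IR nodes.
      te::BufHandle c("C", {64}, te::kFloat);
      te::VarHandle i("i", te::kInt);
      te::StmtPtr body = c.store({i}, te::FloatImm::make(1.f));
      te::StmtPtr loop = te::For::make(i, 0, 64, body);

      // CppCodeGen prints the (unrolled) statement as plain C++ text.
      te::CppCodeGen cg(loop, {c}, at::kCPU, "fill_kernel");
      return cg.getCodeText();
    }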
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/cpp_intrinsics.h ADDED
@@ -0,0 +1,36 @@
+ #pragma once
+
+ namespace torch {
+ namespace jit {
+ namespace tensorexpr {
+
+ constexpr auto cpp_intrinsics_definition = R"(
+ namespace std {
+
+ template <typename T,
+ typename std::enable_if<std::is_floating_point<T>::value, int>::type = 0>
+ T rsqrt(T v) {
+ return 1.0f / std::sqrt(v);
+ }
+
+ template <typename T,
+ typename std::enable_if<std::is_floating_point<T>::value, int>::type = 0>
+ T frac(T v) {
+ T intpart;
+ return std::modf(v, &intpart);
+ }
+
+ template <typename From, typename To>
+ To bitcast(const From& v) {
+ assert(sizeof(To) == sizeof(From));
+ To res;
+ std::memcpy(&res, &v, sizeof(From));
+ return res;
+ }
+
+ } // namespace std
+ )";
+
+ } // namespace tensorexpr
+ } // namespace jit
+ } // namespace torch
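For context, a standalone host-side analogue of the bitcast helper injected above; the memcpy idiom is what makes the byte reinterpretation well-defined. This sketch is self-contained and independent of the NNC headers.

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Reinterpret the bytes of one type as another type of the same size.
    template <typename From, typename To>
    To bitcast(const From& v) {
      static_assert(sizeof(To) == sizeof(From), "bitcast requires equal sizes");
      To res;
      std::memcpy(&res, &v, sizeof(From));
      return res;
    }

    int main() {
      float f = 1.0f;
      // 1.0f has the IEEE-754 single-precision bit pattern 0x3f800000.
      std::uint32_t bits = bitcast<float, std::uint32_t>(f);
      assert(bits == 0x3f800000u);
      return 0;
    }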
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/cuda_codegen.h ADDED
@@ -0,0 +1,295 @@
1
+ #pragma once
2
+
3
+ #include <unordered_map>
4
+ #include <unordered_set>
5
+
6
+ #include <ATen/ATen.h>
7
+ #include <ATen/cuda/CUDAContext.h>
8
+ #include <ATen/cuda/nvrtc_stub/ATenNVRTC.h>
9
+ #include <c10/cuda/CUDACachingAllocator.h>
10
+ #include <c10/cuda/CUDAGuard.h>
11
+ #include <torch/csrc/jit/resource_guard.h>
12
+ #include <torch/csrc/jit/tensorexpr/codegen.h>
13
+ #include <torch/csrc/jit/tensorexpr/eval.h>
14
+ #include <torch/csrc/jit/tensorexpr/ir.h>
15
+ #include <torch/csrc/jit/tensorexpr/ir_printer.h>
16
+ #include <torch/csrc/jit/tensorexpr/ir_visitor.h>
17
+ #include <torch/csrc/jit/tensorexpr/llvm_codegen.h>
18
+ #include <torch/csrc/jit/tensorexpr/unique_name_manager.h>
19
+
20
+ namespace torch {
21
+ namespace jit {
22
+ namespace tensorexpr {
23
+
24
+ // A class that analyzes the given program for details relevant to CUDA backends.
25
+ class CudaAnalysis : public IRVisitor {
26
+ public:
27
+ CudaAnalysis() {
28
+ gpu_block_extents_ = {alloc<IntImm>(1), alloc<IntImm>(1), alloc<IntImm>(1)};
29
+ gpu_thread_extents_ = {
30
+ alloc<IntImm>(1), alloc<IntImm>(1), alloc<IntImm>(1)};
31
+ }
32
+ bool is_buf_store_target(BufPtr buf) const {
33
+ return store_targets_.count(buf) > 0;
34
+ }
35
+
36
+ const std::unordered_set<VarPtr>& thread_local_bufs() const {
37
+ return thread_local_bufs_;
38
+ }
39
+
40
+ const std::unordered_set<VarPtr>& cross_block_bufs() const {
41
+ return cross_block_bufs_;
42
+ }
43
+
44
+ const std::vector<ExprPtr>& gpu_block_extents() const {
45
+ return gpu_block_extents_;
46
+ }
47
+
48
+ const std::vector<ExprPtr>& gpu_thread_extents() const {
49
+ return gpu_thread_extents_;
50
+ }
51
+
52
+ private:
53
+ void visit(StorePtr v) override {
54
+ store_targets_.insert(v->buf());
55
+ }
56
+
57
+ void visit(AllocatePtr v) override;
58
+ void visit(FreePtr v) override;
59
+ void visit(PlacementAllocatePtr v) override;
60
+ void visit(ForPtr v) override;
61
+
62
+ std::unordered_set<BufPtr> store_targets_;
63
+ std::unordered_set<VarPtr> thread_local_bufs_;
64
+ std::unordered_set<VarPtr> cross_block_bufs_;
65
+
66
+ std::vector<ExprPtr> gpu_block_extents_;
67
+ std::vector<ExprPtr> gpu_thread_extents_;
68
+ };
69
+
70
+ // An IRMutator that replaces binding loop options with CUDA metavars, and masks
+ // statement blocks that should execute with a smaller reach than the launch
+ // parameter extent.
+ //
+ // We do this by segmenting each block into chunks that should share the same
+ // execution parameters; if those parameters differ from the maximum, each
+ // dimension is masked.
76
+ class GPUMetaVarRewriter : public IRMutator {
77
+ public:
78
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
79
+ explicit GPUMetaVarRewriter(const CudaAnalysis* cuda_analysis)
80
+ : cuda_analysis_(cuda_analysis) {
81
+ gpu_block_vars_ = {
82
+ alloc<Var>("blockIdx.x", kInt),
83
+ alloc<Var>("blockIdx.y", kInt),
84
+ alloc<Var>("blockIdx.z", kInt)};
85
+ gpu_thread_vars_ = {
86
+ alloc<Var>("threadIdx.x", kInt),
87
+ alloc<Var>("threadIdx.y", kInt),
88
+ alloc<Var>("threadIdx.z", kInt)};
89
+
90
+ current_block_reach_ = {
91
+ alloc<IntImm>(1), alloc<IntImm>(1), alloc<IntImm>(1)};
92
+ current_thread_reach_ = {
93
+ alloc<IntImm>(1), alloc<IntImm>(1), alloc<IntImm>(1)};
94
+ }
95
+
96
+ StmtPtr mutate(ForPtr v) override;
97
+ StmtPtr mutate(BlockPtr v) override;
98
+
99
+ const std::vector<VarPtr>& gpu_block_vars() const {
100
+ return gpu_block_vars_;
101
+ }
102
+
103
+ const std::vector<VarPtr>& gpu_thread_vars() const {
104
+ return gpu_thread_vars_;
105
+ }
106
+
107
+ const std::vector<ExprPtr>& gpu_block_extents() const {
108
+ return cuda_analysis_->gpu_block_extents();
109
+ }
110
+
111
+ const std::vector<ExprPtr>& gpu_thread_extents() const {
112
+ return cuda_analysis_->gpu_thread_extents();
113
+ }
114
+
115
+ private:
116
+ // When processing a block, stores the contents of each sub-segment.
117
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
118
+ class Segment {
119
+ public:
120
+ void reset(bool mask) {
121
+ stmts_.clear();
122
+ mask_ = mask;
123
+ }
124
+
125
+ bool empty() const {
126
+ return stmts_.empty();
127
+ }
128
+
129
+ std::vector<StmtPtr>& stmts() {
130
+ return stmts_;
131
+ }
132
+ bool mask() {
133
+ return mask_;
134
+ }
135
+
136
+ private:
137
+ std::vector<StmtPtr> stmts_;
138
+ bool mask_{true};
139
+ };
140
+
141
+ // Returns true if the current execution scope is equivalent to the launch
142
+ // parameters.
143
+ bool isFullExtent();
144
+
145
+ std::vector<VarPtr> gpu_block_vars_;
146
+ std::vector<VarPtr> gpu_thread_vars_;
147
+
148
+ std::vector<ExprPtr> current_block_reach_;
149
+ std::vector<ExprPtr> current_thread_reach_;
150
+
151
+ const CudaAnalysis* cuda_analysis_;
152
+ };
153
+
154
+ // A class that overrides the underlying IRPrinter to produce Cuda C.
155
+ class CudaPrinter : public IRPrinter {
156
+ public:
157
+ explicit CudaPrinter(
158
+ std::ostream* os,
159
+ const CudaAnalysis* cuda_analysis,
160
+ bool has_random)
161
+ : IRPrinter(*os), cuda_analysis_(cuda_analysis) {
162
+ if (has_random) {
163
+ rand_func_ = alloc<Var>("rand", kHandle);
164
+ }
165
+ }
166
+
167
+ void visit(CastPtr v) override;
168
+ void visit(IntrinsicsPtr v) override;
169
+ void visit(ForPtr v) override;
170
+
171
+ void visit(LoadPtr v) override;
172
+ void visit(StorePtr v) override;
173
+ void visit(AtomicAddPtr v) override;
174
+ void visit(MaxPtr v) override;
175
+ void visit(MinPtr v) override;
176
+ void visit(IfThenElsePtr v) override;
177
+ void visit(BlockPtr v) override;
178
+ void visit(AllocatePtr v) override;
179
+ void visit(FreePtr v) override;
180
+ void visit(LetPtr v) override;
181
+
182
+ void visit(ExternalCallPtr v) override;
183
+
184
+ VarPtr rand_func() const {
185
+ return rand_func_;
186
+ }
187
+
188
+ std::string dtypeToCppString(const Dtype& dtype) override;
189
+
190
+ using IRPrinter::name_manager;
191
+ using IRPrinter::visit;
192
+
193
+ private:
194
+ VarPtr rand_func_;
195
+ const CudaAnalysis* cuda_analysis_;
196
+
197
+ void print_flat_alloc(AllocatePtr alloc);
198
+ };
199
+
200
+ // Construct Cuda C from the buffer and tensor input, and invoke the kernel
201
+ // when real arguments are provided.
202
+ class TORCH_CUDA_CU_API CudaCodeGen : public CodeGen {
203
+ public:
204
+ template <typename... Ts>
205
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
206
+ CudaCodeGen(StmtPtr stmt, Ts... ts)
207
+ : CodeGen(
208
+ stmt,
209
+ std::vector<BufferArg>({BufferArg(ts)...}),
210
+ at::Device(at::kCUDA, at::cuda::current_device())) {
211
+ Initialize();
212
+ }
213
+
214
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
215
+ CudaCodeGen(
216
+ StmtPtr stmt,
217
+ const std::vector<BufferArg>& buffer_args,
218
+ at::Device device = at::Device(at::kCUDA, at::cuda::current_device()),
219
+ const std::string& kernel_func_name = "func")
220
+ : CodeGen(stmt, buffer_args, device, kernel_func_name) {
221
+ Initialize();
222
+ }
223
+
224
+ ~CudaCodeGen() override;
225
+
226
+ void call(const std::vector<CallArg>& args) override;
227
+ void call_raw(const std::vector<void*>& args) override;
228
+ void call_with_numel(void** args, int64_t numel) override;
229
+
230
+ template <typename... Ts>
231
+ void operator()(const Ts&... ts) {
232
+ call(std::vector<CallArg>({CallArg(ts)...}));
233
+ }
234
+
235
+ at::Tensor empty_strided(
236
+ c10::IntArrayRef size,
237
+ c10::IntArrayRef stride,
238
+ c10::optional<c10::ScalarType> dtype_opt,
239
+ c10::optional<c10::Layout> layout_opt,
240
+ c10::optional<c10::Device> device_opt,
241
+ c10::optional<bool> pin_memory_opt) override;
242
+
243
+ const std::vector<ExprPtr>& gpu_block_extents() const {
244
+ return cuda_analysis_->gpu_block_extents();
245
+ }
246
+
247
+ const std::vector<ExprPtr>& gpu_thread_extents() const {
248
+ return cuda_analysis_->gpu_thread_extents();
249
+ }
250
+
251
+ std::string getCodeText(const std::string& attr = "") override {
252
+ return oss_.str();
253
+ }
254
+
255
+ private:
256
+ void Initialize();
257
+
258
+ void CompileToNVRTC(const std::string& code, const std::string& func_name);
259
+
260
+ UniqueNameManager* name_manager() {
261
+ if (!printer_) {
262
+ throw std::runtime_error("Null IRPrinter is not expected");
263
+ }
264
+ return printer_->name_manager();
265
+ }
266
+
267
+ std::ostream& os() {
268
+ return printer_->os();
269
+ }
270
+
271
+ std::ostringstream oss_;
272
+ std::unique_ptr<CudaPrinter> printer_;
273
+ std::unique_ptr<CudaAnalysis> cuda_analysis_;
274
+ std::unique_ptr<GPUMetaVarRewriter> metavar_rewriter_;
275
+ std::unordered_set<std::string> taken_func_names;
276
+ std::mutex eval_lock_;
277
+ CUfunction function_;
278
+ bool has_random_ = false;
279
+ int thread_block_size_ = -1;
280
+
281
+ std::vector<bool> arg_pos_in_extents_;
282
+ #ifdef TORCH_ENABLE_LLVM
283
+ std::vector<ExprEval<LLVMCodeGen>> block_extents_eval_;
284
+ std::vector<ExprEval<LLVMCodeGen>> thread_extents_eval_;
285
+ #else
286
+ std::vector<ExprEval<SimpleIREvaluator>> block_extents_eval_;
287
+ std::vector<ExprEval<SimpleIREvaluator>> thread_extents_eval_;
288
+ #endif
289
+
290
+ std::string GetUniqueFuncName(const std::string& func_prefix);
291
+ };
292
+
293
+ } // namespace tensorexpr
294
+ } // namespace jit
295
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/cuda_random.h ADDED
@@ -0,0 +1,104 @@
1
+ #pragma once
2
+
3
+ namespace torch {
4
+ namespace jit {
5
+ namespace tensorexpr {
6
+
7
+ constexpr auto philox_random_string = R"(
8
+
9
+ class Philox {
10
+ public:
11
+ __device__ inline Philox(unsigned long long seed,
12
+ unsigned long long subsequence,
13
+ unsigned long long offset) {
14
+ key.x = (unsigned int)seed;
15
+ key.y = (unsigned int)(seed >> 32);
16
+ counter = make_uint4(0, 0, 0, 0);
17
+ counter.z = (unsigned int)(subsequence);
18
+ counter.w = (unsigned int)(subsequence >> 32);
19
+ STATE = 0;
20
+ incr_n(offset / 4);
21
+ }
22
+
23
+ __device__ inline unsigned long operator()() {
24
+ if(STATE == 0) {
25
+ uint4 counter_ = counter;
26
+ uint2 key_ = key;
27
+ for(int i = 0; i < 9; i++) {
28
+ counter_ = single_round(counter_, key_);
29
+ key_.x += (kPhilox10A); key_.y += (kPhilox10B);
30
+ }
31
+ output = single_round(counter_, key_);
32
+ incr();
33
+ }
34
+ unsigned long ret;
35
+ switch(STATE) {
36
+ case 0: ret = output.x; break;
37
+ case 1: ret = output.y; break;
38
+ case 2: ret = output.z; break;
39
+ case 3: ret = output.w; break;
40
+ }
41
+ STATE = (STATE + 1) % 4;
42
+ return ret;
43
+ }
44
+
45
+ private:
46
+ uint4 counter;
47
+ uint4 output;
48
+ uint2 key;
49
+ unsigned int STATE;
50
+ __device__ inline void incr_n(unsigned long long n) {
51
+ unsigned int nlo = (unsigned int)(n);
52
+ unsigned int nhi = (unsigned int)(n >> 32);
53
+ counter.x += nlo;
54
+ if (counter.x < nlo)
55
+ nhi++;
56
+ counter.y += nhi;
57
+ if (nhi <= counter.y)
58
+ return;
59
+ if (++counter.z)
60
+ return;
61
+ ++counter.w;
62
+ }
63
+ __device__ inline void incr() {
64
+ if (++counter.x)
65
+ return;
66
+ if (++counter.y)
67
+ return;
68
+ if (++counter.z)
69
+ return;
70
+ ++counter.w;
71
+ }
72
+ __device__ unsigned int mulhilo32(unsigned int a, unsigned int b,
73
+ unsigned int *result_high) {
74
+ *result_high = __umulhi(a, b);
75
+ return a*b;
76
+ }
77
+
78
+ __device__ inline uint4 single_round(uint4 ctr, uint2 key) {
79
+ unsigned int hi0;
80
+ unsigned int hi1;
81
+ unsigned int lo0 = mulhilo32(kPhiloxSA, ctr.x, &hi0);
82
+ unsigned int lo1 = mulhilo32(kPhiloxSB, ctr.z, &hi1);
83
+
84
+ uint4 ret = {hi1 ^ ctr.y ^ key.x, lo1, hi0 ^ ctr.w ^ key.y, lo0};
85
+ return ret;
86
+ }
87
+
88
+ static const unsigned long kPhilox10A = 0x9E3779B9;
89
+ static const unsigned long kPhilox10B = 0xBB67AE85;
90
+ static const unsigned long kPhiloxSA = 0xD2511F53;
91
+ static const unsigned long kPhiloxSB = 0xCD9E8D57;
92
+ };
93
+
94
+ // Inverse of 2^32.
95
+ #define M_RAN_INVM32 2.3283064e-10f
96
+ __device__ __inline__ float Uint32ToFloat(unsigned int x) {
97
+ return x * M_RAN_INVM32;
98
+ }
99
+
100
+ )";
101
+
102
+ } // namespace tensorexpr
103
+ } // namespace jit
104
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/eval.h ADDED
@@ -0,0 +1,346 @@
1
+ #pragma once
2
+
3
+ #include <cmath>
4
+ #include <cstring>
5
+ #include <type_traits>
6
+ #include <unordered_map>
7
+ #include <utility>
8
+ #include <vector>
9
+
10
+ #include <c10/macros/Macros.h>
11
+ #include <c10/util/Logging.h>
12
+ #include <c10/util/string_utils.h>
13
+ #include <torch/csrc/jit/tensorexpr/codegen.h>
14
+ #include <torch/csrc/jit/tensorexpr/exceptions.h>
15
+ #include <torch/csrc/jit/tensorexpr/ir.h>
16
+ #include <torch/csrc/jit/tensorexpr/ir_printer.h>
17
+ #include <torch/csrc/jit/tensorexpr/tensor.h>
18
+ #include <torch/csrc/jit/tensorexpr/types.h>
19
+ #include <torch/csrc/jit/tensorexpr/var_substitutor.h>
20
+
21
+ namespace torch {
22
+ namespace jit {
23
+ namespace tensorexpr {
24
+
25
+ class InterpValue {
26
+ public:
27
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
28
+ InterpValue() : dtype_(kInt) {
29
+ Intvalues.push_back(0);
30
+ }
31
+
32
+ template <typename T>
33
+ InterpValue(Dtype dtype, T v) : dtype_(dtype) {
34
+ #define TYPE_CASE(Type, Name) \
35
+ if (dtype == k##Name) { \
36
+ Name##values.push_back(v); \
37
+ return; \
38
+ }
39
+ AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, TYPE_CASE);
40
+ #undef TYPE_CASE
41
+ throw unsupported_dtype();
42
+ }
43
+
44
+ #define VALUE_CTOR(Type, Name) \
45
+ InterpValue(Type v) : dtype_(k##Name) { \
46
+ Name##values.push_back(v); \
47
+ }
48
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
49
+ AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, VALUE_CTOR);
50
+ #undef VALUE_CTOR
51
+
52
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
53
+ explicit InterpValue(c10::quint8 v) : dtype_(kQUInt8) {
54
+ QUInt8values.emplace_back(v.val_);
55
+ }
56
+
57
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
58
+ explicit InterpValue(c10::qint8 v) : dtype_(kQInt8) {
59
+ QInt8values.emplace_back(v.val_);
60
+ }
61
+
62
+ #define VALUE_VEC_CTOR(Type, Name) \
63
+ InterpValue(const std::vector<Type>& v) \
64
+ : dtype_(Dtype(k##Name, v.size())), Name##values(v) {}
65
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
66
+ AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, VALUE_VEC_CTOR);
67
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
68
+ VALUE_VEC_CTOR(c10::quint8, QUInt8)
69
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
70
+ VALUE_VEC_CTOR(c10::qint8, QInt8)
71
+ #undef VALUE_VEC_CTOR
72
+
73
+ template <typename T>
74
+ T as() const;
75
+
76
+ template <typename T>
77
+ const std::vector<T>& as_vec() const;
78
+
79
+ int64_t intValue() const;
80
+
81
+ Dtype dtype() const {
82
+ return dtype_;
83
+ }
84
+
85
+ private:
86
+ Dtype dtype_;
87
+
88
+ #define VALUE_STORAGE(Type, Name) std::vector<Type> Name##values;
89
+ AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, VALUE_STORAGE);
90
+ VALUE_STORAGE(c10::qint8, QInt8);
91
+ VALUE_STORAGE(c10::quint8, QUInt8);
92
+ #undef VALUE_STORAGE
93
+ void* ptr;
94
+ };
95
+
96
+ #define VALUE_AS_DISPATCH(Type, Name) \
97
+ template <> \
98
+ inline Type InterpValue::as<Type>() const { \
99
+ if (dtype_ != k##Name) { \
100
+ throw unsupported_dtype(); \
101
+ } \
102
+ return Name##values[0]; \
103
+ }
104
+ AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, VALUE_AS_DISPATCH);
105
+ VALUE_AS_DISPATCH(c10::quint8, QUInt8);
106
+ VALUE_AS_DISPATCH(c10::qint8, QInt8);
107
+ #undef VALUE_AS_DISPATCH
108
+
109
+ #define VALUE_AS_VEC_DISPATCH(Type, Name) \
110
+ template <> \
111
+ inline const std::vector<Type>& InterpValue::as_vec<Type>() const { \
112
+ if (dtype_.scalar_type() != ScalarType::Name) { \
113
+ throw unsupported_dtype(); \
114
+ } \
115
+ return Name##values; \
116
+ }
117
+ AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, VALUE_AS_VEC_DISPATCH);
118
+ VALUE_AS_VEC_DISPATCH(c10::quint8, QUInt8);
119
+ VALUE_AS_VEC_DISPATCH(c10::qint8, QInt8);
120
+ #undef VALUE_AS_VEC_DISPATCH
121
+
122
+ template <typename Type>
123
+ auto underlyingValue(Type x) {
124
+ return x;
125
+ }
126
+
127
+ template <>
128
+ inline auto underlyingValue<c10::quint8>(c10::quint8 x) {
129
+ return x.val_;
130
+ }
131
+
132
+ template <>
133
+ inline auto underlyingValue<c10::qint8>(c10::qint8 x) {
134
+ return x.val_;
135
+ }
136
+
137
+ template <typename To, typename From>
138
+ To raw_bitcast(const From& src) {
139
+ TORCH_CHECK(sizeof(To) == sizeof(From), "Invalid bitcast invocation");
140
+ To storage;
141
+ std::memcpy(&storage, &src, sizeof(To));
142
+ return reinterpret_cast<To&>(storage);
143
+ }
144
+
145
+ class SimpleIREvaluatorImpl;
146
+ class TORCH_API SimpleIREvaluator : public CodeGen {
147
+ public:
148
+ SimpleIREvaluator(
149
+ StmtPtr stmt,
150
+ const std::vector<BufferArg>& buffer_args,
151
+ at::Device device = at::kCPU,
152
+ const std::string& kernel_func_name = "func");
153
+
154
+ ~SimpleIREvaluator() override;
155
+
156
+ void call(const std::vector<CallArg>& args) override;
157
+ void call_raw(const std::vector<void*>& args) override;
158
+
159
+ template <typename... Ts>
160
+ void operator()(const Ts&... ts) {
161
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
162
+ std::vector<CallArg> args({CallArg(ts)...});
163
+ call(args);
164
+ }
165
+
166
+ void bindVar(VarPtr v, ExprPtr e);
167
+ InterpValue value() const;
168
+
169
+ private:
170
+ void bindArg(const BufferArg& buf, void* data);
171
+ void expand_intrinsics() {
172
+ GenericIntrinsicsExpander intrinsics_expander;
173
+ apply_mutator(&intrinsics_expander);
174
+ }
175
+
176
+ std::unique_ptr<SimpleIREvaluatorImpl> impl_;
177
+ };
178
+
179
+ template <class CodeGenType>
180
+ class ExprEval {
181
+ public:
182
+ using BufferArg = CodeGen::BufferArg;
183
+ using CallArg = CodeGen::CallArg;
184
+
185
+ template <typename... Ts>
186
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
187
+ ExprEval(const ExprHandle& expr, Ts... ts)
188
+ : ExprEval(expr, {BufferArg(ts)...}) {}
189
+
190
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
191
+ ExprEval(const ExprHandle& expr, const std::vector<BufferArg>& buffer_args)
192
+ : dtype_(expr.dtype()) {
193
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
194
+ std::vector<BufferArg> buffer_args_extended = buffer_args;
195
+ BufHandle ret_buf("ret_val", {1}, dtype_);
196
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
197
+ std::vector<ExprHandle> indices;
198
+ ExprHandle zero = IntImm::make(0);
199
+ for (size_t i = 0; i < ret_buf.ndim(); i++) {
200
+ indices.push_back(zero);
201
+ }
202
+ StmtPtr store_stmt = Store::make(ret_buf, indices, expr);
203
+ buffer_args_extended.emplace_back(ret_buf);
204
+ codegen_.reset(new CodeGenType(store_stmt, buffer_args_extended));
205
+ }
206
+
207
+ template <typename... Ts>
208
+ void operator()(Ts... ts) {
209
+ call(ts...);
210
+ }
211
+
212
+ void operator()(const std::vector<CallArg>& call_args) {
213
+ call(call_args);
214
+ }
215
+
216
+ void bindVar(VarPtr v, ExprPtr e) {
217
+ codegen_->bindVar(v, e);
218
+ }
219
+
220
+ void bindVar(const VarHandle& v, const ExprHandle& e) {
221
+ codegen_->bindVar(v.node(), e.node());
222
+ }
223
+
224
+ template <typename... Ts>
225
+ void call(Ts... ts) {
226
+ call({CallArg(ts)...});
227
+ }
228
+
229
+ void call(const std::vector<CallArg>& call_args) {
230
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
231
+ std::vector<CallArg> call_args_extended = call_args;
232
+ switch (dtype_.scalar_type()) {
233
+ #define TYPE_CASE(Type, Name) \
234
+ case ScalarType::Name: { \
235
+ std::vector<Type> ret_val_arg(1); \
236
+ call_args_extended.push_back(CallArg(ret_val_arg)); \
237
+ codegen_->call(call_args_extended); \
238
+ ret_value_ = InterpValue(ret_val_arg[0]); \
239
+ } break;
240
+ // NOLINTNEXTLINE(modernize-use-emplace)
241
+ AT_FORALL_SCALAR_TYPES_AND2(Half, BFloat16, TYPE_CASE);
242
+ // NOLINTNEXTLINE(modernize-use-emplace)
243
+ TYPE_CASE(c10::quint8, QUInt8);
244
+ // NOLINTNEXTLINE(modernize-use-emplace)
245
+ TYPE_CASE(c10::qint8, QInt8);
246
+ #undef TYPE_CASE
247
+ case ScalarType::Bool: {
248
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
249
+ std::vector<unsigned char> ret_val_arg(1);
250
+ call_args_extended.emplace_back(ret_val_arg.data());
251
+ codegen_->call(call_args_extended);
252
+ ret_value_ = InterpValue((bool)ret_val_arg[0]);
253
+ } break;
254
+ default:
255
+ throw unsupported_dtype();
256
+ }
257
+ }
258
+
259
+ void call_raw(const std::vector<void*>& args) {
260
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
261
+ std::vector<void*> args_extended = args;
262
+ switch (dtype_.scalar_type()) {
263
+ #define TYPE_CASE(Type, Name) \
264
+ case ScalarType::Name: { \
265
+ std::vector<Type> ret_val_arg(1); \
266
+ args_extended.push_back(ret_val_arg.data()); \
267
+ codegen_->call_raw(args_extended); \
268
+ ret_value_ = InterpValue(ret_val_arg[0]); \
269
+ } break;
270
+ AT_FORALL_SCALAR_TYPES_AND2(Half, BFloat16, TYPE_CASE);
271
+ TYPE_CASE(c10::quint8, QUInt8);
272
+ TYPE_CASE(c10::qint8, QInt8);
273
+ #undef TYPE_CASE
274
+ case ScalarType::Bool: {
275
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
276
+ std::vector<unsigned char> ret_val_arg(1);
277
+ args_extended.push_back(ret_val_arg.data());
278
+ codegen_->call_raw(args_extended);
279
+ ret_value_ = InterpValue((bool)ret_val_arg[0]);
280
+ } break;
281
+ default:
282
+ throw unsupported_dtype();
283
+ }
284
+ }
285
+
286
+ template <typename T>
287
+ T value(const std::vector<void*>& args) {
288
+ call_raw(args);
289
+ return ret_value_.as<T>();
290
+ }
291
+
292
+ template <typename T, typename... Ts>
293
+ T value(Ts... ts) {
294
+ call(std::forward<Ts>(ts)...);
295
+ return ret_value_.as<T>();
296
+ }
297
+
298
+ Dtype dtype() {
299
+ return dtype_;
300
+ }
301
+
302
+ private:
303
+ Dtype dtype_;
304
+ std::unique_ptr<CodeGenType> codegen_;
305
+ InterpValue ret_value_;
306
+ };
307
+
308
+ // Evaluates the given expression and returns an int64_t value if the result of
309
+ // the given expression is int64_t.
310
+ c10::optional<int64_t> evalInt(ExprPtr e);
311
+
312
+ // Substitutes the given vars with their corresponding expressions in the input
313
+ // expression.
314
+ inline ExprPtr Substitute(ExprPtr expr, const VarMapping& var_mapping) {
315
+ VarSubMutator var_sub(var_mapping);
316
+ return expr->accept_mutator(&var_sub);
317
+ }
318
+
319
+ // Substitutes the given vars with their corresponding expressions in the input
320
+ // statement.
321
+ inline StmtPtr Substitute(StmtPtr stmt, const VarMapping& var_mapping) {
322
+ VarSubMutator var_sub(var_mapping);
323
+ return stmt->accept_mutator(&var_sub);
324
+ }
325
+
326
+ // Creates a clone of the input expression and substitutes the given vars with
327
+ // their corresponding expressions in the clone.
328
+ // NOTE: This works because cloning reuses variables and does not create new
329
+ // ones, and `VarMapping` input has variables as the key.
330
+ inline ExprPtr SubstituteInClone(ExprPtr expr, const VarMapping& var_mapping) {
331
+ VarSubMutator var_sub(var_mapping);
332
+ return Expr::clone(std::move(expr))->accept_mutator(&var_sub);
333
+ }
334
+
335
+ // Creates a clone of the input statement and substitutes the given vars with
336
+ // their corresponding expressions in the clone.
337
+ // NOTE: This works because cloning reuses variables and does not create new
338
+ // ones, and `VarMapping` input has variables as the key.
339
+ inline StmtPtr SubstituteInClone(StmtPtr stmt, const VarMapping& var_mapping) {
340
+ VarSubMutator var_sub(var_mapping);
341
+ return Stmt::clone(std::move(stmt))->accept_mutator(&var_sub);
342
+ }
343
+
344
+ } // namespace tensorexpr
345
+ } // namespace jit
346
+ } // namespace torch
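A minimal hedged sketch of ExprEval with the interpreter backend described above, assuming these headers are available; the expression and the binding are illustrative.

    #include <torch/csrc/jit/tensorexpr/eval.h>
    #include <torch/csrc/jit/tensorexpr/ir.h>

    namespace te = torch::jit::tensorexpr;

    int eval_example() {
      // Evaluate (x + 4) * x with x bound to the constant 2.
      te::VarHandle x("x", te::kInt);
      te::ExprHandle e = (x + te::ExprHandle(4)) * x;

      te::ExprEval<te::SimpleIREvaluator> eval(e);
      eval.bindVar(x, te::ExprHandle(2));
      // ExprEval stores the result into a hidden 1-element return buffer and
      // unpacks it according to the expression's dtype.
      return eval.value<int>(); // expected: (2 + 4) * 2 == 12
    }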
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/exceptions.h ADDED
@@ -0,0 +1,91 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/Export.h>
4
+ #include <torch/csrc/jit/tensorexpr/fwd_decls.h>
5
+
6
+ #include <sstream>
7
+ #include <stdexcept>
8
+
9
+ // Forward declarations of types
10
+ namespace torch {
11
+ namespace jit {
12
+ namespace tensorexpr {
13
+ class Expr;
14
+ class Stmt;
15
+ } // namespace tensorexpr
16
+ } // namespace jit
17
+ } // namespace torch
18
+
19
+ // Forward declarations of functions
20
+ namespace std {
21
+ TORCH_API std::string to_string(const torch::jit::tensorexpr::ExprPtr);
22
+ TORCH_API std::string to_string(const torch::jit::tensorexpr::StmtPtr);
23
+ } // namespace std
24
+
25
+ namespace torch {
26
+ namespace jit {
27
+ namespace tensorexpr {
28
+
29
+ class unsupported_dtype : public std::runtime_error {
30
+ public:
31
+ explicit unsupported_dtype() : std::runtime_error("UNSUPPORTED DTYPE") {}
32
+ explicit unsupported_dtype(const std::string& err)
33
+ : std::runtime_error("UNSUPPORTED DTYPE: " + err) {}
34
+ };
35
+
36
+ class out_of_range_index : public std::runtime_error {
37
+ public:
38
+ explicit out_of_range_index() : std::runtime_error("OUT OF RANGE INDEX") {}
39
+ explicit out_of_range_index(const std::string& err)
40
+ : std::runtime_error("OUT OF RANGE INDEX: " + err) {}
41
+ };
42
+
43
+ class unimplemented_lowering : public std::runtime_error {
44
+ public:
45
+ explicit unimplemented_lowering()
46
+ : std::runtime_error("UNIMPLEMENTED LOWERING") {}
47
+ explicit unimplemented_lowering(ExprPtr expr)
48
+ : std::runtime_error("UNIMPLEMENTED LOWERING: " + std::to_string(expr)) {}
49
+ explicit unimplemented_lowering(StmtPtr stmt)
50
+ : std::runtime_error("UNIMPLEMENTED LOWERING: " + std::to_string(stmt)) {}
51
+ };
52
+
53
+ class malformed_input : public std::runtime_error {
54
+ public:
55
+ explicit malformed_input() : std::runtime_error("MALFORMED INPUT") {}
56
+ explicit malformed_input(const std::string& err)
57
+ : std::runtime_error("MALFORMED INPUT: " + err) {}
58
+ explicit malformed_input(ExprPtr expr)
59
+ : std::runtime_error("MALFORMED INPUT: " + std::to_string(expr)) {}
60
+ explicit malformed_input(const std::string& err, ExprPtr expr)
61
+ : std::runtime_error(
62
+ "MALFORMED INPUT: " + err + " - " + std::to_string(expr)) {}
63
+ explicit malformed_input(StmtPtr stmt)
64
+ : std::runtime_error("MALFORMED INPUT: " + std::to_string(stmt)) {}
65
+ explicit malformed_input(const std::string& err, StmtPtr stmt)
66
+ : std::runtime_error(
67
+ "MALFORMED INPUT: " + err + " - " + std::to_string(stmt)) {}
68
+ };
69
+
70
+ class malformed_ir : public std::runtime_error {
71
+ public:
72
+ explicit malformed_ir() : std::runtime_error("MALFORMED IR") {}
73
+ explicit malformed_ir(const std::string& err)
74
+ : std::runtime_error("MALFORMED IR: " + err) {}
75
+ explicit malformed_ir(ExprPtr expr)
76
+ : std::runtime_error("MALFORMED IR: " + std::to_string(expr)) {}
77
+ explicit malformed_ir(const std::string& err, ExprPtr expr)
78
+ : std::runtime_error(
79
+ "MALFORMED IR: " + err + " - " + std::to_string(expr)) {}
80
+ explicit malformed_ir(StmtPtr stmt)
81
+ : std::runtime_error("MALFORMED IR: " + std::to_string(stmt)) {}
82
+ explicit malformed_ir(const std::string& err, StmtPtr stmt)
83
+ : std::runtime_error(
84
+ "MALFORMED IR: " + err + " - " + std::to_string(stmt)) {}
85
+ };
86
+
87
+ TORCH_API std::string buildErrorMessage(const std::string& s = "");
88
+
89
+ } // namespace tensorexpr
90
+ } // namespace jit
91
+ } // namespace torch
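Since all of these exception types derive from std::runtime_error, callers can catch them uniformly; a small hedged sketch, independent of the rest of NNC:

    #include <torch/csrc/jit/tensorexpr/exceptions.h>
    #include <iostream>

    namespace te = torch::jit::tensorexpr;

    int main() {
      try {
        // NNC code paths throw these subclasses when they hit bad input.
        throw te::unsupported_dtype("complex tensors are not handled here");
      } catch (const std::runtime_error& e) {
        // Prints "UNSUPPORTED DTYPE: complex tensors are not handled here".
        std::cerr << e.what() << '\n';
      }
      return 0;
    }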
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/expr.h ADDED
@@ -0,0 +1,499 @@
1
+ /**
2
+ * This file implements the core classes for Tensor Expressions.
3
+ *
4
+ * The structure of the expressions is inspired by Halide/TVM IR.
5
+ */
6
+ #pragma once
7
+
8
+ #include <c10/core/MemoryFormat.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <torch/csrc/jit/tensorexpr/fwd_decls.h>
11
+ #include <torch/csrc/jit/tensorexpr/ir_mutator.h>
12
+ #include <torch/csrc/jit/tensorexpr/ir_visitor.h>
13
+ #include <torch/csrc/jit/tensorexpr/types.h>
14
+
15
+ #include <utility>
16
+
17
+ namespace torch {
18
+ namespace jit {
19
+ namespace tensorexpr {
20
+
21
+ enum IRNodeType {
22
+ kPrimitive,
23
+ kAdd,
24
+ kSub,
25
+ kMul,
26
+ kDiv,
27
+ kMod,
28
+ kMax,
29
+ kMin,
30
+ kAnd,
31
+ kOr,
32
+ kLshift,
33
+ kRshift,
34
+ kXor,
35
+ kCompareSelect,
36
+ kCast,
37
+ kBitCast,
38
+ kOther,
39
+ };
40
+
41
+ // The common base class of all expression nodes.
42
+ class TORCH_API Expr : public std::enable_shared_from_this<Expr> {
43
+ public:
44
+ explicit Expr(Dtype dtype, IRNodeType expr_type = kOther)
45
+ : dtype_(dtype), expr_type_(expr_type) {}
46
+ virtual ~Expr() = default;
47
+ Dtype dtype() const {
48
+ return dtype_;
49
+ }
50
+ virtual void accept(IRVisitor* visitor) = 0;
51
+ virtual ExprPtr accept_mutator(IRMutator* mutator) = 0;
52
+
53
+ IRNodeType expr_type() const {
54
+ return expr_type_;
55
+ }
56
+ // Is this a fixed (constant) immediate value.
57
+ virtual bool isConstant() const {
58
+ return false;
59
+ }
60
+
61
+ void set_dtype(Dtype dtype) {
62
+ dtype_ = dtype;
63
+ }
64
+
65
+ /*
66
+ * Make a deep copy of the given expression.
67
+ *
68
+ * All sub-expressions inside the given expression are also cloned. Note
69
+ * that the variables are not deep-copied since they are immutable.
70
+ */
71
+ static ExprPtr clone(ExprPtr s);
72
+
73
+ protected:
74
+ std::shared_ptr<Expr> getptr() {
75
+ return shared_from_this();
76
+ }
77
+
78
+ private:
79
+ Dtype dtype_;
80
+ IRNodeType expr_type_;
81
+ };
82
+
83
+ // A CRTP pattern that accepts visitors on behalf of a child class
+ // and dispatches back to that child.
85
+ template <class Op, class Base = Expr>
86
+ class ExprNode : public Base {
87
+ public:
88
+ using ExprNodeBase = ExprNode<Op>;
89
+ void accept(IRVisitor* visitor) override {
90
+ visitor->visit(static_to<Op>(Base::getptr()));
91
+ }
92
+ ExprPtr accept_mutator(IRMutator* mutator) override;
93
+ // pass the constructor to the base class
94
+ using Base::Base;
95
+ };
96
+
97
+ // A wrapper object around the underlying ExprNode.
+ // Also serves as the primary way to build and operate on other expressions.
99
+ class TORCH_API ExprHandle {
100
+ public:
101
+ ExprHandle() = default;
102
+ explicit ExprHandle(ExprPtr node) : base_expr_node_(std::move(node)) {}
103
+
104
+ ExprPtr node() {
105
+ return base_expr_node_;
106
+ }
107
+
108
+ ExprPtr node() const {
109
+ return base_expr_node_;
110
+ }
111
+
112
+ bool empty() const {
113
+ return base_expr_node_ == nullptr;
114
+ }
115
+
116
+ #define IMM_EXPR_DECLARE(Type, Name) ExprHandle(Type v);
117
+ AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, IMM_EXPR_DECLARE);
118
+ #undef IMM_EXPR_DECLARE
119
+
120
+ template <class Op>
121
+ NodePtr<Op> AsNode() {
122
+ return to<Op>(this->node());
123
+ }
124
+
125
+ template <class Op>
126
+ NodePtr<Op> AsNode() const {
127
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
128
+ return const_cast<ExprHandle*>(this)->AsNode<Op>();
129
+ }
130
+
131
+ Dtype dtype() const {
132
+ return node()->dtype();
133
+ }
134
+
135
+ // Handling the math operators.
136
+ ExprHandle operator+(const ExprHandle& other) const;
137
+ ExprHandle operator-(const ExprHandle& other) const;
138
+ ExprHandle operator*(const ExprHandle& other) const;
139
+ ExprHandle operator/(const ExprHandle& other) const;
140
+ ExprHandle operator%(const ExprHandle& other) const;
141
+ ExprHandle operator==(const ExprHandle& other) const;
142
+ ExprHandle operator!=(const ExprHandle& other) const;
143
+ ExprHandle operator>(const ExprHandle& other) const;
144
+ ExprHandle operator>=(const ExprHandle& other) const;
145
+ ExprHandle operator<(const ExprHandle& other) const;
146
+ ExprHandle operator<=(const ExprHandle& other) const;
147
+ ExprHandle operator&(const ExprHandle& other) const;
148
+ ExprHandle operator|(const ExprHandle& other) const;
149
+ ExprHandle operator&&(const ExprHandle& other) const;
150
+ ExprHandle operator||(const ExprHandle& other) const;
151
+ ExprHandle operator^(const ExprHandle& other) const;
152
+ ExprHandle operator<<(const ExprHandle& other) const;
153
+ ExprHandle operator>>(const ExprHandle& other) const;
154
+
155
+ private:
156
+ ExprPtr base_expr_node_ = nullptr;
157
+ };
158
+
159
+ // The underlying representation node of a Var.
+ // Currently, each Var object represents a unique variable, even if the
+ // names are the same. We should consider adding a unique_name as well.
162
+ class TORCH_API Var : public ExprNode<Var> {
163
+ public:
164
+ static ExprHandle make(const std::string& name_hint, Dtype dtype) {
165
+ return ExprHandle(alloc<Var>(name_hint, dtype));
166
+ }
167
+ static ExprHandle make(Dtype dtype) {
168
+ return ExprHandle(alloc<Var>("", dtype));
169
+ }
170
+
171
+ // TODO: unique_name
172
+ const std::string& name_hint() const {
173
+ return name_hint_;
174
+ }
175
+
176
+ void set_name_hint(const std::string& name) {
177
+ name_hint_ = name;
178
+ }
179
+
180
+ void set_name_hint(std::string&& name) {
181
+ name_hint_ = std::move(name);
182
+ }
183
+
184
+ Var(std::string name_hint, Dtype dtype)
185
+ : ExprNodeBase(dtype, kPrimitive), name_hint_(std::move(name_hint)) {}
186
+
187
+ private:
188
+ std::string name_hint_;
189
+ };
190
+
191
+ TORCH_API std::vector<ExprPtr> make_contiguous_strides(
192
+ const std::vector<ExprHandle>& dims);
193
+ TORCH_API std::vector<ExprPtr> make_channels_last_strides(
194
+ const std::vector<ExprHandle>& dims);
195
+
196
+ class TORCH_API Buf : public ExprNode<Buf> {
197
+ public:
198
+ static BufHandle make(const std::vector<ExprHandle>& dims, Dtype dtype);
199
+
200
+ static BufHandle make(
201
+ const std::string& name_hint,
202
+ const std::vector<ExprHandle>& dims,
203
+ const std::vector<ExprHandle>& strides,
204
+ Dtype dtype);
205
+
206
+ static BufHandle make(
207
+ const std::string& name_hint,
208
+ const std::vector<ExprHandle>& dims,
209
+ Dtype dtype,
210
+ c10::optional<ExprHandle> initializer = c10::nullopt,
211
+ c10::optional<std::vector<ExprHandle>> strides = c10::nullopt,
212
+ c10::optional<ExprHandle> qscale = c10::nullopt,
213
+ c10::optional<ExprHandle> qzero = c10::nullopt);
214
+
215
+ // TODO: unique_name
216
+ VarPtr base_handle() const {
217
+ return base_handle_;
218
+ }
219
+ void set_base_handle(VarPtr base_handle) {
220
+ base_handle_ = std::move(base_handle);
221
+ }
222
+
223
+ const std::string& name_hint() const {
224
+ return base_handle_->name_hint();
225
+ }
226
+ void set_name_hint(const std::string& name_hint) {
227
+ base_handle_->set_name_hint(name_hint);
228
+ }
229
+
230
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
231
+ Buf(const std::string& name_hint,
232
+ const std::vector<ExprPtr>& dims,
233
+ Dtype dtype,
234
+ ExprPtr initializer = nullptr,
235
+ c10::optional<std::vector<ExprPtr>> strides = c10::nullopt,
236
+ ExprPtr qscale = nullptr,
237
+ ExprPtr qzero = nullptr)
238
+ : Buf(alloc<Var>(name_hint, kHandle),
239
+ dims,
240
+ dtype,
241
+ std::move(initializer),
242
+ std::move(strides),
243
+ std::move(qscale),
244
+ std::move(qzero)) {}
245
+
246
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
247
+ Buf(VarPtr var,
248
+ std::vector<ExprPtr> dims,
249
+ Dtype dtype,
250
+ ExprPtr initializer = nullptr,
251
+ c10::optional<std::vector<ExprPtr>> strides = c10::nullopt,
252
+ ExprPtr qscale = nullptr,
253
+ ExprPtr qzero = nullptr);
254
+
255
+ size_t ndim() const {
256
+ return dims_.size();
257
+ }
258
+ ExprPtr dim(size_t index) const {
259
+ if (index >= ndim()) {
260
+ throw out_of_range_index();
261
+ }
262
+ return dims_[index];
263
+ }
264
+ std::vector<ExprPtr> dims() const {
265
+ return dims_;
266
+ }
267
+ void set_dims(std::vector<ExprPtr> dims) {
268
+ dims_ = std::move(dims);
269
+ }
270
+
271
+ std::vector<ExprPtr> strides() const {
272
+ return strides_;
273
+ }
274
+
275
+ void set_strides(std::vector<ExprPtr> strides) {
276
+ strides_ = std::move(strides);
277
+ }
278
+
279
+ ExprPtr initializer() const {
280
+ return initializer_;
281
+ };
282
+
283
+ ExprPtr qzero() const {
284
+ return qzero_;
285
+ }
286
+
287
+ ExprPtr qscale() const {
288
+ return qscale_;
289
+ }
290
+
291
+ void set_qzero(ExprPtr qzero) {
292
+ qzero_ = std::move(qzero);
293
+ }
294
+
295
+ void set_qscale(ExprPtr qscale) {
296
+ qscale_ = std::move(qscale);
297
+ }
298
+
299
+ bool hasConstantDims() const {
300
+ for (const auto& d : dims_) {
301
+ if (!d->isConstant()) {
302
+ return false;
303
+ }
304
+ }
305
+ return true;
306
+ }
307
+
308
+ bool is_contiguous(
309
+ at::MemoryFormat memory_format = at::MemoryFormat::Contiguous) const;
310
+
311
+ // The channels-last 1d layout can benefit the performance of some operators,
+ // such as conv1d, but the MemoryFormat enum does not cover this layout yet.
+ // Hence, we provide a dedicated function to check channels-last 1d contiguity.
314
+ //
315
+ // Channels-last 1d:
316
+ // dims: n c l
317
+ // strides(nlc): c*l 1 c
318
+ bool is_channels_last_1d_contiguous() const {
319
+ if (dims_.size() != 3) {
320
+ return false;
321
+ }
322
+ return is_stride_one(1) && is_cont_with(2, 1) && is_cont_with(0, 2);
323
+ }
324
+
325
+ private:
326
+ bool is_cont_with(int cur_dim, int adjacent_dim) const;
327
+ bool is_stride_one(int cur_dim) const;
328
+
329
+ VarPtr base_handle_;
330
+ std::vector<ExprPtr> dims_;
331
+ std::vector<ExprPtr> strides_;
332
+ ExprPtr initializer_;
333
+ // qscale_ and qzero_ are used only for quantized dtypes Bufs: kQUInt8, kQInt8
334
+ ExprPtr qscale_;
335
+ ExprPtr qzero_;
336
+ };
337
+
338
+ class TORCH_API BufHandle : public ExprHandle {
339
+ public:
340
+ BufHandle(
341
+ const std::string& name_hint,
342
+ const std::vector<ExprHandle>& dims,
343
+ Dtype dtype)
344
+ : ExprHandle(Buf::make(name_hint, dims, dtype)) {}
345
+
346
+ BufHandle(
347
+ const std::string& name_hint,
348
+ const std::vector<ExprHandle>& dims,
349
+ const std::vector<ExprHandle>& strides,
350
+ Dtype dtype)
351
+ : ExprHandle(Buf::make(name_hint, dims, strides, dtype)) {}
352
+
353
+ BufHandle(const std::vector<ExprHandle>& dims, Dtype dtype)
354
+ : ExprHandle(Buf::make("_", dims, dtype)) {}
355
+
356
+ explicit BufHandle(Dtype dtype) : ExprHandle(Buf::make("_", {}, dtype)) {}
357
+
358
+ explicit BufHandle(BufPtr node) : ExprHandle(std::move(node)) {}
359
+ BufPtr node() const {
360
+ return static_to<Buf>(ExprHandle::node());
361
+ }
362
+ BufPtr node() {
363
+ return static_to<Buf>(ExprHandle::node());
364
+ }
365
+
366
+ template <typename... Ts>
367
+ inline ExprHandle load(const Ts&... ts) const;
368
+
369
+ template <typename T>
370
+ inline ExprHandle load(const std::vector<T>& args) const;
371
+
372
+ inline ExprHandle load(const std::vector<ExprHandle>& args) const;
373
+
374
+ StorePtr store(const std::vector<ExprHandle>& args, const ExprHandle& val)
375
+ const;
376
+
377
+ bool operator==(const BufHandle& other) const {
378
+ return this->node() == other.node();
379
+ }
380
+ bool operator!=(const BufHandle& other) const {
381
+ return !(*this == other);
382
+ }
383
+
384
+ const std::string& name_hint() const {
385
+ return this->node()->name_hint();
386
+ }
387
+
388
+ bool empty() const {
389
+ return (this->node() == nullptr);
390
+ }
391
+
392
+ size_t ndim() const {
393
+ return node()->ndim();
394
+ }
395
+
396
+ std::vector<ExprHandle> dims() const;
397
+
398
+ ExprHandle dim(size_t index) const {
399
+ return ExprHandle(node()->dim(index));
400
+ }
401
+
402
+ bool is_contiguous(
403
+ at::MemoryFormat memory_format = at::MemoryFormat::Contiguous) const {
404
+ return node()->is_contiguous(memory_format);
405
+ }
406
+
407
+ bool is_channels_last_1d_contiguous() const {
408
+ return node()->is_channels_last_1d_contiguous();
409
+ }
410
+ };
411
+
412
+ // An expression to construct the underlying variable node.
413
+ // Note: do not store any info here, since it is often possible to slice this
414
+ // object. For example: VarHandle x('x'); ExprHandle x2 = x;
415
+ class TORCH_API VarHandle : public ExprHandle {
416
+ public:
417
+ // Creates an empty VarHandle whose base Var is set to nullptr.
418
+ VarHandle() : ExprHandle() {}
419
+
420
+ explicit VarHandle(Dtype dtype) : ExprHandle(Var::make(dtype)) {}
421
+
422
+ VarHandle(const std::string& name_hint, Dtype dtype)
423
+ : ExprHandle(Var::make(name_hint, dtype)) {}
424
+
425
+ explicit VarHandle(VarPtr node) : ExprHandle(std::move(node)) {}
426
+
427
+ VarPtr node() const {
428
+ return static_to<Var>(ExprHandle::node());
429
+ }
430
+ bool operator==(const VarHandle& other) const {
431
+ return this->node() == other.node();
432
+ }
433
+ bool operator!=(const VarHandle& other) const {
434
+ return !(*this == other);
435
+ }
436
+
437
+ const std::string& name_hint() const {
438
+ return this->node()->name_hint();
439
+ }
440
+ bool empty() const {
441
+ return (this->node() == nullptr);
442
+ }
443
+ };
444
+
445
+ template <class Op, class Base>
446
+ ExprPtr ExprNode<Op, Base>::accept_mutator(IRMutator* mutator) {
447
+ return mutator->mutate(static_to<Op>(Base::getptr()));
448
+ }
449
+
450
+ inline bool same_node(const ExprHandle& expr1, const ExprHandle& expr2) {
451
+ return expr1.AsNode<Expr>() == expr2.AsNode<Expr>();
452
+ }
453
+
454
+ TORCH_API ExprHandle sin(const ExprHandle& v);
455
+ TORCH_API ExprHandle cos(const ExprHandle& v);
456
+ TORCH_API ExprHandle tan(const ExprHandle& v);
457
+ TORCH_API ExprHandle asin(const ExprHandle& v);
458
+ TORCH_API ExprHandle acos(const ExprHandle& v);
459
+ TORCH_API ExprHandle atan(const ExprHandle& v);
460
+ TORCH_API ExprHandle sinh(const ExprHandle& v);
461
+ TORCH_API ExprHandle cosh(const ExprHandle& v);
462
+ TORCH_API ExprHandle tanh(const ExprHandle& v);
463
+ TORCH_API ExprHandle sigmoid(const ExprHandle& v);
464
+ TORCH_API ExprHandle exp(const ExprHandle& v);
465
+ TORCH_API ExprHandle expm1(const ExprHandle& v);
466
+ TORCH_API ExprHandle abs(const ExprHandle& v);
467
+ TORCH_API ExprHandle log(const ExprHandle& v);
468
+ TORCH_API ExprHandle fast_tanh(const ExprHandle& v);
469
+ TORCH_API ExprHandle fast_sigmoid(const ExprHandle& v);
470
+ TORCH_API ExprHandle fast_log(const ExprHandle& v);
471
+ TORCH_API ExprHandle log_vml(const ExprHandle& v);
472
+ TORCH_API ExprHandle log2(const ExprHandle& v);
473
+ TORCH_API ExprHandle log10(const ExprHandle& v);
474
+ TORCH_API ExprHandle log1p(const ExprHandle& v);
475
+ TORCH_API ExprHandle erf(const ExprHandle& v);
476
+ TORCH_API ExprHandle erfc(const ExprHandle& v);
477
+ TORCH_API ExprHandle sqrt(const ExprHandle& v);
478
+ TORCH_API ExprHandle rsqrt(const ExprHandle& v);
479
+ TORCH_API ExprHandle ceil(const ExprHandle& v);
480
+ TORCH_API ExprHandle floor(const ExprHandle& v);
481
+ TORCH_API ExprHandle round(const ExprHandle& v);
482
+ TORCH_API ExprHandle trunc(const ExprHandle& v);
483
+ TORCH_API ExprHandle frac(const ExprHandle& v);
484
+ TORCH_API ExprHandle lgamma(const ExprHandle& v);
485
+ TORCH_API ExprHandle atan2(const ExprHandle& v1, const ExprHandle& v2);
486
+ TORCH_API ExprHandle pow(const ExprHandle& v1, const ExprHandle& v2);
487
+ TORCH_API ExprHandle fmod(const ExprHandle& v1, const ExprHandle& v2);
488
+ TORCH_API ExprHandle remainder(const ExprHandle& v1, const ExprHandle& v2);
489
+ TORCH_API ExprHandle isnan(const ExprHandle& v1);
490
+ TORCH_API ExprHandle Relu(const ExprHandle& v1);
491
+
492
+ TORCH_API ExprHandle
493
+ ifThenElse(const ExprHandle& c, const ExprHandle& t, const ExprHandle& f);
494
+
495
+ TORCH_API ExprHandle expr_to_vec(ExprHandle v, int lanes);
496
+
497
+ } // namespace tensorexpr
498
+ } // namespace jit
499
+ } // namespace torch
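A hedged sketch of the channels-last 1d check described in Buf above, using the BufHandle constructor that takes explicit strides; the concrete sizes are illustrative.

    #include <torch/csrc/jit/tensorexpr/expr.h>

    namespace te = torch::jit::tensorexpr;

    bool nlc_contiguity_example() {
      // An (n=2, c=3, l=5) buffer laid out as channels-last 1d, i.e. with
      // strides (c*l, 1, c) = (15, 1, 3), matching the comment in Buf.
      te::BufHandle buf(
          "X",
          /*dims=*/{2, 3, 5},
          /*strides=*/{15, 1, 3},
          te::kFloat);
      return buf.is_channels_last_1d_contiguous(); // expected: true
    }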
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/external_functions.h ADDED
@@ -0,0 +1,115 @@
1
+ #pragma once
2
+
3
+ #include <ATen/Config.h>
4
+ #include <ATen/Functions.h>
5
+ #include <c10/macros/Macros.h>
6
+ #include <torch/csrc/Export.h>
7
+ #include <cstdint>
8
+ #include <vector>
9
+
10
+ #define FOR_ALL_EXTERNAL_FUNCTIONS(_) \
11
+ _(nnc_aten_adaptive_avg_pool2d) \
12
+ _(nnc_aten_addmm) \
13
+ _(nnc_aten_conv2d) \
14
+ _(nnc_aten_conv1d) \
15
+ _(nnc_aten_conv1d_out) \
16
+ _(nnc_aten_dequantize) \
17
+ _(nnc_aten_dequantize_out) \
18
+ _(nnc_aten_embedding) \
19
+ _(nnc_aten_matmul) \
20
+ _(nnc_aten_mv) \
21
+ _(nnc_aten_mm) \
22
+ _(nnc_aten_mean) \
23
+ _(nnc_aten_max_red) \
24
+ _(nnc_aten_max_red_out) \
25
+ _(nnc_aten_quantized_conv1d) \
26
+ _(nnc_aten_quantized_conv1d_out) \
27
+ _(nnc_aten_quantized_conv2d) \
28
+ _(nnc_aten_quantized_conv2d_out) \
29
+ _(nnc_aten_quantized_conv2d_relu) \
30
+ _(nnc_aten_quantized_conv2d_relu_out) \
31
+ _(nnc_aten_quantized_linear) \
32
+ _(nnc_aten_quantized_linear_out) \
33
+ _(nnc_aten_quantized_linear_relu) \
34
+ _(nnc_aten_quantized_add) \
35
+ _(nnc_aten_quantized_cat) \
36
+ _(nnc_aten_quantized_mul) \
37
+ _(nnc_aten_quantized_mul_out) \
38
+ _(nnc_aten_quantized_mul_scalar) \
39
+ _(nnc_aten_quantized_mul_scalar_out) \
40
+ _(nnc_aten_quantized_relu) \
41
+ _(nnc_aten_quantized_sigmoid) \
42
+ _(nnc_aten_quantized_sigmoid_out) \
43
+ _(nnc_aten_quantize_per_tensor) \
44
+ _(nnc_aten_quantize_per_tensor_out) \
45
+ _(nnc_aten_triangular_solve) \
46
+ _(nnc_aten_upsample_nearest2d) \
47
+ _(nnc_aten_upsample_nearest2d_out) \
48
+ _(nnc_prepacked_conv2d_clamp_run) \
49
+ _(nnc_prepacked_linear_clamp_run)
50
+
51
+ #define DECLARE_EXTERNAL_FUNCTION(NAME) \
52
+ TORCH_API void NAME( \
53
+ int64_t bufs_num, \
54
+ void** buf_data, \
55
+ int64_t* buf_ranks, \
56
+ int64_t* buf_dims, \
57
+ int64_t* buf_strides, \
58
+ int8_t* buf_dtypes, \
59
+ int64_t args_num, \
60
+ int64_t* extra_args);
61
+
62
+ namespace torch {
63
+ namespace jit {
64
+ namespace tensorexpr {
65
+ struct QIData final {
66
+ double scale;
67
+ int64_t zero;
68
+ c10::ScalarType scalarType;
69
+ };
70
+ std::vector<at::Tensor> constructTensors(
71
+ int64_t bufs_num,
72
+ void** buf_data,
73
+ int64_t* buf_ranks,
74
+ int64_t* buf_dims,
75
+ int64_t* buf_strides,
76
+ int8_t* buf_dtypes,
77
+ c10::optional<std::vector<std::pair<size_t, QIData>>> qdataArg =
78
+ c10::nullopt);
79
+
80
+ std::vector<at::Tensor> constructTensors2(
81
+ int64_t bufs_in_num,
82
+ void** buf_data,
83
+ int64_t* buf_ranks,
84
+ int64_t* buf_dims,
85
+ int64_t* buf_strides,
86
+ int8_t* buf_dtypes,
87
+ c10::optional<std::vector<std::pair<size_t, QIData>>> qdataArg =
88
+ c10::nullopt,
89
+ size_t bufs_out_num = 0);
90
+
91
+ #ifdef C10_MOBILE
92
+ extern "C" {
93
+ #endif
94
+ void DispatchParallel(
95
+ int8_t* func,
96
+ int64_t start,
97
+ int64_t stop,
98
+ int8_t* packed_data) noexcept;
99
+
100
+ FOR_ALL_EXTERNAL_FUNCTIONS(DECLARE_EXTERNAL_FUNCTION)
101
+ #if AT_MKLDNN_ENABLED()
102
+ DECLARE_EXTERNAL_FUNCTION(nnc_mkldnn_prepacked_conv_run);
103
+ #endif
104
+
105
+ TORCH_API void nnc_aten_free(int64_t bufs_num, void** ptrs) noexcept;
106
+
107
+ #ifdef C10_MOBILE
108
+ } // extern "C"
109
+ #endif
110
+
111
+ } // namespace tensorexpr
112
+ } // namespace jit
113
+ } // namespace torch
114
+
115
+ #undef DECLARE_EXTERNAL_FUNCTION
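The FOR_ALL_EXTERNAL_FUNCTIONS / DECLARE_EXTERNAL_FUNCTION pair above is an X-macro: each listed name expands into a declaration with the shared bridge signature. For reference, a sketch of what a single entry such as _(nnc_aten_matmul) expands to, derived purely from the macros in this header:

    TORCH_API void nnc_aten_matmul(
        int64_t bufs_num,
        void** buf_data,
        int64_t* buf_ranks,
        int64_t* buf_dims,
        int64_t* buf_strides,
        int8_t* buf_dtypes,
        int64_t args_num,
        int64_t* extra_args);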
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/external_functions_core.h ADDED
@@ -0,0 +1,29 @@
1
+ #pragma once
2
+
3
+ #include <ATen/ATen.h>
4
+ #include <ATen/Parallel.h>
5
+ #include <torch/csrc/Export.h>
6
+ #include <cstdint>
7
+
8
+ namespace torch {
9
+ namespace jit {
10
+ namespace tensorexpr {
11
+
12
+ #ifdef C10_MOBILE
13
+ extern "C" {
14
+ #endif
15
+ void DispatchParallel(
16
+ int8_t* func,
17
+ int64_t start,
18
+ int64_t stop,
19
+ int8_t* packed_data) noexcept;
20
+
21
+ TORCH_API void nnc_aten_free(int64_t bufs_num, void** ptrs) noexcept;
22
+
23
+ #ifdef C10_MOBILE
24
+ } // extern "C"
25
+ #endif
26
+
27
+ } // namespace tensorexpr
28
+ } // namespace jit
29
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/external_functions_registry.h ADDED
@@ -0,0 +1,61 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/Export.h>
4
+ #include <cstdint>
5
+ #include <string>
6
+ #include <unordered_map>
7
+
8
+ namespace torch {
9
+ namespace jit {
10
+ namespace tensorexpr {
11
+
12
+ // The external functions that could be called from NNC must have the same
13
+ // signature defined by `NNCExternalFunction`.
14
+ //
15
+ // Why this signature?
16
+ // It was picked for two reasons: 1) it should be generic enough to represent
17
+ // most of the ops we might want to call, 2) it should be possible to generate
18
+ // code for this call in LLVM codegen.
19
+ // The first 5 parameters allow passing any number of contiguous CPU tensors in
20
+ // case we need to run aten ops (TODO: support different devices). The first
21
+ // buffer in the array is assumed to be the output buffer. We couldn't use
22
+ // `at::Tensor` (or `c10::IValue`) type there directly as it would mean that
23
+ // we'd need to declare it in LLVM codegen in LLVM IR form, which would be very
24
+ // cumbersome and hard to maintain. Note that the dimensions of all tensors are
25
+ // concatenated into a single array buf_dims. We do not need to pass its length,
26
+ // since it can be deduced from total number of buffers and their ranks.
27
+ //
28
+ // The last 2 arguments allow passing any non-tensor arguments encoded as an
29
+ // array of int64_t values. The way they are encoded is not specified and could
30
+ // be arbitrary - whatever is most convenient for the specific bridge
31
+ // function.
32
+ //
33
+ // The bridge functions must not throw exceptions - properly propagating them
34
+ // from the generated code is too cumbersome, and thus all calls to functions
35
+ // that could throw must be wrapped with try-catch blocks.
36
+ using NNCExternalFunction = void (*)(
37
+ int64_t bufs_num,
38
+ void** buf_data,
39
+ int64_t* buf_ranks,
40
+ int64_t* buf_dims,
41
+ int64_t* buf_strides,
42
+ int8_t* buf_dtypes,
43
+ int64_t args_num,
44
+ int64_t* extra_args);
45
+
46
+ // Return a global map "function-name" -> "function-pointer" for all external
47
+ // functions registered in NNC.
48
+ TORCH_API std::unordered_map<std::string, NNCExternalFunction>&
49
+ getNNCFunctionRegistry();
50
+
51
+ // To register a new external function in NNC, one needs to create an instance
52
+ // of this struct.
53
+ struct RegisterNNCExternalFunction {
54
+ RegisterNNCExternalFunction(const std::string& name, NNCExternalFunction fn) {
55
+ getNNCFunctionRegistry()[name] = fn;
56
+ }
57
+ };
58
+
59
+ } // namespace tensorexpr
60
+ } // namespace jit
61
+ } // namespace torch
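A minimal sketch of how a bridge function could be added through the registry declared above; `my_custom_op` and its body are hypothetical, only the signature and the RegisterNNCExternalFunction usage follow this header:

    // Hypothetical bridge matching NNCExternalFunction; it must not throw.
    static void my_custom_op(
        int64_t bufs_num,
        void** buf_data,
        int64_t* buf_ranks,
        int64_t* buf_dims,
        int64_t* buf_strides,
        int8_t* buf_dtypes,
        int64_t args_num,
        int64_t* extra_args) {
      // By convention the first buffer is the output; a real bridge would
      // rebuild at::Tensors (e.g. via constructTensors from external_functions.h)
      // and run the desired aten op here.
    }

    // A static instance registers the function under a name that generated
    // code can then call.
    static torch::jit::tensorexpr::RegisterNNCExternalFunction reg_my_custom_op(
        "my_custom_op", my_custom_op);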
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/fwd_decls.h ADDED
@@ -0,0 +1,129 @@
1
+ #pragma once
2
+ #include <c10/core/ScalarType.h>
3
+ #include <memory>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+ namespace tensorexpr {
8
+
9
+ template <typename Node>
10
+ using NodePtr = std::shared_ptr<Node>;
11
+
12
+ template <typename To, typename From>
13
+ NodePtr<To> to(NodePtr<From> x) {
14
+ return std::dynamic_pointer_cast<To>(x);
15
+ }
16
+
17
+ template <typename To, typename From>
18
+ NodePtr<To> static_to(NodePtr<From> x) {
19
+ return std::static_pointer_cast<To>(x);
20
+ }
21
+
22
+ template <typename Node, typename... Args>
23
+ NodePtr<Node> alloc(Args&&... args) {
24
+ return std::make_shared<Node>(std::forward<Args>(args)...);
25
+ }
26
+
27
+ class Buf;
28
+ class Expr;
29
+ class Stmt;
30
+ class Var;
31
+
32
+ using BufPtr = NodePtr<Buf>;
33
+ using ExprPtr = NodePtr<Expr>;
34
+ using StmtPtr = NodePtr<Stmt>;
35
+ using VarPtr = NodePtr<Var>;
36
+
37
+ class ExprHandle;
38
+ class VarHandle;
39
+ class BufHandle;
40
+
41
+ class Add;
42
+ class And;
43
+ class BitCast;
44
+ class Broadcast;
45
+ class Cast;
46
+ class CompareSelect;
47
+ class Div;
48
+ class IfThenElse;
49
+ class Intrinsics;
50
+ class Let;
51
+ class Load;
52
+ class Lshift;
53
+ class Max;
54
+ class MaxTerm;
55
+ class Min;
56
+ class MinTerm;
57
+ class Mod;
58
+ class Mul;
59
+ class Or;
60
+ class Polynomial;
61
+ class Ramp;
62
+ class ReduceOp;
63
+ class RoundOff;
64
+ class Rshift;
65
+ class Store;
66
+ class Sub;
67
+ class Term;
68
+ class Xor;
69
+ using AddPtr = NodePtr<Add>;
70
+ using AndPtr = NodePtr<And>;
71
+ using BitCastPtr = NodePtr<BitCast>;
72
+ using BroadcastPtr = NodePtr<Broadcast>;
73
+ using CastPtr = NodePtr<Cast>;
74
+ using CompareSelectPtr = NodePtr<CompareSelect>;
75
+ using DivPtr = NodePtr<Div>;
76
+ using IfThenElsePtr = NodePtr<IfThenElse>;
77
+ using IntrinsicsPtr = NodePtr<Intrinsics>;
78
+ using LetPtr = NodePtr<Let>;
79
+ using LoadPtr = NodePtr<Load>;
80
+ using LshiftPtr = NodePtr<Lshift>;
81
+ using MaxPtr = NodePtr<Max>;
82
+ using MaxTermPtr = NodePtr<MaxTerm>;
83
+ using MinPtr = NodePtr<Min>;
84
+ using MinTermPtr = NodePtr<MinTerm>;
85
+ using ModPtr = NodePtr<Mod>;
86
+ using MulPtr = NodePtr<Mul>;
87
+ using OrPtr = NodePtr<Or>;
88
+ using PolynomialPtr = NodePtr<Polynomial>;
89
+ using RampPtr = NodePtr<Ramp>;
90
+ using ReduceOpPtr = NodePtr<ReduceOp>;
91
+ using RoundOffPtr = NodePtr<RoundOff>;
92
+ using RshiftPtr = NodePtr<Rshift>;
93
+ using StorePtr = NodePtr<Store>;
94
+ using SubPtr = NodePtr<Sub>;
95
+ using TermPtr = NodePtr<Term>;
96
+ using XorPtr = NodePtr<Xor>;
97
+
98
+ class Allocate;
99
+ class AtomicAdd;
100
+ class Block;
101
+ class Cond;
102
+ class ExternalCall;
103
+ class ExternalCallWithAlloc;
104
+ class For;
105
+ class Free;
106
+ class FreeExt;
107
+ class PlacementAllocate;
108
+ class SyncThreads;
109
+ using AllocatePtr = NodePtr<Allocate>;
110
+ using AtomicAddPtr = NodePtr<AtomicAdd>;
111
+ using BlockPtr = NodePtr<Block>;
112
+ using CondPtr = NodePtr<Cond>;
113
+ using ExternalCallPtr = NodePtr<ExternalCall>;
114
+ using ExternalCallWithAllocPtr = NodePtr<ExternalCallWithAlloc>;
115
+ using ForPtr = NodePtr<For>;
116
+ using FreePtr = NodePtr<Free>;
117
+ using FreeExtPtr = NodePtr<FreeExt>;
118
+ using PlacementAllocatePtr = NodePtr<PlacementAllocate>;
119
+ using SyncThreadsPtr = NodePtr<SyncThreads>;
120
+
121
+ #define IMM_DECLARE(Type, Name) \
122
+ class Name##Imm; \
123
+ using Name##ImmPtr = NodePtr<Name##Imm>;
124
+ AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, IMM_DECLARE);
125
+ #undef IMM_DECLARE
126
+
127
+ } // namespace tensorexpr
128
+ } // namespace jit
129
+ } // namespace torch
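A minimal sketch of how the NodePtr helpers above are typically used; node constructors such as Add and IntImm are defined later in ir.h, and the function name is illustrative:

    #include <torch/csrc/jit/tensorexpr/ir.h>

    using namespace torch::jit::tensorexpr;

    void fwd_decls_example() {
      // alloc<> builds shared-pointer IR nodes: here (2 + 3).
      ExprPtr lhs = alloc<IntImm>(2);
      ExprPtr rhs = alloc<IntImm>(3);
      ExprPtr sum = alloc<Add>(lhs, rhs);

      // to<> is a dynamic_pointer_cast: nullptr when the dynamic type differs.
      if (AddPtr add = to<Add>(sum)) {
        ExprPtr l = add->lhs(); // the IntImm(2) node
        (void)l;
      }
    }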
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/graph_opt.h ADDED
@@ -0,0 +1,115 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+ namespace tensorexpr {
8
+
9
+ // Optimize aten::cat ops in the given subgraph.
10
+ //
11
+ // Moving users of cat to its inputs.
12
+ // Cat ops get lowered into multiple loops, one per input. When the result
13
+ // of cat is used by some other op, it results in a situation where inlining
14
+ // of cat does not happen. This in turn results in intermediate buffers
15
+ // being created for the result of cat, since it is not inlined.
16
+ //
17
+ // For example, consider the following graph:
18
+ // graph(%x : Float(10, strides=[1], device=cpu),
19
+ // %y : Float(20, strides=[1], device=cpu)):
20
+ // %dim : int = prim::Constant[value=0]()
21
+ // %xy_list : Tensor[] = prim::ListConstruct(%x, %y)
22
+ // %cat : Float(60, strides=[1], device=cpu) = aten::cat(%xy_list, %dim)
23
+ // %5 : Float(60, strides=[1], device=cpu) = aten::log(%cat)
24
+ // return (%5)
25
+ //
26
+ // This will get lowered into:
27
+ // Allocate(aten_cat);
28
+ // for (...)
29
+ // aten_cat[...] = x[...]
30
+ // for (...)
31
+ // aten_cat[...] = y[...]
32
+ // for (...)
33
+ // aten_log[...] = log(aten_cat[...])
34
+ // Free(aten_cat);
35
+ // Note that aten_cat is not inlined into aten_log and it results in
36
+ // an intermediate buffer allocation as well.
37
+ //
38
+ // Optimization:
39
+ // We move the ops that use the result of `cat` into its inputs whenever
40
+ // possible.
41
+ //
42
+ // The graph above will be transformed to:
43
+ // graph(%x : Float(10, strides=[1], device=cpu),
44
+ // %y : Float(20, strides=[1], device=cpu)):
45
+ // %3 : int = prim::Constant[value=0]()
46
+ // %7 : Float(10, strides=[1], device=cpu) = aten::log(%x)
47
+ // %8 : Float(20, strides=[1], device=cpu) = aten::log(%y)
48
+ // %9 : Tensor[] = prim::ListConstruct(%7, %8)
49
+ // %10 : Float(60, strides=[1], device=cpu) = aten::cat(%9, %3)
50
+ // return (%10)
51
+ //
52
+ // This will get lowered into:
53
+ // for (...)
54
+ // aten_cat[...] = log(x[...])
55
+ // for (...)
56
+ // aten_cat[...] = log(y[...])
57
+ // aten_cat is the output buffer here.
58
+
59
+ bool OptimizeCat(const std::shared_ptr<Graph>& graph);
60
+
61
+ TORCH_API void annotateInputShapes(
62
+ const std::shared_ptr<Graph>& graph,
63
+ const std::vector<c10::optional<at::Tensor>>& example_inputs);
64
+ TORCH_API std::shared_ptr<Graph> removeUnusedSelfArgument(
65
+ const std::shared_ptr<Graph>& graph);
66
+ TORCH_API std::shared_ptr<Graph> removeGraphOutput(
67
+ const std::shared_ptr<Graph>& graph,
68
+ size_t idx);
69
+ TORCH_API std::shared_ptr<Graph> replaceListOutputWithTuple(
70
+ const std::shared_ptr<Graph>& graph);
71
+
72
+ // Perform \p ITERS rounds of "trimming" for the given \p GRAPH.
73
+ //
74
+ // Trimming means that we try to remove a small portion of the graph while
75
+ // keeping it valid. This is useful for debugging when we try to find a minimal
76
+ // example reproducing the issue at hand. When ITERS is 0, the graph remains
77
+ // unchanged; when ITERS is large, the graph usually becomes empty.
78
+ TORCH_API std::shared_ptr<Graph> trimGraph(
79
+ const std::shared_ptr<Graph>& graph,
80
+ int64_t iters);
81
+
82
+ // Scan all values in the given graph and replace each dimension with a size Xi
83
+ // present in \p SIZES with a symbolic shape Yi. Return a vector of symbol
84
+ // values [Y0, Y1, .., Yn].
85
+ //
86
+ // For example:
87
+ // Input:
88
+ // graph(%x : Float(10, 20, 30, 40)):
89
+ // %y : Float(10, 20, 30, 40) = aten::relu(%x)
90
+ // return %y
91
+ //
92
+ // If we run makeShapesSymbolic(graph, {20, 40}), then we'll get:
93
+ //
94
+ // graph(%x : Float(10, SS(-3), 30, SS(-5))):
95
+ // %y : Float(10, SS(-3), 30, SS(-5)) = aten::relu(%x)
96
+ // return %y
97
+ //
98
+ // and get {-3, -5} as the return value.
99
+ TORCH_API std::vector<int64_t> makeShapesSymbolic(
100
+ std::shared_ptr<Graph>& graph,
101
+ const std::vector<int64_t>& sizes);
102
+
103
+ // Inspect the graph and report whether it can be converted to TE IR.
104
+ // TODO: add error reporting for graphs that can't be converted.
105
+ TORCH_API bool isGraphCompilable(const std::shared_ptr<Graph>& graph);
106
+
107
+ // Examine the graph and (hackily) fill in missing tensor type info, such as
108
+ // scalar type, device, and strides. Ideally, this should be done by proper
109
+ // dtype/device/shape propagation passes, but until they are ready we can use
110
+ // this workaround pass, which is not always correct.
111
+ TORCH_API void fixupMissingShapeInfo(const std::shared_ptr<Graph>& graph);
112
+
113
+ } // namespace tensorexpr
114
+ } // namespace jit
115
+ } // namespace torch
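A minimal sketch of the makeShapesSymbolic example from the comment above, assuming the IR parser from torch/csrc/jit/ir/irparser.h accepts the size-only type annotations used in that comment; the function name is illustrative:

    #include <torch/csrc/jit/ir/irparser.h>
    #include <torch/csrc/jit/tensorexpr/graph_opt.h>

    using namespace torch::jit;

    void make_shapes_symbolic_example() {
      auto graph = std::make_shared<Graph>();
      parseIR(
          R"IR(
    graph(%x : Float(10, 20, 30, 40)):
      %y : Float(10, 20, 30, 40) = aten::relu(%x)
      return (%y))IR",
          graph.get());

      // Replace every dimension of size 20 or 40 with a symbolic shape; the
      // returned vector holds the fresh symbols (e.g. {-3, -5}).
      std::vector<int64_t> syms = tensorexpr::makeShapesSymbolic(graph, {20, 40});
      (void)syms;
    }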
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/half_support.h ADDED
@@ -0,0 +1,217 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/tensorexpr/codegen.h>
4
+ #include <torch/csrc/jit/tensorexpr/ir.h>
5
+ #include <torch/csrc/jit/tensorexpr/ir_visitor.h>
6
+ #include <torch/csrc/jit/tensorexpr/tensor.h>
7
+
8
+ namespace torch {
9
+ namespace jit {
10
+ namespace tensorexpr {
11
+
12
+ // Walk the Statement looking for Half size loads/stores.
13
+ class HalfChecker : public IRVisitor {
14
+ public:
15
+ HalfChecker(const std::vector<CodeGen::BufferArg>& args) {
16
+ for (const auto& BA : args) {
17
+ hasHalf_ |= BA.dtype().scalar_type() == ScalarType::Half;
18
+ }
19
+ }
20
+
21
+ bool hasHalf() const {
22
+ return hasHalf_;
23
+ }
24
+
25
+ bool hasBFloat16() const {
26
+ return hasBFloat16_;
27
+ }
28
+
29
+ void visit(LoadPtr v) override {
30
+ hasHalf_ |= v->dtype().scalar_type() == ScalarType::Half;
31
+ hasBFloat16_ |= v->dtype().scalar_type() == ScalarType::BFloat16;
32
+ IRVisitor::visit(v);
33
+ }
34
+
35
+ void visit(StorePtr v) override {
36
+ hasHalf_ |= v->buf()->dtype().scalar_type() == ScalarType::Half;
37
+ hasBFloat16_ |= v->buf()->dtype().scalar_type() == ScalarType::BFloat16;
38
+ IRVisitor::visit(v);
39
+ }
40
+
41
+ void visit(HalfImmPtr v) override {
42
+ hasHalf_ = true;
43
+ }
44
+
45
+ void visit(BFloat16ImmPtr v) override {
46
+ hasBFloat16_ = true;
47
+ }
48
+
49
+ void visit(CastPtr v) override {
50
+ hasHalf_ |= v->dtype().scalar_type() == ScalarType::Half;
51
+ hasBFloat16_ |= v->dtype().scalar_type() == ScalarType::BFloat16;
52
+ IRVisitor::visit(v);
53
+ }
54
+
55
+ private:
56
+ bool hasHalf_{false};
57
+ bool hasBFloat16_{false};
58
+ };
59
+
60
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
61
+ class HalfRewriter : public IRMutator {
62
+ ExprPtr mutate(LoadPtr v) override {
63
+ ExprPtr child = IRMutator::mutate(v);
64
+ if (!isHalf(child)) {
65
+ return child;
66
+ }
67
+
68
+ ExprPtr ret = alloc<Cast>(
69
+ child->dtype().cloneWithScalarType(ScalarType::Float), child);
70
+
71
+ inserted_half_casts_.insert(ret);
72
+ return ret;
73
+ }
74
+
75
+ StmtPtr mutate(StorePtr v) override {
76
+ // Since mutation changes the `value()` expression in-place, we need to
77
+ // get the dtype of the `value()` before that is mutated.
78
+ auto newType = v->value()->dtype();
79
+ ExprPtr new_val = v->value()->accept_mutator(this);
80
+ auto bufType = v->buf()->dtype();
81
+
82
+ if (isHalf(newType.scalar_type())) {
83
+ new_val = alloc<Cast>(newType, new_val);
84
+ inserted_half_casts_.insert(new_val);
85
+ }
86
+
87
+ // The scalar_type of value is not Half while the buf is Half
88
+ if (!isHalf(newType.scalar_type()) && isHalf(bufType.scalar_type())) {
89
+ new_val = alloc<Cast>(
90
+ newType.cloneWithScalarType(bufType.scalar_type()), new_val);
91
+ inserted_half_casts_.insert(new_val);
92
+ }
93
+
94
+ v->set_value(new_val);
95
+ return v;
96
+ }
97
+
98
+ ExprPtr mutate(HalfImmPtr v) override {
99
+ return alloc<Cast>(kFloat, v);
100
+ }
101
+
102
+ ExprPtr mutate(BFloat16ImmPtr v) override {
103
+ return alloc<Cast>(kFloat, v);
104
+ }
105
+
106
+ ExprPtr mutate(CastPtr v) override {
107
+ ExprPtr child = v->src_value()->accept_mutator(this);
108
+
109
+ // just don't allow half casts we didn't insert.
110
+ if (isHalf(v)) {
111
+ if (inserted_half_casts_.count(v) < 1) {
112
+ v->set_src_value(child);
113
+ v->set_dtype(v->dtype().cloneWithScalarType(c10::kFloat));
114
+ return v;
115
+ }
116
+ }
117
+
118
+ // Remove Half(Float()) and friends.
119
+ CastPtr cast_child = to<Cast>(child);
120
+ if (cast_child) {
121
+ auto cast_to_double = v->dtype().scalar_type() == ScalarType::Double;
122
+ auto from_half = isHalf(cast_child->src_value());
123
+ // Cannot simplify the double(float(half)) to double(half) as NNC does
124
+ // not support cast BF16 to double directly.
125
+ auto not_cast_half_to_double = !(cast_to_double && from_half);
126
+ if (v->dtype().is_floating_point() &&
127
+ cast_child->dtype().is_floating_point() && not_cast_half_to_double) {
128
+ return alloc<Cast>(v->dtype(), cast_child->src_value());
129
+ }
130
+ }
131
+
132
+ if (child == v->src_value()) {
133
+ return v;
134
+ }
135
+
136
+ return alloc<Cast>(v->dtype(), child);
137
+ }
138
+
139
+ StmtPtr mutate(LetPtr v) override {
140
+ if (isHalf(v->var()->dtype().scalar_type())) {
141
+ VarPtr load_new_var = alloc<Var>(v->var()->name_hint(), kFloat);
142
+ ExprPtr new_value = alloc<Cast>(
143
+ v->var()->dtype().cloneWithScalarType(ScalarType::Float),
144
+ v->value()->accept_mutator(this));
145
+ var_map[v->var()] = load_new_var;
146
+
147
+ return alloc<Let>(load_new_var, new_value);
148
+ }
149
+
150
+ return IRMutator::mutate(v);
151
+ }
152
+
153
+ ExprPtr mutate(VarPtr v) override {
154
+ auto it = var_map.find(v);
155
+ if (it != var_map.end()) {
156
+ return it->second;
157
+ }
158
+
159
+ return v;
160
+ }
161
+
162
+ template <typename T>
163
+ ExprPtr mutateArithmetic(T v) {
164
+ IRMutator::mutate(v);
165
+ if (isHalf(v)) {
166
+ v->set_dtype(v->dtype().cloneWithScalarType(c10::kFloat));
167
+ }
168
+ return v;
169
+ }
170
+
171
+ ExprPtr mutate(AddPtr v) override {
172
+ return mutateArithmetic(v);
173
+ }
174
+ ExprPtr mutate(SubPtr v) override {
175
+ return mutateArithmetic(v);
176
+ }
177
+ ExprPtr mutate(MulPtr v) override {
178
+ return mutateArithmetic(v);
179
+ }
180
+ ExprPtr mutate(DivPtr v) override {
181
+ return mutateArithmetic(v);
182
+ }
183
+ ExprPtr mutate(MaxPtr v) override {
184
+ return mutateArithmetic(v);
185
+ }
186
+ ExprPtr mutate(MinPtr v) override {
187
+ return mutateArithmetic(v);
188
+ }
189
+ ExprPtr mutate(CompareSelectPtr v) override {
190
+ return mutateArithmetic(v);
191
+ }
192
+ ExprPtr mutate(BroadcastPtr v) override {
193
+ return mutateArithmetic(v);
194
+ }
195
+ ExprPtr mutate(IfThenElsePtr v) override {
196
+ return mutateArithmetic(v);
197
+ }
198
+ ExprPtr mutate(IntrinsicsPtr v) override {
199
+ return mutateArithmetic(v);
200
+ }
201
+
202
+ private:
203
+ static bool isHalf(ScalarType st) {
204
+ return st == ScalarType::Half || st == ScalarType::BFloat16;
205
+ }
206
+
207
+ static bool isHalf(ExprPtr v) {
208
+ return isHalf(v->dtype().scalar_type());
209
+ }
210
+
211
+ std::unordered_set<ExprPtr> inserted_half_casts_;
212
+ std::unordered_map<VarPtr, VarPtr> var_map;
213
+ };
214
+
215
+ } // namespace tensorexpr
216
+ } // namespace jit
217
+ } // namespace torch
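A minimal sketch of the intended two-step use of HalfChecker and HalfRewriter above: detect Half/BFloat16 usage, then rewrite so arithmetic happens in float. The function name is illustrative; stmt and bufferArgs are assumed to come from an existing CodeGen setup:

    using namespace torch::jit::tensorexpr;

    StmtPtr promote_half_to_float(
        StmtPtr stmt,
        const std::vector<CodeGen::BufferArg>& bufferArgs) {
      HalfChecker checker(bufferArgs);
      stmt->accept(&checker);
      if (!checker.hasHalf() && !checker.hasBFloat16()) {
        return stmt; // nothing to rewrite
      }
      // Inserts float casts around half loads, stores, and immediates.
      HalfRewriter rewriter;
      return stmt->accept_mutator(&rewriter);
    }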
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/hash_provider.h ADDED
@@ -0,0 +1,304 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/tensorexpr/ir.h>
4
+ #include <torch/csrc/jit/tensorexpr/ir_printer.h>
5
+ #include <torch/csrc/jit/tensorexpr/ir_visitor.h>
6
+ #include <torch/csrc/jit/tensorexpr/tensor.h>
7
+
8
+ #include <utility>
9
+
10
+ namespace torch {
11
+ namespace jit {
12
+ namespace tensorexpr {
13
+
14
+ struct TORCH_API SimplifierHashType {
15
+ SimplifierHashType() = default;
16
+ explicit SimplifierHashType(size_t s) : _h(s) {}
17
+
18
+ bool operator==(const SimplifierHashType& other) const;
19
+ bool operator!=(const SimplifierHashType& other) const;
20
+ bool operator<(const SimplifierHashType& other) const;
21
+ bool operator==(const size_t other) const;
22
+ bool operator!=(const size_t other) const;
23
+
24
+ size_t _h{0};
25
+ };
26
+
27
+ } // namespace tensorexpr
28
+ } // namespace jit
29
+ } // namespace torch
30
+
31
+ namespace std {
32
+ template <>
33
+ struct hash<torch::jit::tensorexpr::SimplifierHashType> {
34
+ size_t operator()(const torch::jit::tensorexpr::SimplifierHashType& k) const {
35
+ return k._h;
36
+ }
37
+ };
38
+
39
+ } // namespace std
40
+
41
+ namespace torch {
42
+ namespace jit {
43
+ namespace tensorexpr {
44
+
45
+ #define CACHE_GUARD() \
46
+ if (cachedHash(v)) { \
47
+ return; \
48
+ }
49
+
50
+ class Term;
51
+ class Polynomial;
52
+
53
+ /* Expression hasher providing comparable values representing sub-exprs.
54
+ * Uses memoization to avoid excessive recursion. */
55
+ class TORCH_API HashProvider : public IRVisitor {
56
+ public:
57
+ template <class T>
58
+ SimplifierHashType hash(T e) {
59
+ // NOLINTNEXTLINE(clang-analyzer-core.CallAndMessage)
60
+ e->accept(this);
61
+ return hashOf(e);
62
+ }
63
+
64
+ bool cachedHash(ExprPtr e) {
65
+ return exprToHash_.find(e) != exprToHash_.end();
66
+ }
67
+ bool cachedHash(StmtPtr s) {
68
+ return stmtToHash_.find(s) != stmtToHash_.end();
69
+ }
70
+
71
+ void clearCache() {
72
+ exprToHash_.clear();
73
+ stmtToHash_.clear();
74
+ }
75
+
76
+ void visit(AddPtr v) override;
77
+ void visit(SubPtr v) override;
78
+ void visit(MulPtr v) override;
79
+ void visit(DivPtr v) override;
80
+ void visit(ModPtr v) override;
81
+ void visit(RoundOffPtr v) override;
82
+ void visit(MaxPtr v) override;
83
+ void visit(MinPtr v) override;
84
+ void visit(AndPtr v) override;
85
+ void visit(OrPtr v) override;
86
+ void visit(XorPtr v) override;
87
+ void visit(LshiftPtr v) override;
88
+ void visit(RshiftPtr v) override;
89
+ void visit(CompareSelectPtr v) override;
90
+
91
+ // NOLINTNEXTLINE
92
+ #define IMM_VISIT(Type, Name) \
93
+ void visit(Name##ImmPtr v) override { \
94
+ CACHE_GUARD(); \
95
+ putHash(v, hash_combine(#Name, v->value())); \
96
+ }
97
+ AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, IMM_VISIT);
98
+ #undef IMM_VISIT
99
+
100
+ void visit(CastPtr v) override;
101
+ void visit(VarPtr v) override;
102
+ void visit(RampPtr v) override;
103
+ void visit(LoadPtr v) override;
104
+ void visit(StorePtr v) override;
105
+ void visit(BlockPtr v) override;
106
+ void visit(ForPtr v) override;
107
+ void visit(BroadcastPtr v) override;
108
+ void visit(IfThenElsePtr v) override;
109
+ void visit(IntrinsicsPtr v) override;
110
+ void visit(AllocatePtr v) override;
111
+ void visit(FreePtr v) override;
112
+ void visit(CondPtr v) override;
113
+ void visit(TermPtr v) override;
114
+ void visit(PolynomialPtr v) override;
115
+ void visit(MaxTermPtr v) override;
116
+ void visit(MinTermPtr v) override;
117
+
118
+ template <typename... Types>
119
+ SimplifierHashType hash_combine(const Types&... args) {
120
+ SimplifierHashType seed;
121
+ _hash_combine(seed, args...);
122
+ return seed;
123
+ }
124
+
125
+ private:
126
+ SimplifierHashType hashOf(ExprPtr e) {
127
+ auto it = exprToHash_.find(e);
128
+ if (it != exprToHash_.end()) {
129
+ return it->second;
130
+ }
131
+
132
+ // As a failsafe fall back to IRPrinter.
133
+ std::stringstream ss;
134
+ IRPrinter printer(ss);
135
+ e->accept(&printer);
136
+ SimplifierHashType hash = SimplifierHashType(te_hash(ss.str()));
137
+ putHash(std::move(e), hash);
138
+
139
+ return hash;
140
+ }
141
+
142
+ SimplifierHashType hashOf(StmtPtr s) {
143
+ auto it = stmtToHash_.find(s);
144
+ if (it != stmtToHash_.end()) {
145
+ return it->second;
146
+ }
147
+
148
+ // As a failsafe fall back to IRPrinter.
149
+ std::stringstream ss;
150
+ IRPrinter printer(ss);
151
+ s->accept(&printer);
152
+ SimplifierHashType hash = SimplifierHashType(te_hash(ss.str()));
153
+ putHash(std::move(s), hash);
154
+
155
+ return hash;
156
+ }
157
+
158
+ // Hash functions for various types; the magic numbers are arbitrary.
159
+ template <typename T>
160
+ void _hash_combine(SimplifierHashType& seed, const T& val) {
161
+ seed._h ^= te_hash(val) + 0x1f752c19 + (seed._h << 7) + (seed._h >> 4);
162
+ }
163
+
164
+ void _hash_combine(SimplifierHashType& seed, const char* val) {
165
+ seed._h ^= te_hash(val) + 0x1f752c19 + (seed._h << 7) + (seed._h >> 4);
166
+ }
167
+
168
+ // at:::Half doesn't have a prime_number_hash, so cast to short.
169
+ void _hash_combine(SimplifierHashType& seed, const at::Half& val) {
170
+ seed._h ^=
171
+ te_hash((uint16_t)val) + 0x1f752c19 + (seed._h << 7) + (seed._h >> 4);
172
+ }
173
+
174
+ void _hash_combine(SimplifierHashType& seed, const Dtype& val) {
175
+ seed._h ^= te_hash(val.ToCppString()) + 0x1f752c19 + (seed._h << 7) +
176
+ (seed._h >> 4);
177
+ }
178
+
179
+ void _hash_combine(SimplifierHashType& seed, ExprPtr e) {
180
+ _hash_combine(seed, hash(std::move(e)));
181
+ }
182
+
183
+ template <typename T, typename... Types>
184
+ void _hash_combine(
185
+ SimplifierHashType& seed,
186
+ const T& val,
187
+ const Types&... args) {
188
+ _hash_combine(seed, val);
189
+ _hash_combine(seed, args...);
190
+ }
191
+
192
+ void putHash(ExprPtr e, SimplifierHashType h) {
193
+ auto res = exprToHash_.emplace(e, h);
194
+ if (res.second == false) {
195
+ // This is always a logic bug since we should check the cache first.
196
+ throw std::runtime_error("hash collision");
197
+ }
198
+ }
199
+ void putHash(StmtPtr s, SimplifierHashType h) {
200
+ auto res = stmtToHash_.emplace(s, h);
201
+ if (res.second == false) {
202
+ // This is always a logic bug since we should check the cache first.
203
+ throw std::runtime_error("hash collision");
204
+ }
205
+ }
206
+
207
+ std::unordered_map<ExprPtr, SimplifierHashType> exprToHash_;
208
+ std::unordered_map<StmtPtr, SimplifierHashType> stmtToHash_;
209
+ UniqueNameManager name_manager_;
210
+
211
+ size_t te_hash(SimplifierHashType val) {
212
+ return val._h;
213
+ }
214
+
215
+ size_t te_hash(int64_t val) {
216
+ // put the thing down.
217
+ size_t h = val ^ 0x647AA4D20C0B;
218
+ // bit flip it.
219
+ size_t h2 = ~h;
220
+ // and reverse byte order.
221
+ size_t h3 = 0;
222
+ for (unsigned int i = 0; i < 64; i += 8) {
223
+ h3 |= ((h2 >> i) & 0xFF) << (64 - i - 8);
224
+ }
225
+ return h3;
226
+ }
227
+
228
+ size_t te_hash(int32_t val) {
229
+ int64_t v2 = val;
230
+ return te_hash(v2);
231
+ }
232
+
233
+ size_t te_hash(uint32_t val) {
234
+ int64_t v2 = val;
235
+ return te_hash(v2);
236
+ }
237
+
238
+ size_t te_hash(uint64_t val) {
239
+ int64_t v2 = val;
240
+ return te_hash(v2);
241
+ }
242
+
243
+ size_t te_hash(int16_t val) {
244
+ int64_t v2 = val;
245
+ return te_hash(v2);
246
+ }
247
+
248
+ size_t te_hash(std::string val) {
249
+ size_t hash{0};
250
+ int64_t intval{0};
251
+ int64_t s = val.size() - 1;
252
+ while (s >= 0) {
253
+ for (unsigned int i = 0; i < 8; ++i) {
254
+ if (s < 0)
255
+ break;
256
+ // NOLINTNEXTLINE(bugprone-signed-char-misuse)
257
+ int64_t c = val.data()[s];
258
+ intval |= (c << (i * 8));
259
+
260
+ s--;
261
+ }
262
+ hash ^= te_hash(intval);
263
+ intval = 0;
264
+ }
265
+
266
+ return hash;
267
+ }
268
+
269
+ size_t te_hash(double d) {
270
+ // memcpy as type punning. Should be optimized out.
271
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
272
+ int64_t n;
273
+ std::memcpy(&n, &d, sizeof d);
274
+ return te_hash(n);
275
+ }
276
+
277
+ size_t te_hash(float d) {
278
+ // memcpy as type punning. Should be optimized out.
279
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
280
+ int32_t n;
281
+ std::memcpy(&n, &d, sizeof d);
282
+ return te_hash(n);
283
+ }
284
+
285
+ size_t te_hash(at::Half d) {
286
+ // memcpy as type punning. Should be optimized out.
287
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
288
+ int16_t n;
289
+ std::memcpy(&n, &d, sizeof d);
290
+ return te_hash(n);
291
+ }
292
+
293
+ size_t te_hash(at::BFloat16 d) {
294
+ // memcpy as type punning. Should be optimized out.
295
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
296
+ int16_t n;
297
+ std::memcpy(&n, &d, sizeof d);
298
+ return te_hash(n);
299
+ }
300
+ };
301
+
302
+ } // namespace tensorexpr
303
+ } // namespace jit
304
+ } // namespace torch
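A minimal sketch of using HashProvider to compare two sub-expressions structurally, as the simplifier does; the function name and the expressions are illustrative:

    using namespace torch::jit::tensorexpr;

    bool hash_compare_example() {
      ExprPtr a = alloc<Add>(alloc<IntImm>(1), alloc<IntImm>(2));
      ExprPtr b = alloc<Add>(alloc<IntImm>(1), alloc<IntImm>(2));

      HashProvider hasher;
      SimplifierHashType ha = hasher.hash(a);
      SimplifierHashType hb = hasher.hash(b);

      // Equal hashes indicate structurally identical expressions (with the
      // usual caveat that any hash can collide).
      return ha == hb;
    }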
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/intrinsic_symbols.h ADDED
@@ -0,0 +1,22 @@
1
+ #pragma once
2
+
3
+ #ifdef TORCH_ENABLE_LLVM
4
+ #include <c10/util/ArrayRef.h>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+ namespace tensorexpr {
9
+
10
+ struct SymbolAddress {
11
+ const char* symbol;
12
+ void* address;
13
+
14
+ SymbolAddress(const char* sym, void* addr) : symbol(sym), address(addr) {}
15
+ };
16
+
17
+ c10::ArrayRef<SymbolAddress> getIntrinsicSymbols();
18
+
19
+ } // namespace tensorexpr
20
+ } // namespace jit
21
+ } // namespace torch
22
+ #endif // TORCH_ENABLE_LLVM
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir.h ADDED
@@ -0,0 +1,934 @@
1
+ #pragma once
2
+
3
+ #include <string>
4
+ #include <utility>
5
+ #include <vector>
6
+
7
+ #include <c10/util/string_utils.h>
8
+ #include <torch/csrc/jit/tensorexpr/exceptions.h>
9
+ #include <torch/csrc/jit/tensorexpr/expr.h>
10
+ #include <torch/csrc/jit/tensorexpr/fwd_decls.h>
11
+ #include <torch/csrc/jit/tensorexpr/stmt.h>
12
+
13
+ #include <ATen/core/ivalue.h>
14
+
15
+ namespace torch {
16
+ namespace jit {
17
+ namespace tensorexpr {
18
+
19
+ enum CompareSelectOperation {
20
+ kEQ = 0,
21
+ kGT,
22
+ kGE,
23
+ kLT,
24
+ kLE,
25
+ kNE,
26
+ };
27
+
28
+ enum CompareSelectBias {
29
+ kUnbiased,
30
+ kLikely,
31
+ kUnlikely,
32
+ };
33
+
34
+ inline int getPrecedence(IRNodeType ty) {
35
+ // Match C++ operator precedence rules, since some backends pretty-print expressions to
36
+ // C++. SEE: https://en.cppreference.com/w/cpp/language/operator_precedence
37
+ switch (ty) {
38
+ case kPrimitive:
39
+ return 0;
40
+ case kCast:
41
+ case kBitCast:
42
+ return 2;
43
+ case kAdd:
44
+ case kSub:
45
+ return 6;
46
+ case kMul:
47
+ case kDiv:
48
+ case kMod:
49
+ return 5;
50
+ case kMax:
51
+ case kMin:
52
+ return 99;
53
+ case kAnd:
54
+ return 11;
55
+ case kOr:
56
+ return 13;
57
+ case kLshift:
58
+ case kRshift:
59
+ return 7;
60
+ case kXor:
61
+ return 12;
62
+ case kCompareSelect:
63
+ return 16;
64
+ default:
65
+ return 99;
66
+ }
67
+ }
68
+
69
+ class TORCH_API Cast : public ExprNode<Cast> {
70
+ public:
71
+ ExprPtr src_value() const {
72
+ return src_value_;
73
+ }
74
+
75
+ void set_src_value(ExprPtr src_value) {
76
+ src_value_ = std::move(src_value);
77
+ }
78
+
79
+ static ExprHandle make(Dtype dtype, const ExprHandle& src_value) {
80
+ return ExprHandle(alloc<Cast>(dtype, src_value.node()));
81
+ }
82
+ Cast(Dtype dtype, ExprPtr src_value)
83
+ : ExprNodeBase(dtype, kCast), src_value_(std::move(src_value)) {}
84
+
85
+ bool isConstant() const override {
86
+ return src_value_->isConstant();
87
+ }
88
+
89
+ private:
90
+ ExprPtr src_value_;
91
+ };
92
+
93
+ template <typename T>
94
+ ExprHandle cast(const ExprHandle& src_value) {
95
+ return Cast::make(Dtype(ToDtype<T>(), src_value.dtype().lanes()), src_value);
96
+ }
97
+
98
+ // This is a bitwise cast, akin to bitcast in LLVM
99
+ class TORCH_API BitCast : public ExprNode<BitCast> {
100
+ public:
101
+ ExprPtr src_value() const {
102
+ return src_value_;
103
+ }
104
+
105
+ void set_src_value(ExprPtr src_value) {
106
+ src_value_ = std::move(src_value);
107
+ }
108
+
109
+ static ExprHandle make(Dtype dtype, const ExprHandle& src_value) {
110
+ return ExprHandle(alloc<BitCast>(dtype, src_value.node()));
111
+ }
112
+ BitCast(Dtype dtype, ExprPtr src_value)
113
+ : ExprNodeBase(dtype, kBitCast), src_value_(std::move(src_value)) {
114
+ TORCH_CHECK(src_value_->dtype().byte_size() == dtype.byte_size());
115
+ }
116
+
117
+ bool isConstant() const override {
118
+ return src_value_->isConstant();
119
+ }
120
+
121
+ private:
122
+ ExprPtr src_value_;
123
+ };
124
+
125
+ template <typename T>
126
+ ExprHandle bitcast(const ExprHandle& src_value) {
127
+ return BitCast::make(
128
+ Dtype(ToDtype<T>(), src_value.dtype().lanes()), src_value);
129
+ }
130
+
131
+ // Represent the expression node for binary operators.
132
+ // A CRTP pattern to share common code among the operators.
133
+ template <typename Op>
134
+ class BinaryOpNode : public ExprNode<Op> {
135
+ public:
136
+ ExprPtr lhs() const {
137
+ return this->lhs_;
138
+ }
139
+ ExprPtr rhs() const {
140
+ return this->rhs_;
141
+ }
142
+
143
+ void set_lhs(ExprPtr lhs) {
144
+ lhs_ = std::move(lhs);
145
+ }
146
+
147
+ void set_rhs(ExprPtr rhs) {
148
+ rhs_ = std::move(rhs);
149
+ }
150
+
151
+ static ExprHandle make(const ExprHandle& lhs, const ExprHandle& rhs) {
152
+ return ExprHandle(alloc<Op>(lhs.node(), rhs.node()));
153
+ }
154
+
155
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
156
+ BinaryOpNode(
157
+ ExprPtr lhs_v,
158
+ ExprPtr rhs_v,
159
+ IRNodeType expr_type,
160
+ ScalarType ret_type = ScalarType::Undefined)
161
+ : ExprNode<Op>(
162
+ // NOLINTNEXTLINE(clang-analyzer-core.CallAndMessage)
163
+ BinaryOpDtype(lhs_v->dtype(), rhs_v->dtype(), ret_type),
164
+ expr_type),
165
+ lhs_(CastIfNeeded(std::move(lhs_v), ExprNode<Op>::dtype())),
166
+ rhs_(CastIfNeeded(std::move(rhs_v), ExprNode<Op>::dtype())) {}
167
+
168
+ private:
169
+ static ExprPtr CastIfNeeded(ExprPtr expr, Dtype dst_dtype) {
170
+ if (expr->dtype() == dst_dtype) {
171
+ return expr;
172
+ }
173
+ return Cast::make(dst_dtype, ExprHandle(std::move(expr))).node();
174
+ }
175
+
176
+ ExprPtr lhs_;
177
+ ExprPtr rhs_;
178
+ };
179
+
180
+ namespace detail {
181
+ template <typename T>
182
+ void bin_op_deducer(BinaryOpNode<T>);
183
+ bool bin_op_deducer(...);
184
+ } // namespace detail
185
+
186
+ class TORCH_API Add : public BinaryOpNode<Add> {
187
+ public:
188
+ Add(ExprPtr lhs, ExprPtr rhs)
189
+ : BinaryOpNode(std::move(lhs), std::move(rhs), IRNodeType::kAdd) {}
190
+ };
191
+
192
+ class TORCH_API Sub : public BinaryOpNode<Sub> {
193
+ public:
194
+ Sub(ExprPtr lhs, ExprPtr rhs)
195
+ : BinaryOpNode(std::move(lhs), std::move(rhs), IRNodeType::kSub) {}
196
+ };
197
+
198
+ class TORCH_API Mul : public BinaryOpNode<Mul> {
199
+ public:
200
+ Mul(ExprPtr lhs, ExprPtr rhs)
201
+ : BinaryOpNode(std::move(lhs), std::move(rhs), IRNodeType::kMul) {}
202
+ };
203
+
204
+ class TORCH_API Div : public BinaryOpNode<Div> {
205
+ public:
206
+ Div(ExprPtr lhs, ExprPtr rhs)
207
+ : BinaryOpNode(std::move(lhs), std::move(rhs), IRNodeType::kDiv) {}
208
+ };
209
+
210
+ class TORCH_API Mod : public BinaryOpNode<Mod> {
211
+ public:
212
+ Mod(ExprPtr lhs, ExprPtr rhs)
213
+ : BinaryOpNode(std::move(lhs), std::move(rhs), IRNodeType::kMod) {}
214
+ };
215
+
216
+ template <typename Op>
217
+ class BitwiseOpNode : public BinaryOpNode<Op> {
218
+ public:
219
+ BitwiseOpNode(ExprPtr lhs, ExprPtr rhs, IRNodeType type)
220
+ : BinaryOpNode<Op>(std::move(lhs), std::move(rhs), type) {}
221
+
222
+ static ExprHandle make(const ExprHandle& lhs, const ExprHandle& rhs) {
223
+ if (!lhs.dtype().is_integral()) {
224
+ throw unsupported_dtype();
225
+ }
226
+ if (lhs.dtype() != rhs.dtype()) {
227
+ throw malformed_input("lhs/rhs dtype mismatch");
228
+ }
229
+ return BinaryOpNode<Op>::make(lhs, rhs);
230
+ }
231
+ };
232
+
233
+ class TORCH_API And : public BitwiseOpNode<And> {
234
+ public:
235
+ And(ExprPtr lhs, ExprPtr rhs)
236
+ : BitwiseOpNode(std::move(lhs), std::move(rhs), IRNodeType::kAnd) {}
237
+ };
238
+
239
+ class TORCH_API Or : public BitwiseOpNode<Or> {
240
+ public:
241
+ Or(ExprPtr lhs, ExprPtr rhs)
242
+ : BitwiseOpNode(std::move(lhs), std::move(rhs), IRNodeType::kOr) {}
243
+ };
244
+
245
+ class TORCH_API Xor : public BitwiseOpNode<Xor> {
246
+ public:
247
+ Xor(ExprPtr lhs, ExprPtr rhs)
248
+ : BitwiseOpNode(std::move(lhs), std::move(rhs), IRNodeType::kXor) {}
249
+ };
250
+
251
+ class TORCH_API Lshift : public BitwiseOpNode<Lshift> {
252
+ public:
253
+ Lshift(ExprPtr lhs, ExprPtr rhs)
254
+ : BitwiseOpNode(std::move(lhs), std::move(rhs), IRNodeType::kLshift) {}
255
+ };
256
+
257
+ class TORCH_API Rshift : public BitwiseOpNode<Rshift> {
258
+ public:
259
+ Rshift(ExprPtr lhs, ExprPtr rhs)
260
+ : BitwiseOpNode(std::move(lhs), std::move(rhs), IRNodeType::kRshift) {}
261
+ };
262
+
263
+ // TODO: add TORCH_API
264
+ // Currently adding it results in a compilation error on Windows
265
+ class Max : public BinaryOpNode<Max> {
266
+ private:
267
+ bool propagate_nans_;
268
+
269
+ public:
270
+ Max(ExprPtr lhs, ExprPtr rhs, bool propagate_nans)
271
+ : BinaryOpNode(std::move(lhs), std::move(rhs), IRNodeType::kMax),
272
+ propagate_nans_(propagate_nans) {}
273
+
274
+ bool propagate_nans() const {
275
+ return propagate_nans_;
276
+ }
277
+
278
+ static ExprHandle make(const ExprHandle& lhs, const ExprHandle& rhs) = delete;
279
+ static ExprHandle make(
280
+ const ExprHandle& lhs,
281
+ const ExprHandle& rhs,
282
+ bool propagate_nans) {
283
+ return ExprHandle(alloc<Max>(lhs.node(), rhs.node(), propagate_nans));
284
+ }
285
+ };
286
+
287
+ // TODO: add TORCH_API
288
+ // Currently adding it results in a compilation error on Windows
289
+ class Min : public BinaryOpNode<Min> {
290
+ private:
291
+ bool propagate_nans_;
292
+
293
+ public:
294
+ Min(ExprPtr lhs, ExprPtr rhs, bool propagate_nans)
295
+ : BinaryOpNode(std::move(lhs), std::move(rhs), IRNodeType::kMin),
296
+ propagate_nans_(propagate_nans) {}
297
+
298
+ bool propagate_nans() const {
299
+ return propagate_nans_;
300
+ }
301
+
302
+ static ExprHandle make(const ExprHandle& lhs, const ExprHandle& rhs) = delete;
303
+ static ExprHandle make(
304
+ const ExprHandle& lhs,
305
+ const ExprHandle& rhs,
306
+ bool propagate_nans) {
307
+ return ExprHandle(alloc<Min>(lhs.node(), rhs.node(), propagate_nans));
308
+ }
309
+ };
310
+
311
+ // Encode typed immediate values e.g. IntImm, FloatImm.
312
+ #define IMM_DECLARE(Type, Name) \
313
+ class TORCH_API Name##Imm : public ExprNode<Name##Imm> { \
314
+ public: \
315
+ Name##Imm(Type value) \
316
+ : ExprNodeBase(k##Name, kPrimitive), value_(value) {} \
317
+ bool isConstant() const override { \
318
+ return true; \
319
+ } \
320
+ Type value() const { \
321
+ return value_; \
322
+ } \
323
+ static ExprHandle make(Type value) { \
324
+ return ExprHandle(alloc<Name##Imm>(value)); \
325
+ } \
326
+ \
327
+ private: \
328
+ Type value_; \
329
+ };
330
+ AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, IMM_DECLARE);
331
+ #undef IMM_DECLARE
332
+
333
+ // Get immediate by ScalarType.
334
+ template <typename T>
335
+ ExprPtr getImmediateByType(ScalarType immType, T initialVal) {
336
+ switch (immType) {
337
+ #define TYPE_CASE(Type, Name) \
338
+ case ScalarType::Name: \
339
+ return alloc<Name##Imm>(Type(initialVal));
340
+ // NOLINTNEXTLINE(bugprone-branch-clone)
341
+ AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, TYPE_CASE);
342
+ #undef TYPE_CASE
343
+ default:
344
+ throw unsupported_dtype();
345
+ }
346
+ return nullptr;
347
+ }
348
+
349
+ template <typename T>
350
+ ExprPtr getImmediateByType(Dtype dtype, T initialVal) {
351
+ return getImmediateByType<T>(dtype.scalar_type(), initialVal);
352
+ }
353
+
354
+ template <typename T>
355
+ ExprPtr immLike(const ExprPtr& e, T v) {
356
+ return getImmediateByType<T>(e->dtype(), v);
357
+ }
358
+
359
+ template <typename T>
360
+ ExprPtr immLike(const ExprHandle& e, T v) {
361
+ return immLike(e.node(), v);
362
+ }
363
+
364
+ inline c10::optional<int64_t> intValue(const ExprPtr& e) {
365
+ #define TYPE_CASE(Type, Name) \
366
+ if (auto v = to<Name##Imm>(e)) { \
367
+ return v->value(); \
368
+ }
369
+ AT_FORALL_INT_TYPES(TYPE_CASE);
370
+ #undef TYPE_CASE
371
+ return c10::nullopt;
372
+ }
373
+
374
+ inline c10::optional<int64_t> intValue(const ExprHandle& e) {
375
+ return intValue(e.node());
376
+ }
377
+
378
+ template <typename T>
379
+ T immediateAs(const ExprPtr& e) {
380
+ #define TYPE_CASE(Type, Name) \
381
+ if (Name##ImmPtr imm = to<Name##Imm>(e)) { \
382
+ return imm->value(); \
383
+ }
384
+ AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, TYPE_CASE);
385
+ #undef TYPE_CASE
386
+ throw unsupported_dtype();
387
+ return 0;
388
+ }
389
+
390
+ template <typename T>
391
+ T immediateAs(const ExprHandle& e) {
392
+ return immediateAs<T>(e.node());
393
+ }
394
+
395
+ template <typename T>
396
+ bool immediateEquals(const ExprPtr& e, T val) {
397
+ #define TYPE_CASE(Type, Name) \
398
+ if (Name##ImmPtr imm = to<Name##Imm>(e)) { \
399
+ return imm->value() == val; \
400
+ }
401
+ AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, TYPE_CASE);
402
+ #undef TYPE_CASE
403
+ throw unsupported_dtype();
404
+ return false;
405
+ }
406
+
407
+ TORCH_API bool immediateIsNegative(const ExprPtr& e);
408
+
409
+ TORCH_API bool immediateIsPositive(const ExprPtr& e);
410
+
411
+ TORCH_API bool immediateIsZero(const ExprPtr& e);
412
+
413
+ // Represents a ramp vector node:
414
+ // [base, base + 1 * stride, ... , base + (lanes - 1) * stride]
415
+ class TORCH_API Ramp : public ExprNode<Ramp> {
416
+ public:
417
+ ExprPtr base() const {
418
+ return base_;
419
+ }
420
+ ExprPtr stride() const {
421
+ return stride_;
422
+ }
423
+
424
+ void set_base(ExprPtr base) {
425
+ base_ = std::move(base);
426
+ }
427
+
428
+ void set_stride(ExprPtr stride) {
429
+ stride_ = std::move(stride);
430
+ }
431
+
432
+ static ExprHandle make(
433
+ const ExprHandle& base,
434
+ const ExprHandle& stride,
435
+ int lanes) {
436
+ if (stride.dtype() != base.dtype()) {
437
+ throw malformed_input("Bad stride in Ramp");
438
+ }
439
+ return ExprHandle(alloc<Ramp>(base.node(), stride.node(), lanes));
440
+ }
441
+ int lanes() const {
442
+ return lanes_;
443
+ }
444
+
445
+ Ramp(ExprPtr base, ExprPtr stride, int lanes)
446
+ : ExprNodeBase(Dtype(base->dtype(), lanes)),
447
+ base_(std::move(base)),
448
+ stride_(std::move(stride)),
449
+ lanes_(lanes) {}
450
+
451
+ private:
452
+ ExprPtr base_;
453
+ ExprPtr stride_;
454
+ int lanes_;
455
+ };
456
+
457
+ class TORCH_API Load : public ExprNode<Load> {
458
+ public:
459
+ VarPtr base_handle() const {
460
+ return buf_->base_handle();
461
+ }
462
+ std::vector<ExprPtr> indices() const {
463
+ return indices_;
464
+ }
465
+ ExprPtr flat_index() const {
466
+ TORCH_CHECK(indices_.size() == 1, "Indices haven't been flattened.");
467
+ return indices_[0];
468
+ }
469
+ BufPtr buf() const {
470
+ return buf_;
471
+ }
472
+
473
+ void set_buf(BufPtr buf) {
474
+ buf_ = std::move(buf);
475
+ }
476
+
477
+ void set_indices(std::vector<ExprPtr> indices) {
478
+ indices_ = std::move(indices);
479
+ }
480
+
481
+ static ExprHandle make(
482
+ Dtype dtype,
483
+ const BufHandle& buf,
484
+ const std::vector<ExprHandle>& indices);
485
+ static ExprHandle make(
486
+ const BufHandle& buf,
487
+ const std::vector<ExprHandle>& indices);
488
+
489
+ Load(Dtype dtype, BufPtr base_handle, std::vector<ExprPtr> indices);
490
+ Load(BufPtr base_handle, const std::vector<ExprPtr>& indices);
491
+
492
+ private:
493
+ BufPtr buf_;
494
+ std::vector<ExprPtr> indices_;
495
+ };
496
+
497
+ class TORCH_API Broadcast : public ExprNode<Broadcast> {
498
+ public:
499
+ ExprPtr value() const {
500
+ return value_;
501
+ }
502
+
503
+ void set_value(ExprPtr value) {
504
+ value_ = std::move(value);
505
+ }
506
+
507
+ int lanes() const {
508
+ return lanes_;
509
+ }
510
+ static ExprHandle make(const ExprHandle& value, int lanes) {
511
+ return ExprHandle(alloc<Broadcast>(value.node(), lanes));
512
+ }
513
+ Broadcast(ExprPtr value, int lanes)
514
+ : ExprNodeBase(Dtype(value->dtype(), lanes)),
515
+ value_(std::move(value)),
516
+ lanes_(lanes) {}
517
+
518
+ private:
519
+ ExprPtr value_;
520
+ int lanes_;
521
+ };
522
+
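// A minimal sketch (not part of the original header) of building the vector
// expressions documented above; `vector_expr_example` is an illustrative name.
void vector_expr_example() {
  // Ramp: [0, 0 + 1*2, 0 + 2*2, 0 + 3*2] == [0, 2, 4, 6], int with 4 lanes.
  ExprHandle ramp = Ramp::make(IntImm::make(0), IntImm::make(2), /*lanes=*/4);
  // Broadcast: splat one float value across 4 lanes.
  ExprHandle splat = Broadcast::make(FloatImm::make(1.f), /*lanes=*/4);
  (void)ramp;
  (void)splat;
}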
523
+ class TORCH_API IfThenElse : public ExprNode<IfThenElse> {
524
+ public:
525
+ ExprPtr condition() const {
526
+ return condition_;
527
+ }
528
+
529
+ // Lazily evaluated only if condition is true
530
+ ExprPtr true_value() const {
531
+ return true_;
532
+ }
533
+
534
+ // Lazily evaluated only if condition is false
535
+ ExprPtr false_value() const {
536
+ return false_;
537
+ }
538
+
539
+ void set_condition(ExprPtr condition) {
540
+ condition_ = std::move(condition);
541
+ }
542
+
543
+ void set_true_value(ExprPtr true_value) {
544
+ true_ = std::move(true_value);
545
+ }
546
+
547
+ void set_false_value(ExprPtr false_value) {
548
+ false_ = std::move(false_value);
549
+ }
550
+
551
+ static ExprHandle make(
552
+ const ExprHandle& c,
553
+ const ExprHandle& t,
554
+ const ExprHandle& f) {
555
+ if (!c.dtype().is_integral()) {
556
+ throw unsupported_dtype();
557
+ }
558
+ if (c.dtype().lanes() != 1) {
559
+ throw unsupported_dtype();
560
+ }
561
+ if (t.dtype() != f.dtype()) {
562
+ throw malformed_input("Bad dtype in IfThenElse");
563
+ }
564
+ return ExprHandle(alloc<IfThenElse>(c.node(), t.node(), f.node()));
565
+ }
566
+
567
+ IfThenElse(ExprPtr c, ExprPtr t, ExprPtr f)
568
+ : ExprNodeBase(t->dtype()),
569
+ condition_(std::move(c)),
570
+ true_(std::move(t)),
571
+ false_(std::move(f)) {}
572
+
573
+ private:
574
+ ExprPtr condition_;
575
+ ExprPtr true_;
576
+ ExprPtr false_;
577
+ };
578
+
579
+ class TORCH_API CompareSelect : public ExprNode<CompareSelect> {
580
+ public:
581
+ CompareSelectOperation compare_select_op() const {
582
+ return compare_op_;
583
+ }
584
+ ExprPtr lhs() const {
585
+ return this->lhs_;
586
+ }
587
+ ExprPtr rhs() const {
588
+ return this->rhs_;
589
+ }
590
+ ExprPtr ret_val1() const {
591
+ return this->ret_val1_;
592
+ }
593
+ ExprPtr ret_val2() const {
594
+ return this->ret_val2_;
595
+ }
596
+
597
+ void set_lhs(ExprPtr lhs) {
598
+ lhs_ = std::move(lhs);
599
+ }
600
+
601
+ void set_rhs(ExprPtr rhs) {
602
+ rhs_ = std::move(rhs);
603
+ }
604
+
605
+ void set_ret_val1(ExprPtr ret_val1) {
606
+ ret_val1_ = std::move(ret_val1);
607
+ }
608
+
609
+ void set_ret_val2(ExprPtr ret_val2) {
610
+ ret_val2_ = std::move(ret_val2);
611
+ }
612
+
613
+ CompareSelectBias bias() const {
614
+ return bias_;
615
+ }
616
+
617
+ static ExprHandle make(
618
+ const ExprHandle& lhs,
619
+ const ExprHandle& rhs,
620
+ CompareSelectOperation cmp_op,
621
+ CompareSelectBias bias = kUnbiased) {
622
+ if (lhs.dtype() != rhs.dtype()) {
623
+ throw malformed_input("bad dtype in CompareSelect");
624
+ }
625
+ return ExprHandle(alloc<CompareSelect>(
626
+ lhs.node(),
627
+ rhs.node(),
628
+ IntImm::make(1).node(),
629
+ IntImm::make(0).node(),
630
+ cmp_op,
631
+ bias));
632
+ }
633
+
634
+ static ExprHandle make(
635
+ const ExprHandle& lhs,
636
+ const ExprHandle& rhs,
637
+ const ExprHandle& ret_val1,
638
+ const ExprHandle& ret_val2,
639
+ CompareSelectOperation cmp_op,
640
+ CompareSelectBias bias = kUnbiased) {
641
+ if (lhs.dtype() != rhs.dtype() || ret_val1.dtype() != ret_val2.dtype()) {
642
+ throw malformed_input("bad dtype in CompareSelect");
643
+ }
644
+ return ExprHandle(alloc<CompareSelect>(
645
+ lhs.node(),
646
+ rhs.node(),
647
+ ret_val1.node(),
648
+ ret_val2.node(),
649
+ cmp_op,
650
+ bias));
651
+ }
652
+
653
+ CompareSelect(
654
+ ExprPtr lhs,
655
+ ExprPtr rhs,
656
+ ExprPtr ret_val1,
657
+ ExprPtr ret_val2,
658
+ CompareSelectOperation cmp_op,
659
+ CompareSelectBias bias = kUnbiased)
660
+ : ExprNodeBase(ret_val1->dtype()),
661
+ lhs_(std::move(lhs)),
662
+ rhs_(std::move(rhs)),
663
+ ret_val1_(std::move(ret_val1)),
664
+ ret_val2_(std::move(ret_val2)),
665
+ compare_op_(cmp_op),
666
+ bias_(bias) {}
667
+
668
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
669
+ CompareSelect(
670
+ ExprPtr lhs,
671
+ ExprPtr rhs,
672
+ CompareSelectOperation cmp_op,
673
+ CompareSelectBias bias = kUnbiased)
674
+ : ExprNodeBase(kInt),
675
+ lhs_(std::move(lhs)),
676
+ rhs_(std::move(rhs)),
677
+ ret_val1_(alloc<IntImm>(1)),
678
+ ret_val2_(alloc<IntImm>(0)),
679
+ compare_op_(cmp_op),
680
+ bias_(bias) {}
681
+
682
+ private:
683
+ ExprPtr lhs_;
684
+ ExprPtr rhs_;
685
+ ExprPtr ret_val1_;
686
+ ExprPtr ret_val2_;
687
+ CompareSelectOperation compare_op_;
688
+ CompareSelectBias bias_;
689
+ };
690
+
691
+ enum IntrinsicsOp {
692
+ kSin,
693
+ kCos,
694
+ kTan,
695
+ kAsin,
696
+ kAcos,
697
+ kAtan,
698
+ kAtan2,
699
+ kSinh,
700
+ kCosh,
701
+ kTanh,
702
+ kSigmoid,
703
+ kExp,
704
+ kExpm1,
705
+ kAbs,
706
+ kLog,
707
+ kLog2,
708
+ kLog10,
709
+ kLog1p,
710
+ kErf,
711
+ kErfc,
712
+ kSqrt,
713
+ kRsqrt,
714
+ kPow,
715
+ kCeil,
716
+ kFloor,
717
+ kRound,
718
+ kTrunc,
719
+ kFmod,
720
+ kRemainder,
721
+ kLgamma,
722
+ kFrac,
723
+ kIsNan,
724
+ kRand, // We need more discussion on this. Should we consider making it stateful?
725
+ kMaxIntrinsicsOp,
726
+ };
727
+
728
+ class TORCH_API Intrinsics : public ExprNode<Intrinsics> {
729
+ public:
730
+ static ExprHandle make(IntrinsicsOp op_type, const ExprHandle& v1) {
731
+ return ExprHandle(alloc<Intrinsics>(op_type, v1.node()));
732
+ }
733
+
734
+ static ExprHandle make(
735
+ IntrinsicsOp op_type,
736
+ const ExprHandle& v1,
737
+ const ExprHandle& v2) {
738
+ return ExprHandle(alloc<Intrinsics>(op_type, v1.node(), v2.node()));
739
+ }
740
+
741
+ static ExprHandle make(
742
+ IntrinsicsOp op_type,
743
+ const std::vector<ExprHandle>& params) {
744
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
745
+ std::vector<ExprPtr> params_nodes(params.size());
746
+ for (size_t i = 0; i < params.size(); i++) {
747
+ params_nodes[i] = params[i].node();
748
+ }
749
+ return ExprHandle(alloc<Intrinsics>(op_type, params_nodes));
750
+ }
751
+
752
+ static ExprHandle make(IntrinsicsOp op_type, Dtype dtype) {
753
+ return ExprHandle(alloc<Intrinsics>(op_type, dtype));
754
+ }
755
+
756
+ IntrinsicsOp op_type() const {
757
+ return op_type_;
758
+ }
759
+
760
+ std::string func_name() const {
761
+ switch (op_type()) {
762
+ case kSin:
763
+ return "sin";
764
+ case kCos:
765
+ return "cos";
766
+ case kTan:
767
+ return "tan";
768
+ case kAsin:
769
+ return "asin";
770
+ case kAcos:
771
+ return "acos";
772
+ case kAtan:
773
+ return "atan";
774
+ case kAtan2:
775
+ return "atan2";
776
+ case kSinh:
777
+ return "sinh";
778
+ case kCosh:
779
+ return "cosh";
780
+ case kTanh:
781
+ return "tanh";
782
+ case kSigmoid:
783
+ return "sigmoid";
784
+ case kExp:
785
+ return "exp";
786
+ case kAbs:
787
+ return "abs";
788
+ case kLog:
789
+ return "log";
790
+ case kLog2:
791
+ return "log2";
792
+ case kLog10:
793
+ return "log10";
794
+ case kLog1p:
795
+ return "log1p";
796
+ case kErf:
797
+ return "erf";
798
+ case kSqrt:
799
+ return "sqrt";
800
+ case kRsqrt:
801
+ return "rsqrt";
802
+ case kPow:
803
+ return "pow";
804
+ case kCeil:
805
+ return "ceil";
806
+ case kFloor:
807
+ return "floor";
808
+ case kRound:
809
+ return "round";
810
+ case kTrunc:
811
+ return "trunc";
812
+ case kRand:
813
+ return "rand";
814
+ case kFmod:
815
+ return "fmod";
816
+ case kRemainder:
817
+ return "remainder";
818
+ case kLgamma:
819
+ return "lgamma";
820
+ case kExpm1:
821
+ return "expm1";
822
+ case kErfc:
823
+ return "erfc";
824
+ case kFrac:
825
+ return "frac";
826
+ case kIsNan:
827
+ return "isnan";
828
+ default:
829
+ throw std::runtime_error(
830
+ "invalid op_type: " + c10::to_string(op_type()));
831
+ }
832
+ }
833
+
834
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
835
+ Intrinsics(IntrinsicsOp op_type, Dtype dtype)
836
+ : ExprNodeBase(IntrinsicsDtype(op_type, dtype)),
837
+ params_({}),
838
+ op_type_(op_type) {
839
+ if (OpArgCount(op_type) != 0) {
840
+ throw malformed_input("bad arg count in Intrinsics");
841
+ }
842
+ }
843
+
844
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
845
+ Intrinsics(IntrinsicsOp op_type, ExprPtr v1)
846
+ : ExprNodeBase(IntrinsicsDtype(op_type, v1->dtype())),
847
+ params_({std::move(v1)}),
848
+ op_type_(op_type) {
849
+ if (OpArgCount(op_type) != 1) {
850
+ throw malformed_input("bad arg count in Intrinsics");
851
+ }
852
+ }
853
+
854
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
855
+ Intrinsics(IntrinsicsOp op_type, ExprPtr v1, ExprPtr v2)
856
+ : ExprNodeBase(IntrinsicsDtype(op_type, v1->dtype(), v2->dtype())),
857
+ params_({std::move(v1), std::move(v2)}),
858
+ op_type_(op_type) {
859
+ if (OpArgCount(op_type) != 2) {
860
+ throw malformed_input("bad arg count in Intrinsics");
861
+ }
862
+ }
863
+
864
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
865
+ Intrinsics(IntrinsicsOp op_type, const std::vector<ExprPtr>& params)
866
+ : ExprNodeBase(IntrinsicsDtype(op_type, params)),
867
+ params_(params),
868
+ op_type_(op_type) {
869
+ if (OpArgCount(op_type) != nparams()) {
870
+ throw malformed_input("bad arg count in Intrinsics");
871
+ }
872
+ }
873
+
874
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
875
+ Intrinsics(
876
+ IntrinsicsOp op_type,
877
+ Dtype dtype,
878
+ const std::vector<ExprPtr>& params)
879
+ : ExprNodeBase(IntrinsicsDtype(op_type, dtype)),
880
+ params_(params),
881
+ op_type_(op_type) {
882
+ if (OpArgCount(op_type) != nparams()) {
883
+ throw malformed_input("bad arg count in Intrinsics");
884
+ }
885
+ }
886
+
887
+ bool isPure() const {
888
+ return op_type_ != kRand;
889
+ }
890
+
891
+ int nparams() const {
892
+ return params_.size();
893
+ }
894
+
895
+ ExprPtr param(int index) const {
896
+ return params_[index];
897
+ }
898
+ const std::vector<ExprPtr>& params() const {
899
+ return params_;
900
+ }
901
+
902
+ void set_params(std::vector<ExprPtr> params) {
903
+ params_ = std::move(params);
904
+ }
905
+
906
+ static int OpArgCount(IntrinsicsOp op_type);
907
+
908
+ private:
909
+ static Dtype IntrinsicsDtype(IntrinsicsOp op_type, Dtype dt1);
910
+ static Dtype IntrinsicsDtype(IntrinsicsOp op_type, Dtype dt1, Dtype dt2);
911
+ static Dtype IntrinsicsDtype(
912
+ IntrinsicsOp op_type,
913
+ const std::vector<ExprPtr>& params);
914
+
915
+ std::vector<ExprPtr> params_;
916
+ IntrinsicsOp op_type_;
917
+ };
918
+
919
+ TORCH_API std::vector<ExprPtr> ExprHandleVectorToExprVector(
920
+ const std::vector<ExprHandle>&);
921
+ TORCH_API std::vector<ExprHandle> ExprVectorToExprHandleVector(
922
+ const std::vector<ExprPtr>&);
923
+ TORCH_API std::vector<VarPtr> VarHandleVectorToVarVector(
924
+ const std::vector<VarHandle>&);
925
+ TORCH_API std::vector<VarHandle> VarVectorToVarHandleVector(
926
+ const std::vector<VarPtr>&);
927
+ TORCH_API ExprPtr flatten_index(
928
+ const std::vector<ExprPtr>& dims,
929
+ const std::vector<ExprPtr>& indices,
930
+ const std::vector<ExprPtr>& strides);
931
+
932
+ } // namespace tensorexpr
933
+ } // namespace jit
934
+ } // namespace torch
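For orientation, here is a minimal plain-integer sketch of the flattening that flatten_index is assumed to perform symbolically over ExprPtr nodes: the indices are dotted with the strides, with dims presumably used for rank and bounds checking. The helper name flatten_index_ref is hypothetical and not part of the header above.

#include <cstdint>
#include <vector>

// Hypothetical reference implementation: an integer analogue of the symbolic
// flatten_index(dims, indices, strides) declared above (an assumption about
// its behavior, not the actual ExprPtr-based implementation).
inline int64_t flatten_index_ref(
    const std::vector<int64_t>& indices,
    const std::vector<int64_t>& strides) {
  int64_t flat = 0;
  for (size_t i = 0; i < indices.size(); ++i) {
    flat += indices[i] * strides[i];
  }
  return flat;
}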
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir_cloner.h ADDED
@@ -0,0 +1,65 @@
1
+ #pragma once
2
+ #include <c10/core/ScalarType.h>
3
+ #include <torch/csrc/Export.h>
4
+ #include <vector>
5
+
6
+ #include <torch/csrc/jit/tensorexpr/ir_mutator.h>
7
+
8
+ namespace torch {
9
+ namespace jit {
10
+ namespace tensorexpr {
11
+
12
+ class TORCH_API IRCloner : public IRMutator {
13
+ public:
14
+ ~IRCloner() override = default;
15
+ ExprPtr mutate(AddPtr v) override;
16
+ ExprPtr mutate(SubPtr v) override;
17
+ ExprPtr mutate(MulPtr v) override;
18
+ ExprPtr mutate(DivPtr v) override;
19
+ ExprPtr mutate(ModPtr v) override;
20
+ ExprPtr mutate(MaxPtr v) override;
21
+ ExprPtr mutate(MinPtr v) override;
22
+ ExprPtr mutate(AndPtr v) override;
23
+ ExprPtr mutate(OrPtr v) override;
24
+ ExprPtr mutate(XorPtr v) override;
25
+ ExprPtr mutate(LshiftPtr v) override;
26
+ ExprPtr mutate(RshiftPtr v) override;
27
+ ExprPtr mutate(CompareSelectPtr v) override;
28
+ #define IMM_MUTATE_DECLARE(Type, Name) ExprPtr mutate(Name##ImmPtr v) override;
29
+ AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, IMM_MUTATE_DECLARE);
30
+ #undef IMM_MUTATE_DECLARE
31
+ ExprPtr mutate(CastPtr v) override;
32
+ ExprPtr mutate(BitCastPtr v) override;
33
+ ExprPtr mutate(VarPtr v) override;
34
+ ExprPtr mutate(BufPtr v) override;
35
+ ExprPtr mutate(RampPtr v) override;
36
+ ExprPtr mutate(LoadPtr v) override;
37
+ ExprPtr mutate(BroadcastPtr v) override;
38
+ ExprPtr mutate(IfThenElsePtr v) override;
39
+ ExprPtr mutate(IntrinsicsPtr v) override;
40
+
41
+ ExprPtr mutate(TermPtr v) override;
42
+ ExprPtr mutate(PolynomialPtr v) override;
43
+ ExprPtr mutate(RoundOffPtr v) override;
44
+ ExprPtr mutate(MaxTermPtr v) override;
45
+ ExprPtr mutate(MinTermPtr v) override;
46
+
47
+ ExprPtr mutate(ReduceOpPtr v) override;
48
+
49
+ StmtPtr mutate(ForPtr v) override;
50
+ StmtPtr mutate(BlockPtr v) override;
51
+ StmtPtr mutate(StorePtr v) override;
52
+ StmtPtr mutate(AtomicAddPtr v) override;
53
+ StmtPtr mutate(SyncThreadsPtr v) override;
54
+ StmtPtr mutate(ExternalCallPtr v) override;
55
+ StmtPtr mutate(ExternalCallWithAllocPtr v) override;
56
+
57
+ StmtPtr mutate(AllocatePtr v) override;
58
+ StmtPtr mutate(FreePtr v) override;
59
+ StmtPtr mutate(LetPtr v) override;
60
+ StmtPtr mutate(CondPtr v) override;
61
+ };
62
+
63
+ } // namespace tensorexpr
64
+ } // namespace jit
65
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir_mutator.h ADDED
@@ -0,0 +1,66 @@
1
+ #pragma once
2
+ #include <c10/core/ScalarType.h>
3
+ #include <torch/csrc/Export.h>
4
+ #include <torch/csrc/jit/tensorexpr/fwd_decls.h>
5
+ #include <vector>
6
+
7
+ namespace torch {
8
+ namespace jit {
9
+ namespace tensorexpr {
10
+
11
+ class TORCH_API IRMutator {
12
+ public:
13
+ virtual ~IRMutator() = default;
14
+ virtual ExprPtr mutate(AddPtr v);
15
+ virtual ExprPtr mutate(SubPtr v);
16
+ virtual ExprPtr mutate(MulPtr v);
17
+ virtual ExprPtr mutate(DivPtr v);
18
+ virtual ExprPtr mutate(ModPtr v);
19
+ virtual ExprPtr mutate(MaxPtr v);
20
+ virtual ExprPtr mutate(MinPtr v);
21
+ virtual ExprPtr mutate(AndPtr v);
22
+ virtual ExprPtr mutate(OrPtr v);
23
+ virtual ExprPtr mutate(XorPtr v);
24
+ virtual ExprPtr mutate(LshiftPtr v);
25
+ virtual ExprPtr mutate(RshiftPtr v);
26
+ virtual ExprPtr mutate(CompareSelectPtr v);
27
+ #define IMM_MUTATE_DECLARE(Type, Name) virtual ExprPtr mutate(Name##ImmPtr v);
28
+ AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, IMM_MUTATE_DECLARE);
29
+ #undef IMM_MUTATE_DECLARE
30
+ virtual ExprPtr mutate(CastPtr v);
31
+ virtual ExprPtr mutate(BitCastPtr v);
32
+ virtual ExprPtr mutate(VarPtr v);
33
+ virtual ExprPtr mutate(BufPtr v);
34
+ virtual ExprPtr mutate(RampPtr v);
35
+ virtual ExprPtr mutate(LoadPtr v);
36
+ virtual ExprPtr mutate(BroadcastPtr v);
37
+ virtual ExprPtr mutate(IfThenElsePtr v);
38
+ virtual ExprPtr mutate(IntrinsicsPtr v);
39
+
40
+ virtual ExprPtr mutate(TermPtr v);
41
+ virtual ExprPtr mutate(PolynomialPtr v);
42
+ virtual ExprPtr mutate(RoundOffPtr v);
43
+ virtual ExprPtr mutate(MaxTermPtr v);
44
+ virtual ExprPtr mutate(MinTermPtr v);
45
+
46
+ virtual ExprPtr mutate(ReduceOpPtr v);
47
+
48
+ virtual StmtPtr mutate(ForPtr v);
49
+ virtual StmtPtr mutate(BlockPtr v);
50
+ virtual StmtPtr mutate(StorePtr v);
51
+ virtual StmtPtr mutate(AtomicAddPtr v);
52
+ virtual StmtPtr mutate(SyncThreadsPtr v);
53
+ virtual StmtPtr mutate(ExternalCallPtr v);
54
+ virtual StmtPtr mutate(ExternalCallWithAllocPtr v);
55
+
56
+ virtual StmtPtr mutate(AllocatePtr v);
57
+ virtual StmtPtr mutate(FreePtr v);
58
+ virtual StmtPtr mutate(FreeExtPtr v);
59
+ virtual StmtPtr mutate(PlacementAllocatePtr v);
60
+ virtual StmtPtr mutate(LetPtr v);
61
+ virtual StmtPtr mutate(CondPtr v);
62
+ };
63
+
64
+ } // namespace tensorexpr
65
+ } // namespace jit
66
+ } // namespace torch
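A minimal sketch of how a transformation pass would hook into IRMutator, assuming only the declarations above. CountingMutator is a hypothetical name; the override observes Add nodes and defers the actual rewriting to the base class.

#include <torch/csrc/jit/tensorexpr/ir_mutator.h>

namespace torch {
namespace jit {
namespace tensorexpr {

class CountingMutator : public IRMutator {
 public:
  using IRMutator::mutate; // keep the other overloads visible

  ExprPtr mutate(AddPtr v) override {
    ++num_adds_; // observe the node
    return IRMutator::mutate(v); // default behaviour: recurse and rebuild
  }

  int num_adds() const {
    return num_adds_;
  }

 private:
  int num_adds_ = 0;
};

} // namespace tensorexpr
} // namespace jit
} // namespace torch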
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir_printer.h ADDED
@@ -0,0 +1,130 @@
1
+ #pragma once
2
+
3
+ #include <ostream>
4
+
5
+ #include <torch/csrc/jit/tensorexpr/fwd_decls.h>
6
+ #include <torch/csrc/jit/tensorexpr/ir.h>
7
+ #include <torch/csrc/jit/tensorexpr/ir_visitor.h>
8
+ #include <torch/csrc/jit/tensorexpr/unique_name_manager.h>
9
+
10
+ namespace torch {
11
+ namespace jit {
12
+ namespace tensorexpr {
13
+
14
+ class Tensor;
15
+
16
+ class TORCH_API IRPrinter : public IRVisitor {
17
+ public:
18
+ explicit IRPrinter(std::ostream& os) : printer_os_(this, os) {}
19
+
20
+ void print(ExprHandle);
21
+ void print(Expr&);
22
+ void print(Stmt&);
23
+ void visit(AddPtr v) override;
24
+ void visit(SubPtr v) override;
25
+ void visit(MulPtr v) override;
26
+ void visit(DivPtr v) override;
27
+ void visit(ModPtr v) override;
28
+ void visit(MaxPtr v) override;
29
+ void visit(MinPtr v) override;
30
+ void visit(AndPtr v) override;
31
+ void visit(OrPtr v) override;
32
+ void visit(XorPtr v) override;
33
+ void visit(LshiftPtr v) override;
34
+ void visit(RshiftPtr v) override;
35
+ void visit(CompareSelectPtr v) override;
36
+ #define IMM_PRINT_VISIT(Type, Name) void visit(Name##ImmPtr v) override;
37
+ AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, IMM_PRINT_VISIT);
38
+ #undef IMM_PRINT_VISIT
39
+ void visit(CastPtr v) override;
40
+ void visit(BitCastPtr v) override;
41
+ void visit(VarPtr v) override;
42
+ void visit(BufPtr v) override;
43
+ void visit(RampPtr v) override;
44
+ void visit(LoadPtr v) override;
45
+ void visit(BroadcastPtr v) override;
46
+ void visit(IfThenElsePtr v) override;
47
+ void visit(IntrinsicsPtr v) override;
48
+ void visit(TermPtr v) override;
49
+ void visit(PolynomialPtr v) override;
50
+ void visit(RoundOffPtr v) override;
51
+ void visit(MaxTermPtr v) override;
52
+ void visit(MinTermPtr v) override;
53
+ void visit(ReduceOpPtr v) override;
54
+
55
+ void visit(AtomicAddPtr v) override;
56
+ void visit(SyncThreadsPtr v) override;
57
+ void visit(ExternalCallPtr v) override;
58
+ void visit(ExternalCallWithAllocPtr v) override;
59
+ void visit(StorePtr v) override;
60
+ void visit(ForPtr v) override;
61
+ void visit(CondPtr v) override;
62
+ void visit(BlockPtr v) override;
63
+ void visit(AllocatePtr v) override;
64
+ void visit(FreePtr v) override;
65
+ void visit(FreeExtPtr v) override;
66
+ void visit(PlacementAllocatePtr v) override;
67
+ void visit(LetPtr v) override;
68
+
69
+ // A child class may have a different rule for generating the dtype
70
+ // string, e.g. CUDA needs int64_t to be generated as long long.
71
+ virtual std::string dtypeToCppString(const Dtype& dtype);
72
+
73
+ std::ostream& os() {
74
+ return printer_os_;
75
+ }
76
+
77
+ class PrinterStream : public std::ostream {
78
+ public:
79
+ PrinterStream(IRPrinter* printer, std::ostream& os)
80
+ : std::ostream(os.rdbuf()), printer_(printer) {}
81
+
82
+ IRPrinter* printer() {
83
+ return printer_;
84
+ }
85
+
86
+ private:
87
+ IRPrinter* printer_ = nullptr;
88
+ };
89
+
90
+ protected:
91
+ std::string to_string(CompareSelectOperation op);
92
+
93
+ UniqueNameManager* name_manager() {
94
+ return &name_manager_;
95
+ }
96
+ void emitIndent();
97
+
98
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
99
+ int indent_ = 0;
100
+
101
+ private:
102
+ PrinterStream printer_os_;
103
+ UniqueNameManager name_manager_;
104
+ };
105
+
106
+ TORCH_API std::ostream& operator<<(std::ostream& stream, const Expr&);
107
+ TORCH_API std::ostream& operator<<(std::ostream& stream, const ExprHandle&);
108
+ TORCH_API std::ostream& operator<<(std::ostream& stream, const Stmt&);
109
+ TORCH_API std::ostream& operator<<(std::ostream& stream, const Tensor&);
110
+
111
+ TORCH_API void print(ExprPtr expr);
112
+ TORCH_API void print(StmtPtr stmt);
113
+ TORCH_API void print(const Tensor& t);
114
+
115
+ } // namespace tensorexpr
116
+ } // namespace jit
117
+ } // namespace torch
118
+
119
+ namespace std {
120
+
121
+ using torch::jit::tensorexpr::Expr;
122
+ using torch::jit::tensorexpr::ExprPtr;
123
+ using torch::jit::tensorexpr::Stmt;
124
+ using torch::jit::tensorexpr::StmtPtr;
125
+ using torch::jit::tensorexpr::Tensor;
126
+
127
+ TORCH_API std::string to_string(ExprPtr expr);
128
+ TORCH_API std::string to_string(StmtPtr stmt);
129
+ TORCH_API std::string to_string(const Tensor& t);
130
+ } // namespace std
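The comment on dtypeToCppString above notes that subclasses may spell dtypes differently. Below is a hedged sketch of that extension point, with a hypothetical subclass name and an assumed "int64_t" default spelling.

#include <string>
#include <torch/csrc/jit/tensorexpr/ir_printer.h>

namespace torch {
namespace jit {
namespace tensorexpr {

class LongLongPrinter : public IRPrinter {
 public:
  using IRPrinter::IRPrinter; // reuse the std::ostream& constructor

  std::string dtypeToCppString(const Dtype& dtype) override {
    std::string s = IRPrinter::dtypeToCppString(dtype);
    // Assumption: the default printer spells 64-bit integers as "int64_t";
    // a CUDA-style backend would want "long long" instead.
    return s == "int64_t" ? "long long" : s;
  }
};

} // namespace tensorexpr
} // namespace jit
} // namespace torch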
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir_simplifier.h ADDED
@@ -0,0 +1,554 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/tensorexpr/bounds_overlap.h>
4
+ #include <torch/csrc/jit/tensorexpr/eval.h>
5
+ #include <torch/csrc/jit/tensorexpr/hash_provider.h>
6
+ #include <torch/csrc/jit/tensorexpr/ir.h>
7
+ #include <torch/csrc/jit/tensorexpr/ir_mutator.h>
8
+ #include <torch/csrc/jit/tensorexpr/ir_visitor.h>
9
+ #include <torch/csrc/jit/tensorexpr/types.h>
10
+
11
+ #include <utility>
12
+
13
+ /* IR Simplification
14
+ *
15
+ * Simplifies expressions in two stages:
16
+ * 1. Recursively traverse the expression tree, grouping similar operations into Terms
17
+ * (combined via Multiplication) and Polynomials (combined via Addition). We
18
+ * reorder the components of each Term or Polynomial into a consistent order to
19
+ * allow combination or cancelling of like terms.
20
+ * 2. Once the format of the tree is minimal, expand each Term into a sequence
21
+ * of Muls, and each Polynomial into a sequence of Adds.
22
+ */
23
+
24
+ namespace torch {
25
+ namespace jit {
26
+ namespace tensorexpr {
27
+
28
+ // A bunch of helpers for determining the Dtype of the output of a multi-argument
29
+ // Term or Polynomial.
30
+ template <class ExprType>
31
+ Dtype promoteTypesVec(ExprPtr s, std::vector<ExprType>& v) {
32
+ Dtype t = s->dtype();
33
+ bool first = true;
34
+
35
+ for (const auto& e : v) {
36
+ if (first) {
37
+ t = Dtype(t.scalar_type(), e->dtype().lanes());
38
+ first = false;
39
+ }
40
+ t = promoteTypes(t, e->dtype());
41
+ }
42
+ return t;
43
+ }
44
+
45
+ template <class ExprType>
46
+ Dtype promoteTypesVec(std::vector<ExprType>& v) {
47
+ if (v.empty()) {
48
+ throw malformed_input("empty list of types");
49
+ }
50
+
51
+ Dtype t = v[0]->dtype();
52
+ for (const auto& e : v) {
53
+ t = promoteTypes(t, e->dtype());
54
+ }
55
+ return t;
56
+ }
57
+
58
+ template <class ExprType>
59
+ Dtype promoteTypesMap(
60
+ ExprPtr s,
61
+ std::unordered_map<SimplifierHashType, ExprType>& m) {
62
+ Dtype t = s->dtype();
63
+ bool first = true;
64
+ for (auto& e : m) {
65
+ if (first) {
66
+ t = Dtype(t.scalar_type(), e.second->dtype().lanes());
67
+ first = false;
68
+ }
69
+ t = promoteTypes(t, e.second->dtype());
70
+ }
71
+ return t;
72
+ }
73
+
74
+ template <class ExprType>
75
+ Dtype promoteTypesVar(ExprType e) {
76
+ return e->dtype();
77
+ }
78
+
79
+ template <class ExprType, class... Args>
80
+ Dtype promoteTypesVar(ExprType e, Args... es) {
81
+ Dtype lhs = e->dtype();
82
+ Dtype rhs = promoteTypesVar(es...);
83
+ if (e->isConstant()) {
84
+ lhs = Dtype(lhs.scalar_type(), rhs.lanes());
85
+ }
86
+
87
+ return promoteTypes(lhs, rhs);
88
+ }
89
+
90
+ // Uses the evaluator to fold an Expression with constant terms.
91
+ // E.g. evaluateOp(Add(3, 4)) => 7.
92
+ // Expr v must not have any unbound Vars.
93
+ inline ExprPtr evaluateOp(ExprPtr v) {
94
+ ExprHandle handle(v);
95
+ ExprEval<SimpleIREvaluator> eval(handle);
96
+
97
+ switch (v->dtype().scalar_type()) {
98
+ #define TYPE_CASE(Type, Name) \
99
+ case ScalarType::Name: { \
100
+ Type val = eval.value<Type>(); \
101
+ return getImmediateByType(v->dtype().scalar_type(), val); \
102
+ }
103
+ AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, TYPE_CASE);
104
+ #undef TYPE_CASE
105
+ default:
106
+ LOG(FATAL) << "Unsupported datatype: " << v->dtype();
107
+ return nullptr;
108
+ }
109
+ return nullptr;
110
+ }
111
+
112
+ // A Term represents a grouping of Exprs through multiplication.
113
+ // E.g. product(scalar, *variables).
114
+ class Term : public ExprNode<Term> {
115
+ public:
116
+ template <class... Args>
117
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
118
+ Term(HashProvider& hasher, ExprPtr s, Args... ts)
119
+ : ExprNodeBase(promoteTypesVar(s, ts...)), scalar_(s), hasher_(hasher) {
120
+ CHECK(s->isConstant());
121
+ addComponent(ts...);
122
+ sort();
123
+ }
124
+
125
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
126
+ Term(HashProvider& hasher, ExprPtr s, std::vector<ExprPtr> v)
127
+ : ExprNodeBase(promoteTypesVec(s, v)),
128
+ variables_(std::move(v)),
129
+ scalar_(s),
130
+ hasher_(hasher) {
131
+ sort();
132
+ }
133
+
134
+ // Convenience constructor from a map of hash -> var, used when merging Terms.
135
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
136
+ Term(
137
+ HashProvider& hasher,
138
+ ExprPtr s,
139
+ std::unordered_map<SimplifierHashType, ExprPtr> varmap)
140
+ : ExprNodeBase(promoteTypesMap(s, varmap)), scalar_(s), hasher_(hasher) {
141
+ for (auto& p : varmap) {
142
+ addComponent(p.second);
143
+ }
144
+ sort();
145
+ }
146
+
147
+ ExprPtr scalar() const {
148
+ return scalar_;
149
+ }
150
+ const std::vector<ExprPtr>& variables() const {
151
+ return variables_;
152
+ }
153
+ HashProvider& hasher() const {
154
+ return hasher_;
155
+ }
156
+
157
+ // Produce a hash of just the variable components of this term, to determine
158
+ // if it can be combined with another term.
159
+ SimplifierHashType hashVars() const;
160
+
161
+ private:
162
+ std::vector<ExprPtr> variables_;
163
+ ExprPtr scalar_;
164
+ HashProvider& hasher_;
165
+
166
+ void addComponent() {}
167
+ void addComponent(ExprPtr e) {
168
+ variables_.push_back(std::move(e));
169
+ }
170
+ template <class... Es>
171
+ void addComponent(ExprPtr e, Es&&... es) {
172
+ addComponent(std::move(e));
173
+ addComponent(std::forward<Es>(es)...);
174
+ }
175
+
176
+ // Sort by hash to normalize order of components.
177
+ void sort();
178
+ };
179
+
180
+ // Polynomial represents a grouping of Exprs by addition.
181
+ // E.g. sum(*variables, scalar).
182
+ // This would better be called Expression, but, naming conflict...
183
+ class Polynomial : public ExprNode<Polynomial> {
184
+ public:
185
+ template <class... Args>
186
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
187
+ Polynomial(HashProvider& hasher, ExprPtr s, Args... ts)
188
+ : ExprNodeBase(promoteTypesVar(s, ts...)), scalar_(s), hasher_(hasher) {
189
+ CHECK(s->isConstant());
190
+ addTerm(ts...);
191
+ sort();
192
+ }
193
+
194
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
195
+ Polynomial(HashProvider& hasher, ExprPtr s, std::vector<TermPtr> v)
196
+ : ExprNodeBase(promoteTypesVec(s, v)),
197
+ variables_(std::move(v)),
198
+ scalar_(s),
199
+ hasher_(hasher) {
200
+ sort();
201
+ }
202
+
203
+ // Helper constructor for list of terms with no scalar component.
204
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
205
+ Polynomial(HashProvider& hasher, std::vector<TermPtr> terms)
206
+ : ExprNodeBase(promoteTypesVec(terms)),
207
+ variables_(std::move(terms)),
208
+ scalar_(getImmediateByType(dtype(), 0)),
209
+ hasher_(hasher) {
210
+ sort();
211
+ }
212
+
213
+ // Convenience constructor for map of hash -> var, used when merging
214
+ // Polynomials.
215
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
216
+ Polynomial(
217
+ HashProvider& hasher,
218
+ ExprPtr s,
219
+ std::unordered_map<SimplifierHashType, TermPtr> varmap)
220
+ : ExprNodeBase(promoteTypesMap(s, varmap)), scalar_(s), hasher_(hasher) {
221
+ for (auto& p : varmap) {
222
+ addTerm(p.second);
223
+ }
224
+ sort();
225
+ }
226
+
227
+ ExprPtr scalar() const {
228
+ return scalar_;
229
+ }
230
+ const std::vector<TermPtr>& variables() const {
231
+ return variables_;
232
+ }
233
+ HashProvider& hasher() const {
234
+ return hasher_;
235
+ }
236
+
237
+ SimplifierHashType hashVars() const;
238
+
239
+ private:
240
+ std::vector<TermPtr> variables_;
241
+ ExprPtr scalar_;
242
+ HashProvider& hasher_;
243
+
244
+ void addTerm(TermPtr t) {
245
+ variables_.push_back(std::move(t));
246
+ }
247
+ template <class... Ts>
248
+ void addTerm(TermPtr t, Ts&&... ts) {
249
+ addTerm(std::move(t));
250
+ addTerm(std::forward<Ts>(ts)...);
251
+ }
252
+
253
+ // Sort by hash to normalize order of terms.
254
+ void sort();
255
+ };
256
+
257
+ class RoundOff : public BinaryOpNode<RoundOff> {
258
+ public:
259
+ RoundOff(ExprPtr lhs, ExprPtr rhs)
260
+ : BinaryOpNode(lhs, rhs, IRNodeType::kOther) {}
261
+ };
262
+
263
+ class MaxTerm : public ExprNode<MaxTerm> {
264
+ public:
265
+ template <class... Args>
266
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
267
+ MaxTerm(HashProvider& hasher, ExprPtr s, bool p, Args... ts)
268
+ : ExprNodeBase(s ? promoteTypesVar(s, ts...) : promoteTypesVar(ts...)),
269
+ scalar_(s),
270
+ hasher_(hasher),
271
+ propagate_nans_(p) {
272
+ addComponent(ts...);
273
+ uniquefy();
274
+ }
275
+
276
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
277
+ MaxTerm(HashProvider& hasher, ExprPtr s, bool p, std::vector<ExprPtr> v)
278
+ : ExprNodeBase(s ? promoteTypesVec(s, v) : promoteTypesVec(v)),
279
+ variables_(std::move(v)),
280
+ scalar_(s),
281
+ hasher_(hasher),
282
+ propagate_nans_(p) {
283
+ uniquefy();
284
+ }
285
+
286
+ bool propagate_nans() const {
287
+ return propagate_nans_;
288
+ }
289
+
290
+ ExprPtr scalar() const {
291
+ return scalar_;
292
+ }
293
+ const std::vector<ExprPtr>& variables() const {
294
+ return variables_;
295
+ }
296
+ HashProvider& hasher() const {
297
+ return hasher_;
298
+ }
299
+
300
+ private:
301
+ std::vector<ExprPtr> variables_;
302
+ ExprPtr scalar_;
303
+ HashProvider& hasher_;
304
+ bool propagate_nans_;
305
+
306
+ void addComponent() {}
307
+ void addComponent(ExprPtr e) {
308
+ variables_.push_back(std::move(e));
309
+ }
310
+ template <class... Es>
311
+ void addComponent(ExprPtr e, Es&&... es) {
312
+ addComponent(std::move(e));
313
+ addComponent(std::forward<Es>(es)...);
314
+ }
315
+
316
+ // Uniquefy the terms using their hash.
317
+ void uniquefy();
318
+ };
319
+
320
+ class MinTerm : public ExprNode<MinTerm> {
321
+ public:
322
+ template <class... Args>
323
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
324
+ MinTerm(HashProvider& hasher, ExprPtr s, bool p, Args... ts)
325
+ : ExprNodeBase(s ? promoteTypesVar(s, ts...) : promoteTypesVar(ts...)),
326
+ scalar_(s),
327
+ hasher_(hasher),
328
+ propagate_nans_(p) {
329
+ addComponent(ts...);
330
+ uniquefy();
331
+ }
332
+
333
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
334
+ MinTerm(HashProvider& hasher, ExprPtr s, bool p, std::vector<ExprPtr> v)
335
+ : ExprNodeBase(s ? promoteTypesVec(s, v) : promoteTypesVec(v)),
336
+ variables_(std::move(v)),
337
+ scalar_(s),
338
+ hasher_(hasher),
339
+ propagate_nans_(p) {
340
+ uniquefy();
341
+ }
342
+
343
+ bool propagate_nans() const {
344
+ return propagate_nans_;
345
+ }
346
+
347
+ ExprPtr scalar() const {
348
+ return scalar_;
349
+ }
350
+ const std::vector<ExprPtr>& variables() const {
351
+ return variables_;
352
+ }
353
+ HashProvider& hasher() const {
354
+ return hasher_;
355
+ }
356
+
357
+ private:
358
+ std::vector<ExprPtr> variables_;
359
+ ExprPtr scalar_;
360
+ HashProvider& hasher_;
361
+ bool propagate_nans_;
362
+
363
+ void addComponent() {}
364
+ void addComponent(ExprPtr e) {
365
+ variables_.push_back(std::move(e));
366
+ }
367
+ template <class... Es>
368
+ void addComponent(ExprPtr e, Es&&... es) {
369
+ addComponent(std::move(e));
370
+ addComponent(std::forward<Es>(es)...);
371
+ }
372
+
373
+ // Uniquefy the terms using their hash.
374
+ void uniquefy();
375
+ };
376
+
377
+ // Context-sensitive IR simplification
378
+ using VarBoundInfo = std::unordered_map<VarPtr, analysis::Bound>;
379
+
380
+ class TORCH_API SimplifierUnderContext : public IRMutator {
381
+ public:
382
+ ~SimplifierUnderContext() override = default;
383
+ // Add boundary info for index variables in for-loops
384
+ StmtPtr mutate(ForPtr v) override;
385
+
386
+ ExprPtr mutate(DivPtr v) override;
387
+ ExprPtr mutate(ModPtr v) override;
388
+ ExprPtr mutate(CompareSelectPtr v) override;
389
+ ExprPtr mutate(IfThenElsePtr v) override;
390
+
391
+ protected:
392
+ bool getLoopBoundInfo(const ExprPtr& expr, analysis::Bound* loop_bound_info);
393
+
394
+ protected:
395
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
396
+ HashProvider hasher_;
397
+ VarBoundInfo var_bound_info_;
398
+ };
399
+
400
+ // Stmt simplification should occur in both modes.
401
+ class TORCH_API PolynomialBase : public IRMutator {
402
+ public:
403
+ ~PolynomialBase() override = default;
404
+
405
+ StmtPtr mutate(BlockPtr v) override;
406
+
407
+ StmtPtr mutate(CondPtr v) override;
408
+
409
+ StmtPtr mutate(ForPtr v) override;
410
+
411
+ // Trivially factorize terms by GCD of scalar components.
412
+ TermPtr factorizePolynomial(PolynomialPtr poly);
413
+
414
+ HashProvider& hasher() {
415
+ return hasher_;
416
+ }
417
+
418
+ protected:
419
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
420
+ HashProvider hasher_;
421
+ };
422
+
423
+ // Simplify the IR by combining arithmetic expressions over common terms.
424
+ class TORCH_API PolynomialTransformer : public PolynomialBase {
425
+ public:
426
+ using PolynomialBase::mutate;
427
+ // Inserts term into the provided map, in the case of a hash collision
428
+ // combines the term with the existing and updates the map.
429
+ void addOrUpdateTerm(
430
+ std::unordered_map<SimplifierHashType, TermPtr>& varmap,
431
+ TermPtr term);
432
+
433
+ // Add Polynomial expressions, combining Terms representing the same
434
+ // variables.
435
+ ExprPtr addPolynomials(PolynomialPtr lhs, PolynomialPtr rhs);
436
+
437
+ // Insert a new Term into the provided polynomial. If the new term has
438
+ // common variables to an existing term it is combined.
439
+ ExprPtr insertTerm(PolynomialPtr poly, TermPtr term);
440
+
441
+ // Merge and simplify addition.
442
+ ExprPtr mutate(AddPtr v) override;
443
+
444
+ // Subtract one term from another, cancelling if necessary.
445
+ ExprPtr subTerms(TermPtr lhs, TermPtr rhs, bool negated);
446
+
447
+ // Subtract the RHS Polynomial from the LHS Polynomial, cancelling out where
448
+ // possible.
449
+ ExprPtr subPolynomials(PolynomialPtr lhs, PolynomialPtr rhs);
450
+
451
+ // Merge and simplify subtraction.
452
+ ExprPtr mutate(SubPtr v) override;
453
+
454
+ // Multiply two terms together, usually creating a new term with the variable
455
+ // lists concatenated.
456
+ TermPtr mulTerms(TermPtr lhs, TermPtr rhs);
457
+
458
+ // Multiply a Polynomial by a Term.
459
+ ExprPtr polyByTerm(PolynomialPtr poly, TermPtr term);
460
+
461
+ // Match a rounding pattern and create a RoundOff if found.
462
+ ExprPtr isRoundOff(ExprPtr lhs, ExprPtr rhs);
463
+
464
+ // Inserts a new component into a term, simplifying if possible.
465
+ ExprPtr insertIntoTerm(TermPtr term, ExprPtr expr);
466
+
467
+ // Merge and simplify multiplication.
468
+ ExprPtr mutate(MulPtr v) override;
469
+
470
+ ExprPtr mutate(DivPtr v) override;
471
+
472
+ ExprPtr mutate(ModPtr v) override;
473
+
474
+ ExprPtr mutate(AndPtr v) override;
475
+
476
+ ExprPtr mutate(XorPtr v) override;
477
+
478
+ ExprPtr mutate(LshiftPtr v) override;
479
+
480
+ ExprPtr mutate(RshiftPtr v) override;
481
+
482
+ ExprPtr mutate(MaxPtr v) override;
483
+
484
+ ExprPtr mutate(MinPtr v) override;
485
+
486
+ ExprPtr mutate(CompareSelectPtr v) override;
487
+
488
+ ExprPtr mutate(IntrinsicsPtr v) override;
489
+
490
+ ExprPtr mutate(CastPtr v) override;
491
+
492
+ ExprPtr mutate(IfThenElsePtr v) override;
493
+
494
+ static ExprPtr simplify(ExprPtr e);
495
+ static ExprHandle simplify(const ExprHandle& e);
496
+ static StmtPtr simplify(StmtPtr e);
497
+ };
498
+
499
+ // Expands Terms and Polynomial expressions into primitive operations.
500
+ // Does some simple factorization and reordering.
501
+ class TORCH_API TermExpander : public PolynomialBase {
502
+ PolynomialTransformer* simplifier_;
503
+ std::set<VarPtr> eliminated_allocations_;
504
+
505
+ public:
506
+ using PolynomialBase::mutate;
507
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
508
+ TermExpander(PolynomialTransformer* simplifier) : simplifier_(simplifier) {}
509
+ bool check_safe() {
510
+ return eliminated_allocations_.empty();
511
+ }
512
+
513
+ // Expand Terms out to a series of Muls.
514
+ ExprPtr mutate(TermPtr v) override;
515
+
516
+ // Expand Polynomials out to a series of Adds.
517
+ ExprPtr mutate(PolynomialPtr v) override;
518
+
519
+ // Expand MaxTerms to a series of Max ops.
520
+ ExprPtr mutate(MaxTermPtr v) override;
521
+
522
+ // Expand MinTerms to a series of Min ops.
523
+ ExprPtr mutate(MinTermPtr v) override;
524
+
525
+ // Expand RoundOff to its component: Mul(Div(lhs, rhs), rhs).
526
+ ExprPtr mutate(RoundOffPtr v) override;
527
+
528
+ // Eliminate zero length allocations.
529
+ StmtPtr mutate(AllocatePtr v) override;
530
+ StmtPtr mutate(FreePtr v) override;
531
+
532
+ // Override to enable condition fusing.
533
+ BlockPtr fuseConditions(BlockPtr v);
534
+ StmtPtr fuseSyncThreads(BlockPtr block);
535
+ StmtPtr mutate(BlockPtr v) override;
536
+ };
537
+
538
+ class TORCH_API IRSimplifier {
539
+ public:
540
+ static StmtPtr simplify(StmtPtr s);
541
+ static ExprPtr simplify(ExprPtr e);
542
+ static ExprHandle simplify(const ExprHandle& e) {
543
+ return ExprHandle(simplify(e.node()));
544
+ }
545
+ };
546
+
547
+ // Flattens the buf and runs the simplifier on the flattened dims.
548
+ ExprPtr buf_flat_size(BufPtr v);
549
+ // Returns true if expressions A and B can be simplified to an equal expression.
550
+ TORCH_API bool exprEquals(ExprPtr A, ExprPtr B);
551
+
552
+ } // namespace tensorexpr
553
+ } // namespace jit
554
+ } // namespace torch
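A short usage sketch of the public simplification entry point declared above, assuming the VarHandle/ExprHandle constructors and arithmetic overloads from expr.h; simplify_example is a hypothetical free function.

#include <torch/csrc/jit/tensorexpr/expr.h>
#include <torch/csrc/jit/tensorexpr/ir_simplifier.h>

using namespace torch::jit::tensorexpr;

void simplify_example() {
  VarHandle x("x", kInt);
  // x * 2 + x * 4 should collapse into a single term equivalent to x * 6.
  ExprHandle e = x * 2 + x * 4;
  ExprHandle simplified = IRSimplifier::simplify(e);
  // exprEquals (declared above) checks whether two expressions simplify to
  // the same expression.
  bool same = exprEquals(simplified.node(), (x * 6).node());
  (void)same;
}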
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir_verifier.h ADDED
@@ -0,0 +1,58 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/tensorexpr/fwd_decls.h>
4
+ #include <torch/csrc/jit/tensorexpr/ir_visitor.h>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+ namespace tensorexpr {
9
+
10
+ class Expr;
11
+ class ExprHandle;
12
+ class Mod;
13
+ class And;
14
+ class Or;
15
+ class Xor;
16
+ class Lshift;
17
+ class Rshift;
18
+ class CompareSelect;
19
+ class Ramp;
20
+ class Load;
21
+ class IfThenElse;
22
+ class Intrinsics;
23
+
24
+ class Stmt;
25
+ class ExternalCall;
26
+ class Store;
27
+ class For;
28
+ class Block;
29
+
30
+ class TORCH_API IRVerifier : public IRVisitor {
31
+ public:
32
+ IRVerifier() = default;
33
+
34
+ void visit(ModPtr v) override;
35
+ void visit(AndPtr v) override;
36
+ void visit(OrPtr v) override;
37
+ void visit(XorPtr v) override;
38
+ void visit(LshiftPtr v) override;
39
+ void visit(RshiftPtr v) override;
40
+ void visit(CompareSelectPtr v) override;
41
+ void visit(RampPtr v) override;
42
+ void visit(LoadPtr v) override;
43
+ void visit(IfThenElsePtr v) override;
44
+ void visit(IntrinsicsPtr v) override;
45
+
46
+ void visit(ExternalCallPtr v) override;
47
+ void visit(StorePtr v) override;
48
+ void visit(ForPtr v) override;
49
+ void visit(BlockPtr v) override;
50
+ };
51
+
52
+ TORCH_API void verify(StmtPtr);
53
+ TORCH_API void verify(ExprPtr);
54
+ TORCH_API void verify(ExprHandle);
55
+
56
+ } // namespace tensorexpr
57
+ } // namespace jit
58
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir_visitor.h ADDED
@@ -0,0 +1,64 @@
1
+ #pragma once
2
+ #include <c10/core/ScalarType.h>
3
+ #include <torch/csrc/Export.h>
4
+ #include <torch/csrc/jit/tensorexpr/fwd_decls.h>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+ namespace tensorexpr {
9
+
10
+ class TORCH_API IRVisitor {
11
+ public:
12
+ virtual ~IRVisitor() = default;
13
+ virtual void visit(AddPtr v);
14
+ virtual void visit(SubPtr v);
15
+ virtual void visit(MulPtr v);
16
+ virtual void visit(DivPtr v);
17
+ virtual void visit(ModPtr v);
18
+ virtual void visit(MaxPtr v);
19
+ virtual void visit(MinPtr v);
20
+ virtual void visit(AndPtr v);
21
+ virtual void visit(OrPtr v);
22
+ virtual void visit(XorPtr v);
23
+ virtual void visit(LshiftPtr v);
24
+ virtual void visit(RshiftPtr v);
25
+ virtual void visit(CompareSelectPtr v);
26
+
27
+ #define IMM_PRINT_VISIT(Type, Name) virtual void visit(Name##ImmPtr v);
28
+
29
+ AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, IMM_PRINT_VISIT)
30
+ #undef IMM_PRINT_VISIT
31
+
32
+ virtual void visit(CastPtr v);
33
+ virtual void visit(BitCastPtr v);
34
+ virtual void visit(VarPtr v);
35
+ virtual void visit(BufPtr v);
36
+ virtual void visit(RampPtr v);
37
+ virtual void visit(LoadPtr v);
38
+ virtual void visit(ForPtr v);
39
+ virtual void visit(BlockPtr v);
40
+ virtual void visit(StorePtr v);
41
+ virtual void visit(BroadcastPtr v);
42
+ virtual void visit(IfThenElsePtr v);
43
+ virtual void visit(IntrinsicsPtr v);
44
+ virtual void visit(AllocatePtr v);
45
+ virtual void visit(FreePtr v);
46
+ virtual void visit(FreeExtPtr v);
47
+ virtual void visit(PlacementAllocatePtr v);
48
+ virtual void visit(LetPtr v);
49
+ virtual void visit(CondPtr v);
50
+ virtual void visit(TermPtr v);
51
+ virtual void visit(PolynomialPtr v);
52
+ virtual void visit(RoundOffPtr v);
53
+ virtual void visit(MaxTermPtr v);
54
+ virtual void visit(MinTermPtr v);
55
+ virtual void visit(ReduceOpPtr v);
56
+ virtual void visit(AtomicAddPtr v);
57
+ virtual void visit(SyncThreadsPtr v);
58
+ virtual void visit(ExternalCallPtr v);
59
+ virtual void visit(ExternalCallWithAllocPtr v);
60
+ };
61
+
62
+ } // namespace tensorexpr
63
+ } // namespace jit
64
+ } // namespace torch
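A minimal read-only analysis built on IRVisitor, assuming only the declarations above; LoadCounter is a hypothetical name. Overriding one visit overload and calling the base keeps the default traversal into the node's children.

#include <cstddef>
#include <torch/csrc/jit/tensorexpr/ir_visitor.h>

namespace torch {
namespace jit {
namespace tensorexpr {

class LoadCounter : public IRVisitor {
 public:
  using IRVisitor::visit; // keep the other overloads visible

  void visit(LoadPtr v) override {
    ++num_loads_;
    IRVisitor::visit(v); // let the default traversal continue into children
  }

  std::size_t num_loads() const {
    return num_loads_;
  }

 private:
  std::size_t num_loads_ = 0;
};

} // namespace tensorexpr
} // namespace jit
} // namespace torch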
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/kernel.h ADDED
@@ -0,0 +1,382 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+ #include <torch/csrc/jit/passes/symbolic_shape_runtime_fusion.h>
5
+ #include <torch/csrc/jit/passes/utils/subgraph_utils.h>
6
+ #include <torch/csrc/jit/runtime/interpreter.h>
7
+ #include <torch/csrc/jit/tensorexpr/analysis.h>
8
+ #include <torch/csrc/jit/tensorexpr/codegen.h>
9
+ #include <torch/csrc/jit/tensorexpr/lowerings.h>
10
+ #include <torch/csrc/jit/tensorexpr/tensor.h>
11
+
12
+ namespace torch {
13
+ namespace jit {
14
+ namespace tensorexpr {
15
+
16
+ struct SmallSizeTPairHash {
17
+ public:
18
+ std::size_t operator()(const std::pair<size_t, size_t>& x) const {
19
+ // hashing input index and then dim index
20
+ return x.first * 128 + x.second;
21
+ }
22
+ };
23
+
24
+ // Returns true if the TE fuser supports this conv2d.
25
+ bool conv2dIsSupportedJit(const Node* node);
26
+ // Returns true if the TE fuser supports this conv2d with mkldnn prepacked conv.
27
+ bool mkldnnPrepackedConvIsSupportedJit(const Node* node);
28
+ // Returns true if the TE _convolution node is Conv2d.
29
+ bool isConv2d(const Node* node);
30
+ // Returns true if the TE fuser supports this matmul.
31
+ bool matmulIsSupported(const Node* node);
32
+ template <typename T>
33
+ inline std::vector<int64_t> bufferSizes(const T& t) {
34
+ std::vector<int64_t> sizes;
35
+ for (size_t i = 0; i < t->ndim(); i++) {
36
+ sizes.push_back(*intValue(t->dim(i)));
37
+ }
38
+ return sizes;
39
+ }
40
+
41
+ // Get the dimensions of a value.
42
+ std::vector<ExprHandle> valueShape(const ArgValue& v);
43
+
44
+ // If v is a tensor, broadcast it to match the shape of axes, or return
45
+ // directly if v is a constant.
46
+ ExprHandle tensorOrConstant(
47
+ const ArgValue& v,
48
+ const std::vector<ExprHandle>& axes);
49
+
50
+ int64_t normalizeAndCheckIndex(int64_t idx, int64_t list_size);
51
+
52
+ ExprHandle broadcast(BufHandle b, const std::vector<ExprHandle>& axes);
53
+
54
+ ExprHandle constant(const ArgValue& v);
55
+
56
+ std::vector<ExprHandle> computeIndicesToBroadcast(
57
+ const std::vector<ExprHandle>& outputAxes,
58
+ const std::vector<ExprHandle>& inputSizes);
59
+
60
+ inline std::string getArgValueName(const ArgValue& a) {
61
+ if (std::holds_alternative<tensorexpr::BufHandle>(a)) {
62
+ return "BufHandle";
63
+ } else if (std::holds_alternative<tensorexpr::VarHandle>(a)) {
64
+ return "VarHandle";
65
+ } else if (std::holds_alternative<double>(a)) {
66
+ return "double";
67
+ } else if (std::holds_alternative<int64_t>(a)) {
68
+ return "int64_t";
69
+ } else if (std::holds_alternative<bool>(a)) {
70
+ return "bool";
71
+ } else if (std::holds_alternative<BufList>(a)) {
72
+ return "BufList";
73
+ } else if (std::holds_alternative<DoubleList>(a)) {
74
+ return "DoubleList";
75
+ } else if (std::holds_alternative<IntList>(a)) {
76
+ return "IntList";
77
+ } else if (std::holds_alternative<ArgNone>(a)) {
78
+ return "None";
79
+ } else {
80
+ throw std::runtime_error("ArgValue type not handled in string conversion");
81
+ }
82
+ }
83
+
84
+ template <class T>
85
+ std::vector<T> convertVecArgValue(const std::vector<ArgValue>& v) {
86
+ std::vector<T> res;
87
+ for (auto& x : v) {
88
+ auto val = std::get_if<T>(&x);
89
+ if (val) {
90
+ res.push_back(*val);
91
+ } else {
92
+ throw std::runtime_error(
93
+ "vector type not homogeneous - found " + getArgValueName(x) +
94
+ ", expected " + getArgValueName(v[0]));
95
+ }
96
+ }
97
+ return res;
98
+ }
99
+
100
+ class TORCH_API TensorExprKernel {
101
+ struct ConstantDescr {
102
+ BufPtr buf;
103
+ // Only one of ptr and node is used at a time
104
+ // 1) ptr for the constant tensors
105
+ // 2) node for the constant custom class objects
106
+ void* ptr = nullptr;
107
+ Node* node = nullptr;
108
+ };
109
+
110
+ public:
111
+ // Constructor Params:
112
+ // * subgraph
113
+ // - the graph that needs to be compiled.
114
+ // * kernel_func_name
115
+ // - the name that should be used for the generated kernel.
116
+ // * custom_lowerings
117
+ // - map that represents custom lowering definitions for a set of ops.
118
+ // * symbolic_shape_inputs
119
+ // - a list of symbolic graph inputs that represent the symbolic dims of
120
+ // the input tensors.
121
+ // * pre_alloc
122
+ // - a flag to control pre-allocation of buffers.
123
+ explicit TensorExprKernel(
124
+ const std::shared_ptr<Graph>& subgraph,
125
+ const std::string& kernel_func_name,
126
+ std::unordered_map<c10::Symbol, NNCLoweringFunction> custom_lowerings =
127
+ {},
128
+ std::vector<int64_t> symbolic_shape_inputs = {},
129
+ bool pre_alloc = false,
130
+ std::unordered_map<
131
+ const torch::jit::Value*,
132
+ std::vector<torch::jit::StrideInput>> symbolic_strides = {});
133
+
134
+ explicit TensorExprKernel(
135
+ const std::shared_ptr<Graph>& subgraph,
136
+ std::unordered_map<c10::Symbol, NNCLoweringFunction> custom_lowerings =
137
+ {},
138
+ std::vector<int64_t> symbolic_shape_inputs = {},
139
+ bool pre_alloc = false,
140
+ std::unordered_map<
141
+ const torch::jit::Value*,
142
+ std::vector<torch::jit::StrideInput>> symbolic_strides = {})
143
+ : TensorExprKernel(
144
+ subgraph,
145
+ SubgraphUtils::generateNameForGraph(subgraph),
146
+ custom_lowerings,
147
+ symbolic_shape_inputs,
148
+ pre_alloc,
149
+ symbolic_strides) {}
150
+
151
+ void run(Stack& stack) const;
152
+ void runFast(
153
+ const std::vector<void*>& inputs,
154
+ const std::vector<void*>& outputs) const;
155
+ // Expected format of stack:
156
+ // ... <outputs> <inputs>
157
+ // i.e., output IValues must be below the input IValues in the stack.
158
+ void runWithAllocatedOutputs(Stack& stack) const;
159
+
160
+ void fallback(Stack& stack) const {
161
+ InterpreterState(code_).run(stack);
162
+ }
163
+ void recompile();
164
+
165
+ StmtPtr getCodeGenStmt();
166
+
167
+ std::string getCodeText(const std::string& attr = "") {
168
+ return codegen_->getCodeText(attr);
169
+ }
170
+
171
+ const std::shared_ptr<Graph> graph() {
172
+ return graph_;
173
+ }
174
+
175
+ const std::vector<ConstantDescr>& getConstantDescriptors() const {
176
+ return constants_;
177
+ }
178
+
179
+ const std::vector<CodeGen::BufferArg>& getBufferArgs() const {
180
+ return bufferArgs_;
181
+ }
182
+
183
+ const std::string& getKernelName() const {
184
+ return codegen_->kernel_func_name();
185
+ }
186
+
187
+ const std::vector<int64_t>& getSymbolicShapeInputs() const {
188
+ return symbolic_shape_inputs_;
189
+ }
190
+
191
+ private:
192
+ enum BackendType {
193
+ kUninitialized,
194
+ kSimpleIREval,
195
+ kLLVMCodeGen,
196
+ kCudaCodeGen,
197
+ kBlockCodeGen,
198
+ };
199
+
200
+ enum MemoryLayoutPolicy {
201
+ kContiguous,
202
+ kChannelsLastNdContiguous,
203
+ };
204
+
205
+ void compile();
206
+ void genInputDebugNames();
207
+ void runKernel(Stack& stack) const;
208
+
209
+ std::vector<ExprHandle> sizesForValue(const torch::jit::Value* v);
210
+
211
+ // These functions broadcast shape and also store a `hasBroadcast_` variable.
212
+ std::vector<ExprHandle> broadcastShapesMut(
213
+ const std::vector<ExprHandle>& a,
214
+ const std::vector<ExprHandle>& b);
215
+ std::vector<ExprHandle> broadcastShapesMut(
216
+ std::vector<std::vector<ExprHandle>> shapes);
217
+
218
+ ArgValue toArg(const torch::jit::Value* v) const;
219
+ ExprHandle constant(const torch::jit::Value* v);
220
+
221
+ Tensor computeValue(const torch::jit::Value* v);
222
+
223
+ void bindConstant(const torch::jit::Value* v);
224
+
225
+ StmtPtr transformLoops(BackendType backendType, StmtPtr st);
226
+
227
+ std::string getCodeGenName(BackendType backendType);
228
+
229
+ void getStaticOutputSizesAndStrides(
230
+ const at::ArrayRef<IValue>& inputs,
231
+ std::vector<std::vector<int64_t>>* static_sizes,
232
+ std::vector<std::vector<int64_t>>* static_strides) const;
233
+
234
+ std::vector<CodeGen::CallArg> prepareRunArgs(
235
+ const at::ArrayRef<IValue>& inputs,
236
+ std::vector<at::Tensor>& outputs) const;
237
+ BackendType inferBackendTypeFromDevice(at::Device device);
238
+
239
+ Tensor bindInput(const torch::jit::Value* input);
240
+ BlockPtr bindAllInputs();
241
+
242
+ // Deduce the memory layout policy to be propagated within
243
+ // NNC fusion group. The memory layout policy could be `kContiguous`
244
+ // or `kChannelsLastNdContiguous`.
245
+ // `kContiguous`: Always convert the non-contiguous input tensors and
246
+ // internal buffers to contiguous.
247
+ // `kChannelsLastNdContiguous`: Always convert the input tensors and
248
+ // internal buffers to channels-last contiguous.
249
+ // Currently, the rule is simple.
250
+ // If all the input and out tensors of NNC fusion group are channels-last
251
+ // contiguous, the policy is `kChannelsLastNdContiguous`. Otherwise, it
252
+ // is always `kContiguous`.
253
+ void deduceMemoryLayoutPolicy();
254
+
255
+ Tensor convertSymbolicOutputToCorrectStrides(torch::jit::Value* v);
256
+ Tensor convertStaticShapeOutputToCorrectStrides(torch::jit::Value* v);
257
+ Tensor convertSymbolicOutputToCorrectStrides(
258
+ const std::vector<ExprHandle>& sizes,
259
+ const std::vector<size_t>& sorted_stride_indices_descending,
260
+ const std::vector<ExprPtr>& strides,
261
+ BufPtr& buf);
262
+
263
+ NNCLoweringFunction getCustomLoweringFor(c10::Symbol op) const;
264
+ std::unordered_map<c10::Symbol, NNCLoweringFunction> getCustomLowerings()
265
+ const {
266
+ return custom_lowerings_;
267
+ }
268
+
269
+ // Allocate memory for intermediate buffers at compile time.
270
+ // Specifically, we pre-allocate memory for intermediate buffers with static
271
+ // size and manage these buffers in the way we manage JIT constant tensors:
272
+ // push the buf args into the stack so NNC IR can access them at runtime.
273
+ std::vector<BufPtr> preAllocIntermediateBufs(
274
+ const std::vector<BufPtr>& interm_bufs);
275
+
276
+ struct UnpackedTensorOptions {
277
+ c10::optional<c10::ScalarType> dtype;
278
+ c10::optional<c10::Layout> layout;
279
+ c10::optional<c10::Device> device;
280
+ c10::optional<bool> pinned_memory;
281
+
282
+ UnpackedTensorOptions(const c10::TensorOptions& opts)
283
+ : dtype(c10::optTypeMetaToScalarType(opts.dtype_opt())),
284
+ layout(opts.layout_opt()),
285
+ device(opts.device_opt()),
286
+ pinned_memory(opts.pinned_memory_opt()) {}
287
+ };
288
+
289
+ ExprHandle getVarForShape(const c10::ShapeSymbol& ss);
290
+ std::vector<ExprHandle> computeInputTensorDims(
291
+ const torch::jit::Value* input);
292
+ ExprHandle getStrideArg(size_t tensor_input, size_t stride_index);
293
+ std::vector<ExprHandle> sizesFromSymbolicShape(
294
+ const c10::SymbolicShape& shape);
295
+ std::vector<ExprHandle> getInputStrides(
296
+ const torch::jit::Value* input,
297
+ const std::vector<ExprHandle>& inputTensorDims);
298
+ std::vector<torch::jit::StrideInput>& getSymbolicStrideDesc(
299
+ const torch::jit::Value* value);
300
+
301
+ // Apply the optimizations to the graph owned by the current fusion group,
302
+ // like concatenation optimization, post-op fusion, and some other graph-level
303
+ // optimizations.
304
+ void optimizeOwningGraph();
305
+
306
+ int64_t nInputs_ = 0;
307
+ int64_t nOutputs_ = 0;
308
+ std::vector<CodeGen::BufferArg> bufferArgs_;
309
+ std::vector<std::vector<int64_t>> tensorOutputSizes_;
310
+ std::vector<std::vector<int64_t>> tensorOutputStrides_;
311
+ std::vector<torch::jit::StrideInput> tensorOutputStrideDesc_;
312
+ std::vector<bool> isOutputScalar_;
313
+ std::vector<UnpackedTensorOptions> tensorOutputTensorOptions_;
314
+ std::unordered_set<BufPtr> bufOutputs_;
315
+ std::unordered_set<BufPtr> bufsToBeParallelized_;
316
+ std::unordered_map<const torch::jit::Value*, BufPtr> bufs_;
317
+ std::unordered_map<const torch::jit::Value*, VarHandle> scalars_;
318
+ std::unordered_map<const torch::jit::Value*, std::string> input_name_map_;
319
+ std::unique_ptr<CodeGen> codegen_;
320
+ at::Device device_ = at::kCPU;
321
+ std::shared_ptr<Graph> graph_;
322
+ Code code_;
323
+ bool allow_fallback_{false};
324
+ bool use_fallback_{false};
325
+ bool hasRandom_{false};
326
+ bool hasBroadcast_{false};
327
+ std::unordered_map<const torch::jit::Value*, std::vector<ExprHandle>>
328
+ known_sizes_;
329
+
330
+ std::vector<std::vector<ExprHandle>> tensorOutputSymbolicSizes_;
331
+ // A map from ShapeSymbol.value() to the corresponding Var.
332
+ std::unordered_map<int64_t, VarHandle> shapeSymbolToVar_;
333
+ std::unordered_map<ExprPtr, size_t> shapeSymbolInputPos_;
334
+ // List of values corresponding to the ShapeSymbols that are inputs to
335
+ // kernel being compiled. The order of these values correspond to the order
336
+ // of the symbolic inputs at the end of the list of inputs to the kernel.
337
+ std::vector<int64_t> symbolic_shape_inputs_;
338
+ bool has_symbolic_shapes_{false};
339
+
340
+ std::vector<at::Tensor> unpacked_constant_tensors_;
341
+ std::vector<ConstantDescr> constants_;
342
+
343
+ std::unordered_map<c10::Symbol, NNCLoweringFunction> custom_lowerings_;
344
+ StmtPtr stmt_ = nullptr;
345
+ bool pre_alloc_{false};
346
+ std::string kernel_func_name_;
347
+
348
+ // index of stack, stride index of tensor that will be appended as a codegen
349
+ // arg
350
+ std::vector<std::pair<size_t, size_t>> input_stride_args_;
351
+ // map from <input index, tensor dimension> to stride as arg VarHandle
352
+ std::unordered_map<std::pair<size_t, size_t>, VarHandle, SmallSizeTPairHash>
353
+ strideArgToVar_;
354
+ std::unordered_map<
355
+ const torch::jit::Value*,
356
+ std::vector<torch::jit::StrideInput>>
357
+ symbolic_strides_;
358
+
359
+ // Memory layout to be propagated with fusion group
360
+ MemoryLayoutPolicy memory_layout_policy_ = MemoryLayoutPolicy::kContiguous;
361
+ };
362
+
363
+ TORCH_API int& getTECudaPointwiseLoopLevels();
364
+ TORCH_API int& getTECudaPointwiseBlockCount();
365
+ TORCH_API int& getTECudaPointwiseBlockSize();
366
+ TORCH_API bool& getTEGenerateBlockCode();
367
+ TORCH_API bool& getTEMustUseLLVMOnCPU();
368
+ TORCH_API bool fallbackAllowed();
369
+ TORCH_API bool setFallbackAllowed(bool value);
370
+ TORCH_API bool& getCatWoConditionals();
371
+ TORCH_API bool& getOptConditionals();
372
+
373
+ TORCH_API c10::optional<at::Device> pickDeviceType(
374
+ const at::ArrayRef<torch::jit::Value*>& inputs);
375
+
376
+ bool isContiguous(
377
+ const torch::jit::Value* v,
378
+ at::MemoryFormat memory_format = at::MemoryFormat::Contiguous);
379
+
380
+ } // namespace tensorexpr
381
+ } // namespace jit
382
+ } // namespace torch
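A hedged end-to-end sketch of driving TensorExprKernel through the stack interface declared above: parse a small subgraph, compile it, and run it. The IR string, shapes, and the irparser.h include path are illustrative assumptions drawn from common usage, not guaranteed by this header.

#include <memory>
#include <vector>

#include <ATen/ATen.h>
#include <torch/csrc/jit/ir/irparser.h>
#include <torch/csrc/jit/tensorexpr/kernel.h>

void run_kernel_example() {
  using namespace torch::jit;
  using namespace torch::jit::tensorexpr;

  // Illustrative fusion-group IR: elementwise multiply of two 4x4 floats.
  const auto graph_string = R"IR(
    graph(%a : Float(4, 4, strides=[4, 1], device=cpu),
          %b : Float(4, 4, strides=[4, 1], device=cpu)):
      %c : Float(4, 4, strides=[4, 1], device=cpu) = aten::mul(%a, %b)
      return (%c))IR";

  auto graph = std::make_shared<Graph>();
  parseIR(graph_string, &*graph);

  TensorExprKernel kernel(graph); // compilation happens in the constructor

  auto a = at::rand({4, 4});
  auto b = at::rand({4, 4});
  std::vector<c10::IValue> stack = {a, b};
  kernel.run(stack); // inputs are consumed; the output is left on the stack
  at::Tensor out = stack[0].toTensor();
  (void)out;
}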
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/llvm_codegen.h ADDED
@@ -0,0 +1,143 @@
1
+ #pragma once
2
+
3
+ #ifdef TORCH_ENABLE_LLVM
4
+ #include <torch/csrc/Export.h>
5
+
6
+ #include <torch/csrc/jit/tensorexpr/codegen.h>
7
+ #include <torch/csrc/jit/tensorexpr/ir.h>
8
+ #include <torch/csrc/jit/tensorexpr/ir_visitor.h>
9
+
10
+ #include <c10/util/Optional.h>
11
+
12
+ #include <unordered_map>
13
+ #include <vector>
14
+
15
+ namespace torch {
16
+ namespace jit {
17
+ namespace tensorexpr {
18
+
19
+ class LLVMCodeGenImpl;
20
+ class LLVMCodeGenCallee;
21
+
22
+ class TORCH_API LLVMCodeGen : public CodeGen {
23
+ public:
24
+ explicit LLVMCodeGen(
25
+ StmtPtr stmt,
26
+ const std::vector<BufferArg>& args,
27
+ at::Device device = at::kCPU,
28
+ const std::string& kernel_func_name = "func",
29
+ Dtype dtype = kInt,
30
+ c10::optional<std::string> triple = c10::nullopt,
31
+ c10::optional<std::string> cpu = c10::nullopt,
32
+ c10::optional<std::string> attrs = c10::nullopt);
33
+ explicit LLVMCodeGen(StmtPtr stmt);
34
+
35
+ LLVMCodeGen() = delete;
36
+ ~LLVMCodeGen() override;
37
+
38
+ // Cleans up all the memory used during LLVM code generation pass except
39
+ // the generated kernel. After calling this method, users should not call
40
+ // methods like `getCodeText` that require the LLVMCodeGenImpl data. However,
41
+ // users can continue to call this kernel using `call` and `call_raw`.
42
+ void cleanup_memory();
43
+
44
+ TORCH_API void call(const std::vector<CallArg>& args) override;
45
+ TORCH_API void call_raw(const std::vector<void*>& args) override;
46
+ TORCH_API void call_with_numel(void** args, int64_t numel) override;
47
+
48
+ at::Tensor empty_strided(
49
+ c10::IntArrayRef size,
50
+ c10::IntArrayRef stride,
51
+ c10::optional<c10::ScalarType> dtype_opt,
52
+ c10::optional<c10::Layout> layout_opt,
53
+ c10::optional<c10::Device> device_opt,
54
+ c10::optional<bool> pin_memory_opt) override;
55
+
56
+ template <typename T>
57
+ T value() {
58
+ return value<T>(nullptr);
59
+ }
60
+
61
+ template <typename T>
62
+ T value(std::vector<void*>& args) {
63
+ return value<T>(args.data());
64
+ }
65
+
66
+ template <typename T>
67
+ T value(void** args) {
68
+ T (*fp)(void**) = (T(*)(void**))getKernelAddress(callee_.get());
69
+ T rv = fp(args);
70
+ return rv;
71
+ }
72
+
73
+ std::string getCodeText(const std::string& attr = "") override;
74
+
75
+ private:
76
+ void* getKernelAddress(LLVMCodeGenCallee* callee);
77
+
78
+ std::unique_ptr<LLVMCodeGenCallee> callee_;
79
+ std::unique_ptr<LLVMCodeGenImpl> impl_;
80
+ };
81
+
82
+ struct TORCH_API LLVMCodeGenBuilder {
83
+ using BufferArg = CodeGen::BufferArg;
84
+
85
+ LLVMCodeGenBuilder(StmtPtr stmt, std::vector<BufferArg> args)
86
+ : stmt_(stmt), args_(std::move(args)) {}
87
+
88
+ LLVMCodeGenBuilder& device(at::Device device) {
89
+ device_ = device;
90
+ return *this;
91
+ }
92
+
93
+ LLVMCodeGenBuilder& kernelFuncName(std::string name) {
94
+ kernelFuncName_ = std::move(name);
95
+ return *this;
96
+ }
97
+
98
+ LLVMCodeGenBuilder& dtype(Dtype d) {
99
+ dtype_ = d;
100
+ return *this;
101
+ }
102
+
103
+ LLVMCodeGenBuilder& triple(std::string triple) {
104
+ triple_ = std::move(triple);
105
+ return *this;
106
+ }
107
+
108
+ LLVMCodeGenBuilder& cpu(std::string cpu) {
109
+ cpu_ = std::move(cpu);
110
+ return *this;
111
+ }
112
+
113
+ LLVMCodeGenBuilder& attrs(std::string attrs) {
114
+ attrs_ = std::move(attrs);
115
+ return *this;
116
+ }
117
+
118
+ std::unique_ptr<LLVMCodeGen> build() {
119
+ return std::make_unique<LLVMCodeGen>(
120
+ stmt_, args_, device_, kernelFuncName_, dtype_, triple_, cpu_, attrs_);
121
+ }
122
+
123
+ private:
124
+ StmtPtr stmt_;
125
+ std::vector<BufferArg> args_;
126
+ at::Device device_ = at::kCPU;
127
+ std::string kernelFuncName_ = "func";
128
+ Dtype dtype_ = kInt;
129
+ c10::optional<std::string> triple_ = c10::nullopt;
130
+ c10::optional<std::string> cpu_ = c10::nullopt;
131
+ c10::optional<std::string> attrs_ = c10::nullopt;
132
+ };
133
+
134
+ TORCH_API c10::optional<std::string>& LLVMTargetTriple();
135
+ TORCH_API c10::optional<std::string>& LLVMTargetCPU();
136
+ TORCH_API c10::optional<std::string>& LLVMTargetAttrs();
137
+ TORCH_API bool& LLVMAOTWorkflow();
138
+
139
+ } // namespace tensorexpr
140
+ } // namespace jit
141
+ } // namespace torch
142
+
143
+ #endif // TORCH_ENABLE_LLVM
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/llvm_jit.h ADDED
@@ -0,0 +1,77 @@
1
+ #pragma once
2
+
3
+ #ifdef TORCH_ENABLE_LLVM
4
+ #include <c10/macros/Macros.h>
5
+ #include <c10/util/Exception.h>
6
+ #include <c10/util/Optional.h>
7
+ #include <torch/csrc/Export.h>
8
+
9
+ C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wsuggest-override")
10
+ #include <llvm/ExecutionEngine/JITSymbol.h>
11
+ C10_DIAGNOSTIC_POP()
12
+ #include <llvm/ExecutionEngine/Orc/Core.h>
13
+ #include <llvm/ExecutionEngine/Orc/ThreadSafeModule.h>
14
+ #include <llvm/Target/TargetMachine.h>
15
+
16
+ #include <memory>
17
+ #include <string>
18
+
19
+ namespace torch {
20
+ namespace jit {
21
+ namespace tensorexpr {
22
+
23
+ inline std::string formatError(llvm::Error&& err, const char* msg) {
24
+ static constexpr const char* defaultErrorMsg =
25
+ "Unexpected failure in LLVM JIT";
26
+ std::string errorMsg(msg ? msg : defaultErrorMsg);
27
+ llvm::raw_string_ostream ss(errorMsg);
28
+ ss << ": " << err;
29
+ return ss.str();
30
+ }
31
+
32
+ template <typename T>
33
+ T assertSuccess(llvm::Expected<T> valOrErr, const char* msg = nullptr) {
34
+ TORCH_INTERNAL_ASSERT(valOrErr, formatError(valOrErr.takeError(), msg));
35
+ return std::move(*valOrErr);
36
+ }
37
+
38
+ inline void assertSuccess(llvm::Error err, const char* msg = nullptr) {
39
+ TORCH_INTERNAL_ASSERT(!err, formatError(std::move(err), msg));
40
+ }
41
+
42
+ } // namespace tensorexpr
43
+ } // namespace jit
44
+ } // namespace torch
45
+
46
+ namespace llvm {
47
+ namespace orc {
48
+
49
+ class PytorchLLVMJITImpl;
50
+
51
+ class TORCH_API PytorchLLVMJIT {
52
+ public:
53
+ PytorchLLVMJIT(
54
+ c10::optional<std::string> triple,
55
+ c10::optional<std::string> cpu,
56
+ c10::optional<std::string> attrs);
57
+ ~PytorchLLVMJIT();
58
+
59
+ void addModule(std::unique_ptr<Module> M, std::unique_ptr<LLVMContext> C);
60
+
61
+ JITSymbol findSymbol(const std::string Name);
62
+
63
+ bool hasSymbol(const std::string& Name);
64
+
65
+ TargetMachine& getTargetMachine();
66
+
67
+ const DataLayout& getDataLayout();
68
+
69
+ private:
70
+ // Use the PImpl idiom here to hide the no-rtti parts of the JIT structure.
71
+ std::unique_ptr<PytorchLLVMJITImpl> impl_;
72
+ };
73
+
74
+ } // end namespace orc
75
+ } // end namespace llvm
76
+
77
+ #endif // ENABLE LLVM
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/loopnest.h ADDED
@@ -0,0 +1,606 @@
1
+ #pragma once
2
+
3
+ #include <string>
4
+ #include <unordered_map>
5
+ #include <unordered_set>
6
+ #include <vector>
7
+
8
+ #include <torch/csrc/Export.h>
9
+ #include <torch/csrc/jit/tensorexpr/fwd_decls.h>
10
+
11
+ namespace torch {
12
+ namespace jit {
13
+ namespace tensorexpr {
14
+
15
+ class Expr;
16
+ class Var;
17
+ class Buf;
18
+ class Tensor;
19
+ class Function;
20
+ class Stmt;
21
+ class For;
22
+ class Block;
23
+ class Store;
24
+ class Dtype;
25
+
26
+ class TORCH_API LoopNest {
27
+ public:
28
+ // A constructor for building a LoopNest from a list of Tensors
29
+ LoopNest(
30
+ const std::vector<Tensor>& output_tensors,
31
+ const std::vector<Tensor>& tensors_to_compute);
32
+
33
+ // A convenience constructor for the case when all tensors are output tensors
34
+ LoopNest(const std::vector<Tensor>& output_tensors);
35
+
36
+ // A constructor for building a LoopNest from a Stmt and a list of output
37
+ // buffers.
38
+ LoopNest(StmtPtr stmt, std::unordered_set<BufPtr> output_bufs);
39
+
40
+ // A constructor for building a LoopNest from another loopnest. It clones the
41
+ // other loopnest's stmt.
42
+ LoopNest(const LoopNest& other);
43
+
44
+ StmtPtr root_stmt() const {
45
+ return root_stmt_;
46
+ }
47
+
48
+ std::vector<ForPtr> getLoopStmtsFor(Tensor) const;
49
+ std::vector<ForPtr> getLoopStmtsFor(BufPtr) const;
50
+ std::vector<ForPtr> getLoopStmtsFor(StmtPtr) const;
51
+ StmtPtr getLoopBodyFor(Tensor) const;
52
+ StmtPtr getLoopBodyFor(BufPtr) const;
53
+
54
+ // Returns the For stmt indexed by 'indices' in the 'root' For stmt.
55
+ // 'indices' indicates the path to the returned loop from 'root' in the AST, e.g.,
56
+ //
57
+ // root: for(int i...){
58
+ // j_loop: for (int j...){
59
+ // k1_loop: for (int k1...){
60
+ // A[i, j, k1] = ....
61
+ // }
62
+ // B[i, j] = ...
63
+ // k2_loop: for (int k2...){
64
+ // A[i, j, k2] = ...
65
+ // }
66
+ // }
67
+ // }
68
+ //
69
+ // the path from 'root' to 'j_loop' is [0]
70
+ // the path from 'root' to 'k1_loop' is [0, 0]
71
+ // the path from 'root' to 'k2_loop' is [0, 2]
72
+ ForPtr getLoopAt(ForPtr root, const std::vector<int>& indices) const;
73
+
74
+ // Returns the For stmt that is immediately enclosing the given stmt.
75
+ static ForPtr getParentLoop(StmtPtr st);
76
+
77
+ // Returns the list of For stmts corresponding to the loopnest that is
78
+ // enclosing the given stmt.
79
+ static std::vector<ForPtr> getEnclosingLoopNest(StmtPtr st);
80
+
81
+ // Returns a list of all Stmts that write to the given buf.
82
+ std::vector<StmtPtr> getAllWritesToBuf(BufPtr) const;
83
+
84
+ // The following methods return the For loops that contain writes to
85
+ // the given buf.
86
+ //
87
+ // For example, consider the following code:
88
+ // for i1
89
+ // for j1
90
+ // a[i1,j1] =
91
+ // for i2
92
+ // for j2
93
+ // for k2
94
+ // a[i2,j2] =
95
+ // for j3
96
+ // a[i2,j3] =
97
+
98
+ // Returns a list of For loops which directly contain a Stmt that writes
99
+ // to buf.
100
+ // For the above example:
101
+ // getAllInnermostLoopsWritingToBuf(a) => {j1, k2, j3}
102
+ std::vector<ForPtr> getAllInnermostLoopsWritingToBuf(BufPtr) const;
103
+
104
+ // Returns a list of For loopnests which contain a Stmt that writes to
105
+ // the given buf. Each loopnest here is a vector of For loops.
106
+ // For the above example:
107
+ // getAllLoopNestsWritingToBuf(a) => {{i1,j1}, {i2,j2,k2}, {i2,j3}}
108
+ std::vector<std::vector<ForPtr>> getAllLoopNestsWritingToBuf(BufPtr) const;
109
+
110
+ StmtPtr simplify();
111
+
112
+ // Sanitize variables and buffer names.
113
+ // The pass assigns predefined names for loop index variables
114
+ // (i,j,k,l,m,n,o,p,i1,j1,k1,...) and ensures these names are not conflicting
115
+ // anywhere. It also removes duplicates from other Buf and Var names as well
116
+ // as replaces illegal characters in them with underscores.
117
+ //
118
+ // Note: since it's currently technically possible to use the same variable
119
+ // as index in two different loops, this transformation finds such cases and
120
+ // introduces new variables to avoid duplication.
121
+ static StmtPtr sanitizeNames(StmtPtr s);
122
+
123
+ bool computeInline(StmtPtr s);
124
+ bool computeInline(BufPtr b);
125
+ void inlineIntermediateBufs(bool allow_duplicated_work);
126
+
127
+ // Optimizes conditionals.
128
+ //
129
+ // Currently, only the following pattern of conditionals is optimized.
130
+ // This corresponds to the conditional format that is generated to handle
131
+ // `aten::cat` op.
132
+ //
133
+ // for (int i = 0; i < 20; i++) {
134
+ // A[i] = IfThenElse(i<5 ? 1 : 0, B[i], C[i-5])
135
+ // }
136
+ //
137
+ // Constraints that must be satisfied for this optimization:
138
+ // * All conditions should be of the form "var < expr".
139
+ // * All conditions should have the same variable, say v.
140
+ // * The condition variable found should be the same as the inner-most
141
+ // loop variable. TODO: Remove this constraint.
142
+ // * If there are multiple stores that contain conditionals using the same
143
+ // loop variable, only the first conditional will be optimized.
144
+ // TODO: Remove this constraint.
145
+ bool optimizeConditionals();
146
+
147
+ // Splits the given loop into 2 nested loops with the given factor as the
148
+ // inner loop bound. If the factor does not evenly divide the loop bound,
149
+ // then the remaining iterations are extracted into a tail loop that is
150
+ // added after the given loop.
151
+ //
152
+ // For example, consider the following code:
153
+ // for (int i = 0; i < 100; ++i) {
154
+ // A[i] =
155
+ // }
156
+ //
157
+ // splitWithTail(i, 8, ...) will result in:
158
+ // for (int i_outer = 0; i_outer < 12; ++i_outer) {
159
+ // for (int i_inner = 0; i_inner < 8; ++i_inner) {
160
+ // A[i_outer * 8 + i_inner] =
161
+ // }
162
+ // }
163
+ // for (int i_tail = 0; i_tail < 4; ++i_tail) {
164
+ // A[i_tail + 96] =
165
+ // }
166
+ //
167
+ // The given loop will be transformed to the outer loop after splitting.
168
+ // So, the pointer to the input loop should be valid after splitting and
169
+ // will point to the outer loop. The `inner` and `tail` parameters will be
170
+ // set to point to the inner and tail loops that are generated.
171
+ static void splitWithTail(ForPtr f, int factor, ForPtr* inner, ForPtr* tail);
172
+ // A convenience wrapper when the caller does not need to access the
173
+ // split loops.
174
+ static void splitWithTail(ForPtr f, int factor);
175
+
176
+ // Splits the given loop into 2 nested loops with the given factor as the
177
+ // inner loop bound. If the factor does not evenly divide the loop bound,
178
+ // then a conditional is inserted into the body to handle the remaining
179
+ // iterations appropriately.
180
+ //
181
+ // For example, consider the following code:
182
+ // for (int i = 0; i < 100; ++i) {
183
+ // A[i] =
184
+ // }
185
+ //
186
+ // splitWithMask(i, 8, ...) will result in:
187
+ // for (int i_outer = 0; i_outer < 13; ++i_outer) {
188
+ // for (int i_inner = 0; i_inner < 8; ++i_inner) {
189
+ // if (i_outer * 8 + i_inner < 100) {
190
+ // A[i_outer * 8 + i_inner] =
191
+ // }
192
+ // }
193
+ // }
194
+ //
195
+ // The given loop will be transformed to the outer loop after splitting.
196
+ // So, the pointer to the input loop should be valid after splitting and
197
+ // will point to the outer loop. The `inner` parameter will be set to point
198
+ // to the inner loop that is generated.
199
+ static void splitWithMask(ForPtr f, int factor, ForPtr* inner);
200
+ // A convenience wrapper when the caller does not need to access the
201
+ // split loops.
202
+ static void splitWithMask(ForPtr f, int factor);
203
+
204
+ // The following methods support loop distribution.
205
+ // For example, consider the following code. This will be used to
206
+ // demonstrate the methods below.
207
+ //
208
+ // S0: for m
209
+ // S1: for i
210
+ // S2: A[i] = 0
211
+ // S3: for j
212
+ // S4: A[i] = A[i] +
213
+ // S5: B[i] = A[i]
214
+ // S6: for k
215
+ // S7: B[i] = B[i] +
216
+
217
+ // This method distributes the given loop over its body by splitting
218
+ // after every given pivot stmt.
219
+ //
220
+ // NOTE: Pivot stmts that are not in the given loop's body will be ignored.
221
+ //
222
+ // For the above example:
223
+ // distributeLoop(S1, {S3, S5})
224
+ // will result in:
225
+ // S0: for m
226
+ // S1: for i
227
+ // S2: A[i] = 0
228
+ // S3: for j
229
+ // S4: A[i] = A[i] +
230
+ // : for i
231
+ // S5: B[i] = A[i]
232
+ // : for i
233
+ // S6: for k
234
+ // S7: B[i] = B[i] +
235
+ static std::vector<ForPtr> distributeLoop(
236
+ ForPtr loop,
237
+ const std::unordered_set<StmtPtr>& pivots);
238
+
239
+ // This method distributes the given loop over every stmt in its body.
240
+ //
241
+ // For the above example:
242
+ // distributeLoop(S1)
243
+ // will result in:
244
+ // S0: for m
245
+ // S1: for i
246
+ // S2: A[i] = 0
247
+ // : for i
248
+ // S3: for j
249
+ // S4: A[i] = A[i] +
250
+ // : for i
251
+ // S5: B[i] = A[i]
252
+ // : for i
253
+ // S6: for k
254
+ // S7: B[i] = B[i] +
255
+ static std::vector<ForPtr> distributeLoop(ForPtr loop);
256
+ // Same as above, but also distribute parent loops.
257
+ // Returns the result of distributing the outermost loop.
258
+ //
259
+ // For the above example:
260
+ // distributeLoopAndParents(S1) will result in:
261
+ // S0: for m
262
+ // S1: for i
263
+ // S2: A[i] = 0
264
+ // : for m
265
+ // : for i
266
+ // S3: for j
267
+ // S4: A[i] = A[i] +
268
+ // : for m
269
+ // : for i
270
+ // S5: B[i] = A[i]
271
+ // : for m
272
+ // : for i
273
+ // S6: for k
274
+ // S7: B[i] = B[i] +
275
+ static std::vector<ForPtr> distributeLoopAndParents(ForPtr loop);
276
+
277
+ // This method distributes the given loop over its body by splitting
278
+ // after every For stmt in its body.
279
+ //
280
+ // For the above example:
281
+ // distributeLoopOverInnerLoops(S1)
282
+ // will result in:
283
+ // S0: for m
284
+ // S1: for i
285
+ // S2: A[i] = 0
286
+ // S3: for j
287
+ // S4: A[i] = A[i] +
288
+ // : for i
289
+ // S5: B[i] = A[i]
290
+ // S6: for k
291
+ // S7: B[i] = B[i] +
292
+ static std::vector<ForPtr> distributeLoopOverInnerLoops(ForPtr loop);
293
+ // Same as above, but also distribute parent loops.
294
+ // Returns the result of distributing the outermost loop.
295
+ //
296
+ // For the above example:
297
+ // distributeLoopAndParentsOverInnerLoops(S1)
298
+ // will result in:
299
+ // S0: for m
300
+ // S1: for i
301
+ // S2: A[i] = 0
302
+ // S3: for j
303
+ // S4: A[i] = A[i] +
304
+ // : for m
305
+ // : for i
306
+ // S5: B[i] = A[i]
307
+ // S6: for k
308
+ // S7: B[i] = B[i] +
309
+ static std::vector<ForPtr> distributeLoopAndParentsOverInnerLoops(
310
+ ForPtr loop);
311
+
312
+ // This method performs loop fusion.
313
+ // For example, consider the following code.
314
+ //
315
+ // S1: for m
316
+ // S2: A[m] = 0
317
+ // S3: for j
318
+ // S4: A[m] = A[m] +
319
+ // S5: for n
320
+ // S6: B[n] = A[n]
321
+ // S7: for k
322
+ // S8: B[n] = B[n] +
323
+ //
324
+ // fuseLoops({S1, S5}) will return the following loop:
325
+ // S1: for m
326
+ // S2: A[m] = 0
327
+ // S3: for j
328
+ // S4: A[m] = A[m] +
329
+ // S6: B[m] = A[m]
330
+ // S7: for k
331
+ // S8: B[m] = B[m] +
332
+ //
333
+ // This transformation is unsafe as it simply adds all loops into the body of
334
+ // the first loop for fusion without correctness checks.
335
+ //
336
+ // Below are the two requirements to apply unsafeFuseLoops:
337
+ // * All the loops have the same parent.
338
+ // * There are no statements between these loops in their parent body.
339
+ static bool unsafeFuseLoops(const std::vector<ForPtr>& loops, ForPtr* fused);
340
+
341
+ // Loop fusion is done only when all the conditions below are satisfied.
342
+ // * All the loops have the same parent.
343
+ // * There are no statements between these loops in their parent body.
344
+ // * The start bounds are the same for all loops.
345
+ // * The stop bounds are the same for all loops.
346
+ // * Fusing the loops does not violate or add any dependencies.
347
+ static bool fuseLoops(const std::vector<ForPtr>& loops, ForPtr* fused);
348
+
349
+ static void reorderAxis(ForPtr a, ForPtr b);
350
+
351
+ // Reorder the given list of loops according to the permutation specified.
352
+ // Here `permutation[i]` represents the position of the loop in the input
353
+ // which will end up at position `i` after the reorder.
354
+ //
355
+ // For example, consider the following code:
356
+ // for p
357
+ // for q
358
+ // for r
359
+ // for s
360
+ // A[p,q,r,s] =
361
+ //
362
+ // reorder({p, q, r, s}, {2, 3, 0, 1}) will return the list of loops in the
363
+ // following form:
364
+ // for r
365
+ // for s
366
+ // for p
367
+ // for q
368
+ // A[p,q,r,s] =
369
+ static std::vector<ForPtr> reorder(
370
+ const std::vector<ForPtr>& loops,
371
+ const std::vector<size_t>& permutation);
372
+
373
+ // Tile takes a 2d domain (x, y) and splits it into small rectangular blocks
374
+ // each with shape (x_factor, y_factor). The traversal over the domain turns
375
+ // into an outer iteration over the blocks and an inner traversal over all
376
+ // points in the block.
377
+ // Note that if x dim % x_factor or y dim % y_factor is not equal to 0, the
378
+ // loop body will generate corresponding tail loops.
379
+ // The transformation is in-place and returns 'xtail'.
380
+ //
381
+ // For example, consider the following code:
382
+ // for i: [0, 64)
383
+ // for j: [0, 64)
384
+ // for k: [0, 32)
385
+ // A[i, j] = B[i, k] + C[j, k]
386
+ //
387
+ // tile(i, j, 4, 8) will transform "i" for-stmt into the following nested
388
+ // loop:
389
+ // for i_outer: [0, 16)
390
+ // for j_outer: [0, 8)
391
+ // for i_inner: [0, 4)
392
+ // for j_inner: [0, 8)
393
+ // for k: [0, 32)
394
+ // A[i_outer * 4 + i_inner, j_outer * 8 + j_inner] =
395
+ // B[i_outer * 4 + i_inner, k] + C[j_outer * 8 + j_inner, k]
396
+ //
397
+ // tile(i, j, 4, 9) will transform "i" for-stmt into the following nested
398
+ // loop:
399
+ // for i_outer: [0, 16)
400
+ // for j_outer: [0, 7)
401
+ // for i_inner: [0, 4)
402
+ // for j_inner: [0, 9)
403
+ // for k: [0, 32)
404
+ // A[i_outer * 4 + i_inner, j_outer * 9 + j_inner] =
405
+ // B[i_outer * 4 + i_inner, k] + C[j_outer * 9 + j_inner, k]
406
+ // for j_tail: [0, 1)
407
+ // for i_inner: [0, 4)
408
+ // for k: [0, 32)
409
+ // A[i_outer * 4 + i_inner, 7 * 9 + j_tail] =
410
+ // B[i_outer * 4 + i_inner, k] + C[7 * 9 + j_tail, k]
411
+ ForPtr tile(ForPtr x, ForPtr y, int x_factor, int y_factor);
412
+
413
+ // Returns true if the given loops are perfectly nested, i.e., every loop
414
+ // (except the innermost) should have exactly one statement in its body
415
+ // and that statement must be the next inner loop.
416
+ static bool areLoopsPerfectlyNested(const std::vector<ForPtr>& loops);
417
+
418
+ // Returns true if the given loop has a loop-carried dependence.
419
+ static bool hasLoopCarriedDependence(ForPtr loop);
420
+
421
+ // Unrolls all the iterations of the given loop.
422
+ // Requires that the loop bounds are constant.
423
+ static void fullUnroll(ForPtr f, StmtPtr* unrolled);
424
+ static void fullUnroll(ForPtr f);
425
+
426
+ // Unrolls the given loop by the specified factor.
427
+ // This does not require constant bounds for the loop being unrolled.
428
+ static void unroll(ForPtr f, int factor, ForPtr* tail);
429
+ static void unroll(ForPtr f, int factor);
430
+
431
+ static bool normalize(ForPtr f);
432
+ static bool isNormalized(ForPtr f);
433
+
434
+ static bool flatten(const std::vector<ForPtr>& f, ForPtr* flattened);
435
+ static bool flatten(const std::vector<ForPtr>& f);
436
+
437
+ // Compresses the given buffer based on its use in the given Stmts.
438
+ //
439
+ // NOTE: This API assumes that there are no accesses to the given buffer
440
+ // outside the given statement. So, this should be called with the entire
441
+ // kernel statement to avoid incorrect buffer compressions.
442
+ //
443
+ // For example, given the input:
444
+ //
445
+ // for (int i = 0; i < 100; ++i) {
446
+ // for (int j = 0; j < 200; ++j) {
447
+ // A[i,j] = sin(i*j)
448
+ // }
449
+ // for (int j = 0; j < 199; ++j) {
450
+ // B[i,j] = A[i,j] + A[i, j+1]
451
+ // }
452
+ // }
453
+ //
454
+ // compressBuffer(A, ...) will compress buffer A from
455
+ // [100, 200] to [1, 200] and modify the code as follows:
456
+ //
457
+ // for (int i = 0; i < 100; ++i) {
458
+ // for (int j = 0; j < 200; ++j) {
459
+ // A[0,j] = sin(i*j)
460
+ // }
461
+ // for (int j = 0; j < 199; ++j) {
462
+ // B[i,j] = A[0,j] + A[0, j+1]
463
+ // }
464
+ // }
465
+ static void compressBuffer(BufPtr buf, StmtPtr stmt);
466
+
467
+ // Compresses all buffers in the given statement.
468
+ //
469
+ // NOTE: This API assumes that there are no accesses to buffers outside
470
+ // the given statement. So, this should be called with the entire
471
+ // kernel statement to avoid incorrect buffer compressions.
472
+ //
473
+ // TODO: Add an IR verifier check to detect invalidly compressed buffers.
474
+ static void compressAllBuffers(StmtPtr stmt);
475
+
476
+ // Get 'num' loops from the loopnest starting at 'f'.
477
+ static std::vector<ForPtr> getLoopStmtsInLoopNest(ForPtr f, size_t num);
478
+
479
+ // LoopOptions are propagated to tail.
480
+ static void sliceHead(ForPtr f, int factor, ForPtr* head, ForPtr* tail);
481
+ static void sliceHead(ForPtr f, int factor);
482
+ // LoopOptions are propagated to head.
483
+ static void sliceTail(ForPtr f, int factor, ForPtr* head, ForPtr* tail);
484
+ static void sliceTail(ForPtr f, int factor);
485
+
486
+ using AccessResult = std::pair<BufPtr, StmtPtr>;
487
+ // Insert a cache for the consumer's usages of the buffer produced in
488
+ // consumer, and redirect reads and writes in the consumer to that cache.
489
+ // Returns a pair of the new cache buffer, and the new rewritten consumer.
490
+ static AccessResult cacheAccesses(
491
+ BufPtr producer,
492
+ const std::string& name,
493
+ StmtPtr consumer);
494
+
495
+ // Insert a temporary computation of statement S in the scope of loop AT.
496
+ // S is assumed to be a Store or a Block containing a Store. Along with the
497
+ // computation itself, this transformation inserts Alloc/Free statements for
498
+ // the temporary buffer used in the computation.
499
+ static void computeAt(StmtPtr s, ForPtr at);
500
+
501
+ // Rfactor a reduction axis into a normal axis.
502
+ //
503
+ // Requirements:
504
+ // * S is the reduction store
505
+ // * S is the only statement in the innermost loop
506
+ // * There are at least two reduction arguments in S
507
+ // * OUTER_REDUCTION_FOR loop corresponds to the outermost reduction variable
508
+ // used in the store and all other reduction variables are index variables of
509
+ // children loops of OUTER_REDUCTION_FOR
510
+ // * OUTER_REDUCTION_FOR is a perfect loop nest, i.e. it has only loops
511
+ // corresponding to the other reduction variables and the store, nested into
512
+ // each other
513
+ //
514
+ // What it does:
515
+ // * Introduce a new buffer with an extra dimension of a size equal to the
516
+ // span of the loop OUTER_REDUCTION_FOR (the new buffer is returned via
517
+ // RFAC_BUF_PTR)
518
+ // * Insert an initialization store for the new buffer in
519
+ // OUTER_REDUCTION_FOR before its nested loop
520
+ // * Replace the reduction store to the original buffer with the reduction
521
+ // store to the temp buffer, removing the index var of OUTER_REDUCTION_FOR
522
+ // from reduction arguments
523
+ // * Insert a final reduction store over the extra dimension of the new
524
+ // buffer to the original buffer
525
+ // * Returns TRUE if the transformation succeeded and FALSE otherwise
526
+ //
527
+ // Example:
528
+ // Original IR:
529
+ // S1: for i # normal axis
530
+ // S2: X[i] = 0
531
+ // S3: for j # reduction axis
532
+ // S4: for k # reduction axis
533
+ // S5: X[i] = ReduceOp(X[i] + Y[i,j,k], reduce_axis={j,k})
534
+ //
535
+ // After RFACTOR(S5, S3)
536
+ // S1: for i # normal axis
537
+ // S2: X[i] = 0
538
+ // S3: for j # reduction axis for X, normal axis for X_rfac
539
+ // X_rfac[i,j] = 0
540
+ // S4: for k # reduction axis
541
+ // X_rfac[i,j] = ReduceOp(X_rfac[i,j] + Y[i,j,k], reduce_axis={k})
542
+ // X[i] = ReduceOp(X[i] + X_rfac[i,j], reduce_axis={j})
543
+ static bool rfactor(StmtPtr s, ForPtr outer_reduction_for);
544
+ static bool rfactor(
545
+ StmtPtr s,
546
+ ForPtr outer_reduction_for,
547
+ BufPtr* rfac_buf_ptr);
548
+
549
+ // Vectorize the given loop. This method requires that the given loop
550
+ // does not perform a reduction.
551
+ // It returns true if vectorization is successful and false otherwise.
552
+ static bool vectorize(ForPtr);
553
+
554
+ // Find the inner-most loops and vectorize them. Currently, this only works
555
+ // for the LLVM backend, when no reductions are involved.
556
+ void vectorizeInnerLoops();
557
+
558
+ void eliminateDeadStores();
559
+
560
+ void prepareForCodegen();
561
+
562
+ const std::unordered_set<BufPtr> getInputBufs() const;
563
+ const std::unordered_set<BufPtr> getOutputBufs() const {
564
+ return output_bufs_;
565
+ }
566
+ std::vector<BufPtr> getIntermediateBufs() const;
567
+
568
+ // Finds which of the two For loops, a or b, is the outer one. If neither of the
569
+ // two is an ancestor of the other, it returns nullptr.
570
+ static ForPtr findOuterFor(ForPtr a, ForPtr b);
571
+
572
+ private:
573
+ void initialize(
574
+ const std::vector<Tensor>& output_tensors,
575
+ const std::vector<Tensor>& tensors_to_compute);
576
+
577
+ StmtPtr root_stmt_;
578
+
579
+ std::unordered_set<BufPtr> output_bufs_;
580
+ };
581
+
582
+ TORCH_API StmtPtr FlattenIndexes(StmtPtr s);
583
+
584
+ // TODO: Revisit this once we decide what dependency analysis should look
585
+ // like. Maybe we would choose to use a different API and BufUse would be
586
+ // removed, or if we decide to keep it we need to properly document its API.
587
+ struct BufLoadOrStoreUse {
588
+ StmtPtr s;
589
+ bool isStore;
590
+ };
591
+
592
+ /*
593
+ * Returns a map (Buf -> uses of this Buf); uses are represented as vectors of
594
+ * BufLoadOrStoreUse elements, which hold a StmtPtr and a bool isStore flag. The order of uses
595
+ * in the vectors reflects the order in which the uses appear in the given
596
+ * statement.
597
+ */
598
+ std::unordered_map<BufPtr, std::vector<BufLoadOrStoreUse>> findLoadOrStoreUses(
599
+ StmtPtr s);
600
+
601
+ // Replaces all invalid characters with underscores.
602
+ TORCH_API std::string sanitizeName(const std::string& input_name);
603
+
604
+ } // namespace tensorexpr
605
+ } // namespace jit
606
+ } // namespace torch
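A minimal illustrative sketch (not part of the diff) of a typical LoopNest pipeline using the API declared above. Only the LoopNest methods come from this header; the Compute() helper is assumed to come from the neighbouring tensor.h, and the exact overloads used here are assumptions.

#include <torch/csrc/jit/tensorexpr/loopnest.h>
#include <torch/csrc/jit/tensorexpr/tensor.h> // for Compute (assumed)

using namespace torch::jit::tensorexpr;

void splitExample() { // hypothetical helper, not upstream API
  // A[i, j] = i + j over a 64x32 iteration space (assumed Compute overload).
  Tensor A = Compute("A", {64, 32}, [](const VarHandle& i, const VarHandle& j) {
    return i + j;
  });

  LoopNest nest({A});
  std::vector<ForPtr> loops = nest.getLoopStmtsFor(A);

  // Split the outer loop by 9: `loops[0]` becomes the outer loop, and the
  // generated inner loop (and tail loop, if any) come back via out-parameters.
  ForPtr inner;
  ForPtr tail;
  LoopNest::splitWithTail(loops[0], 9, &inner, &tail);

  // Typical finalization before handing the stmt to a CodeGen backend.
  nest.prepareForCodegen();
  StmtPtr s = nest.simplify();
  (void)s;
}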
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/loopnest_randomization.h ADDED
@@ -0,0 +1,13 @@
1
+ #pragma once
2
+
3
+ namespace torch {
4
+ namespace jit {
5
+ namespace tensorexpr {
6
+
7
+ // Applies a series of loop optimizations chosen randomly. This is only for
8
+ // testing purposes. This allows automatic stress testing of NNC loop
9
+ // transformations.
10
+ void loopnestRandomization(int64_t seed, LoopNest& l);
11
+ } // namespace tensorexpr
12
+ } // namespace jit
13
+ } // namespace torch
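A minimal illustrative sketch (not part of the diff) of how this testing hook might be wired into a LoopNest pipeline; the harness function and the seed value are assumptions for illustration.

#include <torch/csrc/jit/tensorexpr/loopnest.h>
#include <torch/csrc/jit/tensorexpr/loopnest_randomization.h>

// Hypothetical harness: apply random transformations, then finalize as usual.
void stressTestTransforms(torch::jit::tensorexpr::LoopNest& nest) {
  torch::jit::tensorexpr::loopnestRandomization(/*seed=*/42, nest);
  nest.prepareForCodegen();
}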
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/lowerings.h ADDED
@@ -0,0 +1,49 @@
1
+ // This file defines classes for registering standard lowerings from JIT to TE
2
+ // IR.
3
+ #pragma once
4
+
5
+ #include <torch/csrc/jit/ir/ir.h>
6
+ #include <torch/csrc/jit/runtime/interpreter.h>
7
+ #include <torch/csrc/jit/tensorexpr/analysis.h>
8
+ #include <torch/csrc/jit/tensorexpr/codegen.h>
9
+ #include <torch/csrc/jit/tensorexpr/tensor.h>
10
+
11
+ namespace torch {
12
+ namespace jit {
13
+ namespace tensorexpr {
14
+
15
+ using ArgNone = std::monostate;
16
+ using BufList = std::vector<tensorexpr::BufHandle>;
17
+ using DoubleList = std::vector<double>;
18
+ using IntList = std::vector<int64_t>;
19
+ using ArgValue = std::variant<
20
+ tensorexpr::BufHandle,
21
+ tensorexpr::VarHandle,
22
+ double,
23
+ int64_t,
24
+ bool,
25
+ BufList,
26
+ DoubleList,
27
+ IntList,
28
+ std::string,
29
+ ArgNone>;
30
+
31
+ using NNCLoweringFunction = std::function<Tensor(
32
+ const std::vector<ArgValue>&,
33
+ const std::vector<ExprHandle>&,
34
+ const std::vector<ExprHandle>&,
35
+ const c10::optional<ScalarType>&,
36
+ at::Device)>;
37
+
38
+ TORCH_API FunctionSchemaMap<NNCLoweringFunction>& getNNCLoweringRegistry();
39
+ TORCH_API NNCLoweringFunction getStandardLoweringFor(const std::string& op);
40
+
41
+ struct RegisterNNCLoweringsFunction {
42
+ RegisterNNCLoweringsFunction(
43
+ const std::vector<std::string>& schemas,
44
+ NNCLoweringFunction fn);
45
+ };
46
+
47
+ } // namespace tensorexpr
48
+ } // namespace jit
49
+ } // namespace torch
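A minimal illustrative sketch (not part of the diff) of registering a custom lowering through RegisterNNCLoweringsFunction. The lambda parameter list mirrors NNCLoweringFunction above; the schema string, op name, and the Compute() helper from tensor.h are assumptions made for the example.

#include <torch/csrc/jit/tensorexpr/lowerings.h>
#include <torch/csrc/jit/tensorexpr/tensor.h> // for Compute (assumed)

namespace te = torch::jit::tensorexpr;

// Hypothetical registration: the schema and lambda body are invented for the
// example; only the registry types come from lowerings.h above.
static te::RegisterNNCLoweringsFunction register_custom_copy(
    {"custom::copy(Tensor self) -> Tensor"},
    [](const std::vector<te::ArgValue>& inputs,
       const std::vector<te::ExprHandle>& outputShape,
       const std::vector<te::ExprHandle>& /*outputStrides*/,
       const c10::optional<c10::ScalarType>& /*outputType*/,
       at::Device /*device*/) -> te::Tensor {
      // Treat the first argument as the input buffer and copy it element-wise.
      auto self = std::get<te::BufHandle>(inputs[0]);
      return te::Compute(
          "custom_copy",
          outputShape,
          [&](const std::vector<te::VarHandle>& axes) {
            std::vector<te::ExprHandle> idx(axes.begin(), axes.end());
            return self.load(idx);
          });
    });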