applied-ai-018 committed on
Commit 9b5270d · verified · 1 Parent(s): e708493

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
Files changed (50)
  1. ckpts/universal/global_step120/zero/10.mlp.dense_4h_to_h.weight/exp_avg_sq.pt +3 -0
  2. ckpts/universal/global_step120/zero/10.mlp.dense_4h_to_h.weight/fp32.pt +3 -0
  3. venv/lib/python3.10/site-packages/torch/include/ATen/functorch/ADInterpreters.h +38 -0
  4. venv/lib/python3.10/site-packages/torch/include/ATen/functorch/BatchRulesHelper.h +475 -0
  5. venv/lib/python3.10/site-packages/torch/include/ATen/functorch/BatchedFallback.h +81 -0
  6. venv/lib/python3.10/site-packages/torch/include/ATen/functorch/BatchedTensorImpl.h +170 -0
  7. venv/lib/python3.10/site-packages/torch/include/ATen/functorch/BatchingMetaprogramming.h +126 -0
  8. venv/lib/python3.10/site-packages/torch/include/ATen/functorch/DynamicLayer.h +124 -0
  9. venv/lib/python3.10/site-packages/torch/include/ATen/functorch/FunctionalizeInterpreter.h +22 -0
  10. venv/lib/python3.10/site-packages/torch/include/ATen/functorch/Interpreter.h +208 -0
  11. venv/lib/python3.10/site-packages/torch/include/ATen/functorch/LegacyVmapTransforms.h +187 -0
  12. venv/lib/python3.10/site-packages/torch/include/ATen/functorch/Macros.h +3 -0
  13. venv/lib/python3.10/site-packages/torch/include/ATen/functorch/PlumbingHelper.h +63 -0
  14. venv/lib/python3.10/site-packages/torch/include/ATen/functorch/TensorWrapper.h +103 -0
  15. venv/lib/python3.10/site-packages/torch/include/ATen/functorch/VmapInterpreter.h +25 -0
  16. venv/lib/python3.10/site-packages/torch/include/ATen/hip/impl/HIPAllocatorMasqueradingAsCUDA.h +31 -0
  17. venv/lib/python3.10/site-packages/torch/include/ATen/hip/impl/HIPCachingAllocatorMasqueradingAsCUDA.h +18 -0
  18. venv/lib/python3.10/site-packages/torch/include/ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h +353 -0
  19. venv/lib/python3.10/site-packages/torch/include/ATen/hip/impl/HIPStreamMasqueradingAsCUDA.h +130 -0
  20. venv/lib/python3.10/site-packages/torch/include/ATen/native/quantized/Copy.h +10 -0
  21. venv/lib/python3.10/site-packages/torch/include/ATen/native/quantized/PackedParams.h +147 -0
  22. venv/lib/python3.10/site-packages/torch/include/ATen/native/quantized/cpu/BinaryOps.h +8 -0
  23. venv/lib/python3.10/site-packages/torch/include/ATen/native/quantized/cpu/EmbeddingPackedParams.h +29 -0
  24. venv/lib/python3.10/site-packages/torch/include/ATen/native/quantized/cpu/OnednnUtils.h +445 -0
  25. venv/lib/python3.10/site-packages/torch/include/ATen/native/quantized/cpu/QnnpackUtils.h +527 -0
  26. venv/lib/python3.10/site-packages/torch/include/ATen/native/quantized/cpu/QuantUtils.h +239 -0
  27. venv/lib/python3.10/site-packages/torch/include/ATen/native/quantized/cpu/QuantizedOps.h +258 -0
  28. venv/lib/python3.10/site-packages/torch/include/ATen/native/quantized/cpu/RuyUtils.h +21 -0
  29. venv/lib/python3.10/site-packages/torch/include/ATen/native/quantized/cpu/XnnpackUtils.h +335 -0
  30. venv/lib/python3.10/site-packages/torch/include/ATen/native/quantized/cpu/conv_serialization.h +414 -0
  31. venv/lib/python3.10/site-packages/torch/include/ATen/native/quantized/cpu/fbgemm_utils.h +411 -0
  32. venv/lib/python3.10/site-packages/torch/include/ATen/native/quantized/cpu/init_qnnpack.h +13 -0
  33. venv/lib/python3.10/site-packages/torch/include/ATen/native/quantized/cpu/qembeddingbag.h +34 -0
  34. venv/lib/python3.10/site-packages/torch/include/ATen/native/quantized/cpu/qembeddingbag_prepack.h +13 -0
  35. venv/lib/python3.10/site-packages/torch/include/c10/cuda/CUDAAlgorithm.h +31 -0
  36. venv/lib/python3.10/site-packages/torch/include/c10/cuda/CUDAAllocatorConfig.h +124 -0
  37. venv/lib/python3.10/site-packages/torch/include/c10/cuda/CUDACachingAllocator.h +481 -0
  38. venv/lib/python3.10/site-packages/torch/include/c10/cuda/CUDADeviceAssertion.h +96 -0
  39. venv/lib/python3.10/site-packages/torch/include/c10/cuda/CUDADeviceAssertionHost.h +164 -0
  40. venv/lib/python3.10/site-packages/torch/include/c10/cuda/CUDAException.h +100 -0
  41. venv/lib/python3.10/site-packages/torch/include/c10/cuda/CUDAFunctions.h +116 -0
  42. venv/lib/python3.10/site-packages/torch/include/c10/cuda/CUDAGraphsC10Utils.h +91 -0
  43. venv/lib/python3.10/site-packages/torch/include/c10/cuda/CUDAGuard.h +303 -0
  44. venv/lib/python3.10/site-packages/torch/include/c10/cuda/CUDAMacros.h +51 -0
  45. venv/lib/python3.10/site-packages/torch/include/c10/cuda/CUDAMathCompat.h +152 -0
  46. venv/lib/python3.10/site-packages/torch/include/c10/cuda/CUDAMiscFunctions.h +12 -0
  47. venv/lib/python3.10/site-packages/torch/include/c10/cuda/CUDAStream.h +271 -0
  48. venv/lib/python3.10/site-packages/torch/include/c10/cuda/driver_api.h +49 -0
  49. venv/lib/python3.10/site-packages/torch/include/c10/cuda/impl/CUDAGuardImpl.h +212 -0
  50. venv/lib/python3.10/site-packages/torch/include/c10/cuda/impl/CUDATest.h +9 -0
ckpts/universal/global_step120/zero/10.mlp.dense_4h_to_h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e3d152536c62cc6cf79aa52bdcf0f0b7f2230b70b4f557dc3f32337983dec357
+ size 33555627
ckpts/universal/global_step120/zero/10.mlp.dense_4h_to_h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d67da676bc543e5cf8caf679c6f59986b60eb3f7c042f65cd3d4188787b6ed6b
+ size 33555533
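The two checkpoint entries above are Git LFS pointer files: the repository stores only the spec version, the sha256 object id, and the payload size, while the actual optimizer-state tensors live in LFS storage. As a minimal illustration (the parse_lfs_pointer helper below is hypothetical, not part of any tool used in this commit), such a pointer can be read like this:

# Minimal sketch: parse the three "key value" lines of a Git LFS pointer file.
def parse_lfs_pointer(text: str) -> dict:
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

pointer_text = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:d67da676bc543e5cf8caf679c6f59986b60eb3f7c042f65cd3d4188787b6ed6b\n"
    "size 33555533\n"
)
info = parse_lfs_pointer(pointer_text)
print(info["oid"], int(info["size"]))  # the fp32.pt payload itself is fetched via git-lfs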
venv/lib/python3.10/site-packages/torch/include/ATen/functorch/ADInterpreters.h ADDED
@@ -0,0 +1,38 @@
+ #pragma once
+ #include <ATen/functorch/Interpreter.h>
+
+ namespace at::functorch {
+
+ // These are the interpreters for our AD transforms
+ // (grad, vjp and jvp).
+ // See NOTE: [functorch interpreter stack] for more details.
+
+ struct TORCH_API GradInterpreterPtr {
+ explicit GradInterpreterPtr(const Interpreter* base): base_(base) { TORCH_INTERNAL_ASSERT(base->key() == TransformType::Grad); }
+ TransformType key() const { return base_->key(); }
+ int64_t level() const { return base_->level(); }
+ void processImpl(const c10::OperatorHandle& op, torch::jit::Stack* stack);
+ void sendToNextInterpreterImpl(const c10::OperatorHandle& op, torch::jit::Stack* stack, bool grad_special_case);
+ bool prevGradMode() const {
+ return std::get<GradInterpreterMeta>(base_->meta()).prevGradMode_;
+ }
+ Tensor lift(const Tensor& tensor) const;
+ private:
+ const Interpreter* base_;
+ };
+
+ struct TORCH_API JvpInterpreterPtr {
+ explicit JvpInterpreterPtr(const Interpreter* base): base_(base) { TORCH_INTERNAL_ASSERT(base->key() == TransformType::Jvp); }
+ TransformType key() const { return base_->key(); }
+ int64_t level() const { return base_->level(); }
+ void processImpl(const c10::OperatorHandle& op, torch::jit::Stack* stack);
+ void sendToNextInterpreterImpl(const c10::OperatorHandle& op, torch::jit::Stack* stack, bool grad_special_case);
+ bool prevFwdGradMode() const {
+ return std::get<JvpInterpreterMeta>(base_->meta()).prevFwdGradMode_;
+ }
+ Tensor lift(const Tensor& tensor) const;
+ private:
+ const Interpreter* base_;
+ };
+
+ } // namespace at::functorch
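ADInterpreters.h declares the interpreter wrappers behind functorch's AD transforms (grad/vjp for reverse mode, jvp for forward mode). As a hedged, user-level illustration only (assuming PyTorch 2.x with torch.func; none of the C++ types above are used directly), the transforms these interpreters service look like:

import torch
from torch.func import grad, jvp, vjp

def f(x):
    return (x ** 2).sum()

x = torch.tensor([1.0, 2.0, 3.0])

# Reverse-mode AD (GradInterpreterPtr's transform): df/dx = 2x
print(grad(f)(x))

# vjp uses the same reverse-mode machinery with an explicit cotangent
out, vjp_fn = vjp(f, x)
print(vjp_fn(torch.ones_like(out)))

# Forward-mode AD (JvpInterpreterPtr's transform): directional derivative along t
t = torch.ones_like(x)
print(jvp(f, (x,), (t,)))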
venv/lib/python3.10/site-packages/torch/include/ATen/functorch/BatchRulesHelper.h ADDED
@@ -0,0 +1,475 @@
1
+ // Copyright (c) Facebook, Inc. and its affiliates.
2
+ // All rights reserved.
3
+ //
4
+ // This source code is licensed under the BSD-style license found in the
5
+ // LICENSE file in the root directory of this source tree.
6
+ #pragma once
7
+
8
+ #include <c10/util/TypeList.h>
9
+
10
+ #include <ATen/ATen.h>
11
+ #include <ATen/Operators.h>
12
+
13
+ #include <ATen/functorch/DynamicLayer.h>
14
+ #include <ATen/functorch/TensorWrapper.h>
15
+ #include <ATen/functorch/BatchingMetaprogramming.h>
16
+ #include <ATen/functorch/LegacyVmapTransforms.h>
17
+ #include <ATen/functorch/BatchedFallback.h>
18
+ #include <ATen/functorch/PlumbingHelper.h>
19
+ #include <ATen/core/dispatch/Dispatcher.h>
20
+ #include <ATen/VmapGeneratedPlumbing.h>
21
+
22
+ #include <utility>
23
+
24
+ // This file contains helper functions for batching rules.
25
+
26
+ namespace at::functorch {
27
+
28
+ TORCH_API Tensor reshape_dim_into(int64_t src, int64_t dst, const Tensor& x);
29
+ TORCH_API Tensor reshape_dim_outof(int64_t src, int64_t size1, const Tensor& x);
30
+
31
+ TORCH_API Tensor reshape_dim_outof_symint(int64_t src, c10::SymInt size1, const Tensor& x);
32
+
33
+ Tensor moveBatchDimToFront(const Tensor& tensor, optional<int64_t> maybe_batch_dim);
34
+ int64_t rankWithoutBatchDim(const Tensor& tensor, optional<int64_t> maybe_batch_dim);
35
+ int64_t numelWithoutBatchDim(const Tensor& tensor, optional<int64_t> maybe_batch_dim);
36
+ optional<int64_t> valIfNonempty(optional<int64_t> maybe_empty, int64_t new_val);
37
+ int64_t getPhysicalDim(const Tensor& tensor, bool has_batch_dim, int64_t logical_dim);
38
+ VmapDimVector getPhysicalDims(const Tensor& tensor, bool has_batch_dim, IntArrayRef logical_dims);
39
+
40
+ void vmapIncompatibleInplaceError(const char* schema_name);
41
+
42
+ Tensor maybePadToLogicalRank(const Tensor& tensor, optional<int64_t> has_bdim, int64_t logical_rank);
43
+
44
+ void check_randomness(RandomnessType randomness);
45
+ void check_randomness(RandomnessType randomness, bool any_tensor_bdim);
46
+
47
+ inline Tensor ensure_has_bdim(const Tensor& tensor, bool has_bdim, c10::SymInt batch_size) {
48
+ if (has_bdim) {
49
+ return tensor;
50
+ }
51
+ const auto sizes = tensor.sym_sizes();
52
+ SymDimVector expanded_shape;
53
+ expanded_shape.reserve(sizes.size());
54
+ expanded_shape.emplace_back(std::move(batch_size));
55
+ expanded_shape.insert(expanded_shape.end(), sizes.begin(), sizes.end());
56
+ return tensor.expand_symint(expanded_shape);
57
+ }
58
+
59
+ #define VMAP_SUPPORT(op, batch_rule) \
60
+ m.impl(#op, op ## _generated_plumbing<decltype(&batch_rule), &batch_rule>);
61
+
62
+ #define VMAP_SUPPORT2(op, overload, batch_rule) \
63
+ m.impl(#op "." #overload, op ## _ ## overload ## _generated_plumbing<decltype(&batch_rule), &batch_rule>);
64
+
65
+ #define OP_DECOMPOSE(op) m.impl(#op, static_cast<decltype(&ATEN_FN(op))>(native::op));
66
+ #define OP_DECOMPOSE2(op, overload) m.impl(#op"."#overload, static_cast<decltype(&ATEN_FN2(op, overload))>(native::op));
67
+
68
+ // DO NOT USE ME DIRECTLY! Use BASIC_UNARY_BATCH_RULE to save yourself some pain
69
+ template <typename A, A a, typename C>
70
+ struct BasicUnaryBatchRuleHelper;
71
+
72
+ template <typename F, F Func, typename A, typename... T>
73
+ struct BasicUnaryBatchRuleHelper<F, Func, c10::guts::typelist::typelist<A, T...>> {
74
+ static std::tuple<Tensor,optional<int64_t>> apply(
75
+ const Tensor& tensor,
76
+ optional<int64_t> batch_dim,
77
+ T... extra_args) {
78
+ return std::make_tuple(Func(tensor, std::forward<T>(extra_args)...), batch_dim);
79
+ }
80
+ };
81
+
82
+ // USAGE: BASIC_UNARY_BATCH_RULE(at::sin)
83
+ // INCORRECT USAGE: BASIC_UNARY_BATCH_RULE(&at::sin)
84
+ // It is important that this macro is not passed a function pointer!!
85
+ #define BASIC_UNARY_BATCH_RULE(fn) SINGLE_ARG(\
86
+ BasicUnaryBatchRuleHelper<\
87
+ decltype(&fn),\
88
+ &fn,\
89
+ c10::guts::function_traits<decltype(fn)>::parameter_types>::apply)
90
+
91
+ #define UNARY_POINTWISE(op) \
92
+ VMAP_SUPPORT(op, BASIC_UNARY_BATCH_RULE(ATEN_FN(op)));
93
+
94
+ template <typename A, A a, typename C>
95
+ struct VariadicBdimsBatchRuleHelper;
96
+
97
+ template <typename F, F Func, typename A, typename... T>
98
+ struct VariadicBdimsBatchRuleHelper<F, Func, c10::guts::typelist::typelist<A, T...>> {
99
+ static std::tuple<Tensor,optional<int64_t>> apply(
100
+ const Tensor& tensor,
101
+ optional<int64_t> batch_dim,
102
+ T... extra_args) {
103
+ auto tensor_ = moveBatchDimToFront(tensor, batch_dim);
104
+ return std::make_tuple(Func(tensor_, std::forward<T>(extra_args)...), 0);
105
+ }
106
+ };
107
+
108
+ // USAGE: VARIADIC_BDIMS_BATCH_RULE(at::cholesky_inverse)
109
+ // INCORRECT USAGE: VARIADIC_BDIMS_BATCH_RULE(&at::cholesky_inverse)
110
+ // It is important that this macro is not passed a function pointer!!
111
+ #define VARIADIC_BDIMS_BATCH_RULE(fn) SINGLE_ARG(\
112
+ VariadicBdimsBatchRuleHelper<\
113
+ decltype(&fn),\
114
+ &fn,\
115
+ c10::guts::function_traits<decltype(fn)>::parameter_types>::apply)
116
+
117
+ #define VARIADIC_BDIMS(op) \
118
+ VMAP_SUPPORT(op, VARIADIC_BDIMS_BATCH_RULE(ATEN_FN(op)));
119
+
120
+ #define VARIADIC_BDIMS2(op, overload) \
121
+ VMAP_SUPPORT2(op, overload, VARIADIC_BDIMS_BATCH_RULE(ATEN_FN2(op, overload)));
122
+
123
+ template<class F, F Func>
124
+ void boxed_tensor_inputs_batch_rule(const c10::OperatorHandle& op, torch::jit::Stack* stack) {
125
+ const auto& schema = op.schema();
126
+ const auto num_returns = schema.returns().size();
127
+ const auto num_arguments = schema.arguments().size();
128
+
129
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
130
+ auto maybe_layer = maybeCurrentDynamicLayer();
131
+ vmap_check_escaped(maybe_layer, "boxed_tensor_inputs_batch_rule");
132
+
133
+ int64_t cur_level = maybe_layer->layerId();
134
+
135
+ auto orig_arguments = torch::jit::last(*stack, num_arguments);
136
+ if (std::none_of(orig_arguments.begin(), orig_arguments.end(), ivalueParticipatesInCurrentLevel)) {
137
+ op.callBoxed(stack);
138
+ return;
139
+ }
140
+
141
+ auto arguments = torch::jit::pop(*stack, num_arguments);
142
+ std::vector<std::pair<Tensor, optional<int64_t>>> tensor_inputs;
143
+ std::vector<int64_t> tensor_pos;
144
+ for (const auto idx : c10::irange(0, num_arguments)) {
145
+ const auto& ivalue = arguments[idx];
146
+ if (ivalue.isTensor()) {
147
+ auto [tensor_value, tensor_bdim] = unwrapTensorAtLevel(ivalue.toTensor(), cur_level);
148
+ tensor_inputs.emplace_back(tensor_value, tensor_bdim);
149
+ tensor_pos.push_back(idx);
150
+ }
151
+ }
152
+ Func(tensor_inputs);
153
+
154
+ size_t tensor_idx = 0;
155
+ TORCH_INTERNAL_ASSERT(!tensor_pos.empty());
156
+ for (const auto arg_idx : c10::irange(0, num_arguments)) {
157
+ if (tensor_idx >= tensor_pos.size() || (int64_t)arg_idx != tensor_pos[tensor_idx]) {
158
+ torch::jit::push(stack, arguments[arg_idx]);
159
+ } else {
160
+ TORCH_INTERNAL_ASSERT(tensor_idx < tensor_inputs.size());
161
+ torch::jit::push(stack, tensor_inputs[tensor_idx].first);
162
+ tensor_idx++;
163
+ }
164
+ }
165
+
166
+ op.callBoxed(stack);
167
+ const auto returns = torch::jit::pop(*stack, num_returns);
168
+ for (const auto& ret : returns) {
169
+ if (ret.isTensor()) {
170
+ torch::jit::push(stack, makeBatched(ret.toTensor(), 0, cur_level));
171
+ } else {
172
+ TORCH_INTERNAL_ASSERT(false, "This boxed batching rule does not currently support ops that return non-tensor values");
173
+ }
174
+ }
175
+ }
176
+
177
+ inline void handle_pointwise_ops(std::vector<std::pair<Tensor, optional<int64_t>>> &tensor_inputs) {
178
+ int64_t out_logical_rank = 0;
179
+ for (auto& tensor_input : tensor_inputs) {
180
+ int64_t cur_logical_rank = rankWithoutBatchDim(tensor_input.first, tensor_input.second);
181
+ out_logical_rank = std::max(out_logical_rank, cur_logical_rank);
182
+ }
183
+ for (auto& tensor_input: tensor_inputs) {
184
+ tensor_input.first = moveBatchDimToFront(tensor_input.first, tensor_input.second);
185
+ tensor_input.first = maybePadToLogicalRank(tensor_input.first, tensor_input.second, out_logical_rank);
186
+ }
187
+ }
188
+
189
+ #define POINTWISE_BOXED(op) \
190
+ m.impl(#op, torch::CppFunction::makeFromBoxedFunction<boxed_tensor_inputs_batch_rule<decltype(&handle_pointwise_ops), &handle_pointwise_ops>>());
191
+
192
+ #define POINTWISE_BOXED2(op, overload) \
193
+ m.impl(#op "." #overload, torch::CppFunction::makeFromBoxedFunction<boxed_tensor_inputs_batch_rule<decltype(&handle_pointwise_ops), &handle_pointwise_ops>>());
194
+
195
+ inline void handle_variadic_bdims(std::vector<std::pair<Tensor, optional<int64_t>>> &tensor_inputs) {
196
+ for (auto & tensor_input : tensor_inputs) {
197
+ tensor_input.first = moveBatchDimToFront(tensor_input.first, tensor_input.second);
198
+ }
199
+ }
200
+
201
+ #define VARIADIC_BDIMS_BOXED(op) \
202
+ m.impl(#op, torch::CppFunction::makeFromBoxedFunction<boxed_tensor_inputs_batch_rule<decltype(&handle_variadic_bdims), &handle_variadic_bdims>>());
203
+
204
+ using UnpackedBatchedTensor = std::tuple<Tensor,optional<int64_t>>;
205
+
206
+ inline void find_and_unpack_tensors(
207
+ const torch::jit::Stack* stack,
208
+ int64_t num_args,
209
+ int64_t cur_level,
210
+ SmallVector<UnpackedBatchedTensor, 5>* tensors,
211
+ SmallVector<int64_t, 5>* tensors_pos,
212
+ int64_t* batch_size) {
213
+
214
+ int64_t computed_batch_size = -1;
215
+ int64_t args_begin = stack->size() - num_args;
216
+
217
+ for (const auto idx : c10::irange(0, num_args)) {
218
+ const auto& ivalue = (*stack)[args_begin + idx];
219
+ if (!ivalue.isTensor()) {
220
+ continue;
221
+ }
222
+ auto unpacked = unwrapTensorAtLevel(ivalue.toTensor(), cur_level);
223
+ const auto& tensor_value = std::get<0>(unpacked);
224
+ const auto tensor_bdim = std::get<1>(unpacked);
225
+ if (tensor_bdim.has_value()) {
226
+ auto candidate_batch_size = tensor_value.size(*tensor_bdim);
227
+ if (computed_batch_size == -1) {
228
+ computed_batch_size = candidate_batch_size;
229
+ }
230
+ TORCH_INTERNAL_ASSERT(candidate_batch_size == computed_batch_size);
231
+ }
232
+
233
+ tensors->push_back(std::move(unpacked));
234
+ tensors_pos->push_back(idx);
235
+ }
236
+ TORCH_INTERNAL_ASSERT(computed_batch_size > -1);
237
+ *batch_size = computed_batch_size;
238
+ }
239
+
240
+ inline void boxed_existing_bdim_all_batch_rule(
241
+ const c10::OperatorHandle& op, torch::jit::Stack* stack) {
242
+ const auto& schema = op.schema();
243
+ const auto num_returns = schema.returns().size();
244
+ const auto num_arguments = schema.arguments().size();
245
+
246
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
247
+ auto maybe_layer = maybeCurrentDynamicLayer();
248
+ vmap_check_escaped(maybe_layer, "boxed_existing_bdim_all_batch_rule");
249
+ int64_t cur_level = maybe_layer->layerId();
250
+
251
+ const auto arguments = torch::jit::last(stack, num_arguments);
252
+ if (std::none_of(arguments.begin(), arguments.end(), ivalueParticipatesInCurrentLevel)) {
253
+ op.callBoxed(stack);
254
+ return;
255
+ }
256
+
257
+ int64_t args_begin = stack->size() - num_arguments;
258
+ SmallVector<UnpackedBatchedTensor, 5> tensor_inputs;
259
+ SmallVector<int64_t, 5> tensor_pos;
260
+ int64_t batch_size;
261
+
262
+ find_and_unpack_tensors(
263
+ stack, num_arguments, cur_level,
264
+ &tensor_inputs, &tensor_pos, &batch_size);
265
+
266
+ // for each tensor, ensure it has a bdim and reshape it.
267
+ for (const auto tensor_idx : c10::irange(0, tensor_inputs.size())) {
268
+ const auto& value = std::get<0>(tensor_inputs[tensor_idx]);
269
+ auto bdim = std::get<1>(tensor_inputs[tensor_idx]);
270
+ auto value_ = ensure_has_bdim(value, bdim.has_value(), batch_size);
271
+ if (!bdim.has_value()) {
272
+ bdim = 0;
273
+ }
274
+ (*stack)[args_begin + tensor_pos[tensor_idx]] = reshape_dim_into(*bdim, 0, value_);
275
+ }
276
+
277
+ op.callBoxed(stack);
278
+
279
+ for (const auto idx : c10::irange(args_begin, args_begin + num_returns)) {
280
+ const auto& ret = (*stack)[idx];
281
+ TORCH_INTERNAL_ASSERT(ret.isTensor(),
282
+ "This boxed batching rule does not currently support ops that return non-tensor values");
283
+ (*stack)[idx] = makeBatched(reshape_dim_outof(0, batch_size, ret.toTensor()), 0, cur_level);
284
+ }
285
+ }
286
+
287
+ // Use when all tensors arguments accept one (normal) batch dim.
288
+ // This batching rule expands the batch dim on all Tensors, reshapes it into
289
+ // dim 0, calls the op, and then reshapes the batch dim out of dim 0.
290
+ // This is not the most efficient thing; if there are alternatives, plese try
291
+ // to use them. Use this only as a last resort.
292
+ #define EXISTING_BDIM_ALL_BOXED(op) \
293
+ m.impl(#op, torch::CppFunction::makeFromBoxedFunction<boxed_existing_bdim_all_batch_rule>());
294
+
295
+ template <int64_t feature_rank, int64_t contig_tensor_index=-1>
296
+ inline void boxed_all_tensors_have_optional_bdim(
297
+ const c10::OperatorHandle& op, torch::jit::Stack* stack) {
298
+ const auto& schema = op.schema();
299
+ const auto num_returns = schema.returns().size();
300
+ const auto num_arguments = schema.arguments().size();
301
+
302
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
303
+ auto maybe_layer = maybeCurrentDynamicLayer();
304
+ vmap_check_escaped(maybe_layer, "boxed_all_tensors_have_optional_bdim");
305
+ int64_t cur_level = maybe_layer->layerId();
306
+
307
+ const auto arguments = torch::jit::last(stack, num_arguments);
308
+ if (std::none_of(arguments.begin(), arguments.end(), ivalueParticipatesInCurrentLevel)) {
309
+ op.callBoxed(stack);
310
+ return;
311
+ }
312
+
313
+ int64_t args_begin = stack->size() - num_arguments;
314
+ SmallVector<UnpackedBatchedTensor, 5> tensor_inputs;
315
+ SmallVector<int64_t, 5> tensor_pos;
316
+ int64_t batch_size;
317
+
318
+ find_and_unpack_tensors(
319
+ stack, num_arguments, cur_level,
320
+ &tensor_inputs, &tensor_pos, &batch_size);
321
+
322
+ optional<bool> is_no_batch_dim_case;
323
+
324
+ for (const auto tensor_idx : c10::irange(0, tensor_inputs.size())) {
325
+ const auto& value = std::get<0>(tensor_inputs[tensor_idx]);
326
+ auto bdim = std::get<1>(tensor_inputs[tensor_idx]);
327
+ const auto logical_rank = rankWithoutBatchDim(value, bdim);
328
+
329
+ if (!is_no_batch_dim_case.has_value()) {
330
+ is_no_batch_dim_case = (logical_rank == feature_rank);
331
+ }
332
+ auto value_ = ensure_has_bdim(value, bdim.has_value(), batch_size);
333
+ if (!bdim.has_value()) {
334
+ bdim = 0;
335
+ }
336
+ if (*is_no_batch_dim_case) {
337
+ TORCH_INTERNAL_ASSERT(logical_rank == feature_rank);
338
+ value_ = moveBatchDimToFront(value_, bdim);
339
+ if (tensor_idx == contig_tensor_index) {
340
+ value_ = value_.contiguous();
341
+ }
342
+ (*stack)[args_begin + tensor_pos[tensor_idx]] = std::move(value_);
343
+ continue;
344
+ }
345
+ TORCH_INTERNAL_ASSERT(logical_rank == feature_rank + 1);
346
+ value_ = reshape_dim_into(*bdim, 0, value_);
347
+ if (tensor_idx == contig_tensor_index) {
348
+ value_ = value_.contiguous();
349
+ }
350
+ (*stack)[args_begin + tensor_pos[tensor_idx]] = std::move(value_);
351
+ }
352
+
353
+ op.callBoxed(stack);
354
+
355
+ for (const auto idx : c10::irange(args_begin, args_begin + num_returns)) {
356
+ const auto& ret = (*stack)[idx];
357
+ TORCH_INTERNAL_ASSERT(ret.isTensor(),
358
+ "This boxed batching rule does not currently support ops that return non-tensor values");
359
+ if (*is_no_batch_dim_case) {
360
+ (*stack)[idx] = makeBatched(ret.toTensor(), 0, cur_level);
361
+ } else {
362
+ (*stack)[idx] = makeBatched(reshape_dim_outof(0, batch_size, ret.toTensor()), 0, cur_level);
363
+ }
364
+ }
365
+ }
366
+
367
+ // Useful for many NN operators.
368
+ // The operator must satisfy the following:
369
+ // - All arguments must accept an optional batch dim.
370
+ // - All arguments must be the same rank
371
+ #define ALL_TENSORS_HAVE_OPTIONAL_BDIM_BOXED(feature_rank, op) \
372
+ m.impl(#op, torch::CppFunction::makeFromBoxedFunction<boxed_all_tensors_have_optional_bdim<feature_rank>>());
373
+
374
+ #define ALL_TENSORS_HAVE_OPTIONAL_BDIM_BOXED_CONTIG1(feature_rank, op, contig_tensor_index) \
375
+ m.impl(#op, \
376
+ torch::CppFunction::makeFromBoxedFunction<\
377
+ boxed_all_tensors_have_optional_bdim<\
378
+ feature_rank, \
379
+ contig_tensor_index>\
380
+ >());
381
+
382
+ template <typename A, A a, typename C>
383
+ struct ExistingBdimBatchRuleHelper;
384
+
385
+ template <typename F, F Func, typename A, typename... T>
386
+ struct ExistingBdimBatchRuleHelper<F, Func, c10::guts::typelist::typelist<A, T...>> {
387
+ static std::tuple<Tensor,optional<int64_t>> apply(
388
+ const Tensor& self,
389
+ optional<int64_t> self_bdim,
390
+ T... extra_args) {
391
+ auto self_ = reshape_dim_into(*self_bdim, 0, self);
392
+ auto out = Func(self_, std::forward<T>(extra_args)...);
393
+ return std::make_tuple(reshape_dim_outof_symint(0, self.sym_sizes()[*self_bdim], out), 0);
394
+ }
395
+ };
396
+
397
+ // USAGE: EXISTING_BDIM_BATCH_RULE(at::cholesky_inverse)
398
+ // INCORRECT USAGE: EXISTING_BDIM_BATCH_RULE(&at::cholesky_inverse)
399
+ // It is important that this macro is not passed a function pointer!!
400
+ #define EXISTING_BDIM_BATCH_RULE(fn) SINGLE_ARG(\
401
+ ExistingBdimBatchRuleHelper<\
402
+ decltype(&fn),\
403
+ &fn,\
404
+ c10::guts::function_traits<decltype(fn)>::parameter_types>::apply)
405
+
406
+
407
+ #define EXISTING_BDIM(op) \
408
+ VMAP_SUPPORT(op, EXISTING_BDIM_BATCH_RULE(ATEN_FN(op)));
409
+
410
+ #define EXISTING_BDIM2(op, overload) \
411
+ VMAP_SUPPORT2(op, overload, EXISTING_BDIM_BATCH_RULE(ATEN_FN2(op, overload)));
412
+
413
+ #define INVOKE(object,ptrToMember) ((object).*(ptrToMember))
414
+
415
+
416
+ template <typename F, F Method, typename... ExtraArgs>
417
+ Tensor& unary_inplace_batch_rule(Tensor& self, optional<int64_t>, ExtraArgs... extra_args) {
418
+ INVOKE(self, Method)(std::forward<ExtraArgs>(extra_args)...);
419
+ return self;
420
+ }
421
+
422
+ inline int64_t get_bdim_size4(
423
+ const Tensor& a_value, optional<int64_t> a_bdim,
424
+ const Tensor& b_value, optional<int64_t> b_bdim,
425
+ const Tensor& c_value, optional<int64_t> c_bdim,
426
+ const Tensor& d_value, optional<int64_t> d_bdim) {
427
+ if (a_bdim)
428
+ return a_value.size(*a_bdim);
429
+ if (b_bdim)
430
+ return b_value.size(*b_bdim);
431
+ if (c_bdim)
432
+ return c_value.size(*c_bdim);
433
+ if (d_bdim)
434
+ return d_value.size(*d_bdim);
435
+ TORCH_INTERNAL_ASSERT(false);
436
+ }
437
+
438
+ inline int64_t get_bdim_size3(
439
+ const Tensor& a_value, optional<int64_t> a_bdim,
440
+ const Tensor& b_value, optional<int64_t> b_bdim,
441
+ const Tensor& c_value, optional<int64_t> c_bdim) {
442
+ if (a_bdim)
443
+ return a_value.size(*a_bdim);
444
+ if (b_bdim)
445
+ return b_value.size(*b_bdim);
446
+ if (c_bdim)
447
+ return c_value.size(*c_bdim);
448
+ TORCH_INTERNAL_ASSERT(false);
449
+ }
450
+
451
+ inline int64_t get_bdim_size2(
452
+ const Tensor& a_value, optional<int64_t> a_bdim,
453
+ const Tensor& b_value, optional<int64_t> b_bdim) {
454
+ if (a_bdim)
455
+ return a_value.size(*a_bdim);
456
+ if (b_bdim)
457
+ return b_value.size(*b_bdim);
458
+ TORCH_INTERNAL_ASSERT(false);
459
+ }
460
+
461
+ // [start, start + 1, ..., stop - 1]
462
+ inline VmapDimVector range(int64_t start, int64_t stop) {
463
+ TORCH_INTERNAL_ASSERT(stop >= start);
464
+ VmapDimVector dims;
465
+ dims.reserve(stop - start);
466
+ for (int64_t i = start; i < stop; i++) {
467
+ dims.emplace_back(i);
468
+ }
469
+ return dims;
470
+ }
471
+ std::tuple<Tensor, Tensor> _binary_pointwise_helper(
472
+ const Tensor& tensor, optional<int64_t> tensor_batch_dim, const Tensor& other, optional<int64_t> other_batch_dim,
473
+ bool do_type_promotion=true);
474
+
475
+ } // namespace at::functorch
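Many of the helpers above (reshape_dim_into, reshape_dim_outof, the EXISTING_BDIM macros) implement one recurring pattern: fold the vmap batch dimension into dim 0, call the underlying op once, then split dim 0 back out. A rough Python sketch of that pattern with plain tensor ops (assuming PyTorch 2.x; this is an analogy, not a call into the C++ helpers):

import torch
import torch.nn.functional as F

def fold_bdim_into_front(x, bdim):
    # Roughly what reshape_dim_into(bdim, 0, x) is used for in these rules:
    # move the batch dim to the front and merge it with the next dim.
    return x.movedim(bdim, 0).flatten(0, 1)

def unfold_bdim_from_front(y, batch_size):
    # The reshape_dim_outof(0, batch_size, y) step: split dim 0 back out.
    return y.unflatten(0, (batch_size, -1))

x = torch.randn(7, 3, 2, 8, 8)            # batch of size 3 sitting at dim 1
x_flat = fold_bdim_into_front(x, bdim=1)  # shape (21, 2, 8, 8)
y = unfold_bdim_from_front(F.avg_pool2d(x_flat, 2), batch_size=3)  # (3, 7, 2, 4, 4)

reference = torch.vmap(lambda t: F.avg_pool2d(t, 2), in_dims=1)(x)
print(torch.allclose(y, reference))       # True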
venv/lib/python3.10/site-packages/torch/include/ATen/functorch/BatchedFallback.h ADDED
@@ -0,0 +1,81 @@
+ // Copyright (c) Facebook, Inc. and its affiliates.
+ // All rights reserved.
+ //
+ // This source code is licensed under the BSD-style license found in the
+ // LICENSE file in the root directory of this source tree.
+
+ #pragma once
+ #include <ATen/ATen.h>
+ #include <ATen/core/op_registration/op_registration.h>
+ #include <torch/library.h>
+
+ namespace at::functorch {
+
+ // This file contains code for the vmap fallback (also known as the
+ // BatchedTensor fallback or the Batched fallback). This code runs
+ // when an operation doesn't have a batching rule implemented.
+
+ // If an operator doesn't have a batching rule implemented then we fallback
+ // to this implementation. The fallback doesn't work on out= variants or
+ // view operations; that is, it works for out-of-place operations and
+ // in-place non-view operations.
+ //
+ // For out-of-place operations, the fallback effectively takes all of the
+ // BatchedTensors in `stack`, slices them, and runs `op` on all of the
+ // corresponding slices to produce slices of the outputs. The output slices
+ // then get `torch.stack`ed to create the
+ // final returns.
+ //
+ // The performance of the fallback is not very good because it introduces an
+ // extra copy from stacking the sliced outputs. Because of this, we prefer to
+ // write batching rules for operators whenever possible.
+ void batchedTensorForLoopFallback(const c10::OperatorHandle& op, torch::jit::Stack* stack);
+ void batchedNestedTensorForLoopFallback(const c10::OperatorHandle& op, torch::jit::Stack* stack);
+
+ void vmapErrorFallback(const c10::OperatorHandle& op, torch::jit::Stack* stack);
+
+ // The vmap fallback emits a warning by default, but it may be disabled if
+ // the user finds it to be too annoying.
+ TORCH_API bool isVmapFallbackWarningEnabled();
+ TORCH_API void setVmapFallbackWarningEnabled(bool enabled);
+
+ // Used for testing. The vmap fallback is enabled by default. When it is disabled,
+ // it raises an error.
+ TORCH_API bool isVmapFallbackEnabled();
+ TORCH_API void setVmapFallbackEnabled(bool enabled);
+
+ template <typename A> A vector_to_result(const std::vector<IValue>& buffer) {
+ return buffer[0].to<A>();
+ }
+ template <typename A, typename B> std::tuple<A, B> vector_to_result(const std::vector<IValue>& buffer) {
+ return std::make_tuple(buffer[0].to<A>(), buffer[1].to<B>());
+ }
+ template <typename A, typename B, typename C> std::tuple<A, B, C> vector_to_result(const std::vector<IValue>& buffer) {
+ return std::make_tuple(buffer[0].to<A>(), buffer[1].to<B>(), buffer[2].to<B>());
+ }
+
+ // slow_fallback is a way to call the vmap fallback inside some boxed kernel.
+ // There is probably some better way to metaprogram this.
+ template <typename Ret>
+ Ret slow_fallback(const c10::OperatorHandle& op, ArrayRef<IValue> args) {
+ std::vector<IValue> stack(args.begin(), args.end());
+ batchedTensorForLoopFallback(op, &stack);
+ return vector_to_result<Ret>(stack);
+ }
+
+ template <typename A, typename B>
+ std::tuple<A, B> slow_fallback(const c10::OperatorHandle& op, ArrayRef<IValue> args) {
+ std::vector<IValue> stack(args.begin(), args.end());
+ batchedTensorForLoopFallback(op, &stack);
+ return vector_to_result<A, B>(stack);
+ }
+
+ template <typename A, typename B, typename C>
+ std::tuple<A, B, C> slow_fallback(const c10::OperatorHandle& op, ArrayRef<IValue> args) {
+ std::vector<IValue> stack(args.begin(), args.end());
+ batchedTensorForLoopFallback(op, &stack);
+ return vector_to_result<A, B, C>(stack);
+ }
+
+
+ } // namespace at::functorch
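The comments above describe the slow path precisely: slice every BatchedTensor along its batch dim, run the op once per slice, and torch.stack the per-slice outputs (hence the extra copy). A minimal Python sketch of that strategy for a single-output op (assuming PyTorch 2.x; the real C++ kernel is boxed and more general):

import torch

def for_loop_fallback(op, batched_args, bdim=0):
    # Run `op` once per slice along the shared batch dim, then stack the results.
    batch_size = batched_args[0].size(bdim)
    outs = [op(*(a.select(bdim, i) for a in batched_args)) for i in range(batch_size)]
    return torch.stack(outs, dim=bdim)

a = torch.randn(4, 3, 3)
b = torch.randn(4, 3, 3)
# Matches vmap's result; the stack at the end is the extra copy the comment mentions.
print(torch.allclose(for_loop_fallback(torch.mm, (a, b)), torch.vmap(torch.mm)(a, b)))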
venv/lib/python3.10/site-packages/torch/include/ATen/functorch/BatchedTensorImpl.h ADDED
@@ -0,0 +1,170 @@
1
+ // Copyright (c) Facebook, Inc. and its affiliates.
2
+ // All rights reserved.
3
+ //
4
+ // This source code is licensed under the BSD-style license found in the
5
+ // LICENSE file in the root directory of this source tree.
6
+
7
+ #pragma once
8
+
9
+ #include <bitset>
10
+ #include <utility>
11
+
12
+ #include <ATen/ArrayRef.h>
13
+ #include <ATen/SmallVector.h>
14
+ #include <ATen/Tensor.h>
15
+
16
+ namespace at::functorch {
17
+
18
+ using Tensor = at::Tensor;
19
+
20
+ // We assume this in a few other places in the codebase,
21
+ // but there isn't a centralized definition.
22
+ constexpr int64_t kVmapMaxTensorDims = 64;
23
+
24
+ // The valid vmap levels range from [0, 64). This effectively means that we
25
+ // support a maximum of 64 nested vmaps.
26
+ constexpr int64_t kVmapNumLevels = 64;
27
+
28
+ // Store this number of elements of BatchDims on the stack. Most people will
29
+ // probably use <= 5 nested vmaps, but adjust this number as necessary.
30
+ constexpr int64_t kBatchDimsStackSize = 5;
31
+
32
+ // A BatchedTensorImpl holds an underlying Tensor and a single batch dim
33
+ // NB: We use the term "BatchedTensor" to mean a Tensor that is backed with a
34
+ // BatchedTensorImpl.
35
+ //
36
+ // The batch dimensions are treated as being "private"; they are not user-visible.
37
+ // For example, in the following Tensor,
38
+ // bt = BatchedTensorImpl(ones(2, 3, 5, 7), lvl=1, dim=0)
39
+ // dimension 0 is batch dimension.
40
+ //
41
+ // bt.sizes() returns (5, 7); bt.sum(0) performs a reduction over the (public)
42
+ // dim 0, which is equivalent to dim 3 in the underlying ones(2, 3, 5, 7) tensor.
43
+ struct TORCH_API BatchedTensorImpl : public c10::TensorImpl {
44
+ explicit BatchedTensorImpl(at::DispatchKeySet key_set, Tensor value, int64_t dim, int64_t level);
45
+
46
+ // Returns batch dimension of this tensor
47
+ int64_t bdim() const { return bdim_; }
48
+
49
+ // Returns batch dimension of this tensor
50
+ int64_t level() const { return level_; }
51
+
52
+ // BatchedTensorImpl wraps a Tensor
53
+ const Tensor& value() const { return value_; }
54
+
55
+ // Given a public dimension index, return the dimension index in the underlying
56
+ // value() tensor.
57
+ // For example, if we have
58
+ // bt = BatchedTensorImpl(ones(2, 3, 5, 7), lvl=1, dim=0)
59
+ // bt.actualDim(0) -> 1
60
+ // bt.actualDim(1) -> 2
61
+ // bt.actualDim(2) -> 3
62
+ // bt.actualDim(3) -> Error
63
+ int64_t actualDim(int64_t dim, bool wrap_dim = true) const;
64
+
65
+ IntArrayRef sizes_custom() const override;
66
+ SymIntArrayRef sym_sizes_custom() const override;
67
+ int64_t size_custom(int64_t d) const override;
68
+ c10::SymInt sym_size_custom(int64_t d) const override;
69
+ // We have to override this because we opted into CustomStrides
70
+ IntArrayRef strides_custom() const override;
71
+ SymIntArrayRef sym_strides_custom() const override;
72
+ // Override a bunch of methods inherited from TensorImpl to return error messages.
73
+ bool is_contiguous_custom(at::MemoryFormat memory_format=at::MemoryFormat::Contiguous) const override;
74
+ void set_size(int64_t dim, int64_t new_size) override;
75
+ void set_stride(int64_t dim, int64_t new_stride) override;
76
+ c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
77
+ const c10::VariableVersion& version_counter,
78
+ bool allow_tensor_metadata_change) const override;
79
+ c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
80
+ c10::VariableVersion&& version_counter,
81
+ bool allow_tensor_metadata_change) const override;
82
+ void shallow_copy_from(const c10::intrusive_ptr<TensorImpl>& impl) override;
83
+ #ifdef DEBUG
84
+ bool has_storage() const override;
85
+ #endif
86
+
87
+ void refreshTensorMetadata();
88
+
89
+ // Used in torchdim. torchdim uses non-lexical BatchedTensor; the way it
90
+ // accomplishes this is a hack where it is able to modify the levels of
91
+ // BatchedTensor to match the level of the current vmap transform.
92
+ void _unsafe_set_level(int64_t level) {
93
+ level_ = level;
94
+ }
95
+
96
+ // Used in batching rule for in-place view operations that can change
97
+ // the index of the bdim (think squeeze_, unsqueeze_)
98
+ void unsafe_set_bdim(int64_t bdim) {
99
+ // NB: you MUST call refreshTensorMetadata after doing this.
100
+ bdim_ = bdim;
101
+ }
102
+ private:
103
+ // see NOTE: [BatchedTensorImpl levels invariant]
104
+ void checkInvariants() const;
105
+ const char* tensorimpl_type_name() const override;
106
+
107
+ Tensor value_;
108
+
109
+ int64_t level_;
110
+ int64_t bdim_;
111
+ };
112
+
113
+ // NB: We use the term "BatchedTensor" to mean a Tensor that is backed with a
114
+ // BatchedTensorImpl.
115
+ inline bool isBatchedTensor(const Tensor& tensor) {
116
+ return tensor.unsafeGetTensorImpl()->key_set().has(DispatchKey::FuncTorchBatched) ||
117
+ tensor.unsafeGetTensorImpl()->key_set().has(DispatchKey::BatchedNestedTensor);
118
+ }
119
+
120
+ // It is unsafe to call this on a Tensor that is not backed by a
121
+ // BatchedTensorImpl. Please use `maybeGetBatchedImpl` whenever possible.
122
+ inline BatchedTensorImpl* unsafeGetBatchedImpl(Tensor tensor) {
123
+ return static_cast<BatchedTensorImpl*>(tensor.unsafeGetTensorImpl());
124
+ }
125
+
126
+ inline BatchedTensorImpl* maybeGetBatchedImpl(Tensor tensor) {
127
+ if (!isBatchedTensor(tensor)) {
128
+ return nullptr;
129
+ }
130
+ return unsafeGetBatchedImpl(std::move(tensor));
131
+ }
132
+
133
+ // Returns a bitset. If bit i is set, then that means dim i is a batchdim.
134
+ inline std::bitset<kVmapMaxTensorDims> createBatchDimBitset(int64_t dim) {
135
+ std::bitset<kVmapMaxTensorDims> is_bdim;
136
+ is_bdim.set(dim);
137
+ return is_bdim;
138
+ }
139
+
140
+ // Creates a bitset for the given level
141
+ inline std::bitset<kVmapNumLevels> createVmapLevelsBitset(int64_t level) {
142
+ std::bitset<kVmapNumLevels> result;
143
+ result.set(level);
144
+ return result;
145
+ }
146
+
147
+ // Use this to construct a BatchedTensor from a regular Tensor
148
+ TORCH_API Tensor makeBatched(const Tensor& tensor, int64_t dim, int64_t level);
149
+
150
+ // Adds a batch dim to `tensor`, returning a BatchedTensor
151
+ TORCH_API Tensor addBatchDim(const Tensor& tensor, int64_t dim, int64_t level);
152
+
153
+ // Certain dispatch keys must be propagated to the BatchedTensor (or, in general,
154
+ // any wrapper Tensor subclasses). This is because there are methods on Tensor
155
+ // that skip dispatch and check for the presence of a dispatch key (e.g. is_cpu()).
156
+ // TODO: should probably contain more (or all?) backend keys
157
+ constexpr DispatchKeySet kKeysToPropagateToWrapper({
158
+ DispatchKey::Negative,
159
+ DispatchKey::Conjugate,
160
+ DispatchKey::XLA,
161
+ DispatchKey::CUDA,
162
+ DispatchKey::CPU,
163
+ });
164
+
165
+ inline DispatchKeySet getKeysToPropagateToWrapper(const Tensor& tensor, DispatchKeySet to_propagate=kKeysToPropagateToWrapper) {
166
+ auto key_set = tensor.unsafeGetTensorImpl()->key_set();
167
+ return key_set & kKeysToPropagateToWrapper;
168
+ }
169
+
170
+ } // namespace at::functorch
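The key idea in BatchedTensorImpl.h is that the batch dimension is private: user code only ever sees the logical sizes, and actualDim() maps logical dims back to physical ones. At the Python level the observable effect (a loose illustration, assuming PyTorch 2.x) is that inside vmap the vmapped dim disappears from the visible shape:

import torch

x = torch.ones(2, 3, 5, 7)

def f(slice_):
    # Inside vmap the batch dim of size 2 is hidden: the visible shape is (3, 5, 7),
    # and logical dim 0 here corresponds to physical dim 1 of the underlying x.
    assert slice_.shape == (3, 5, 7)
    return slice_.sum(0)

out = torch.vmap(f, in_dims=0)(x)
print(out.shape)   # torch.Size([2, 5, 7])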
venv/lib/python3.10/site-packages/torch/include/ATen/functorch/BatchingMetaprogramming.h ADDED
@@ -0,0 +1,126 @@
1
+ // Copyright (c) Facebook, Inc. and its affiliates.
2
+ // All rights reserved.
3
+ //
4
+ // This source code is licensed under the BSD-style license found in the
5
+ // LICENSE file in the root directory of this source tree.
6
+
7
+ #pragma once
8
+ #include <ATen/Tensor.h>
9
+ #include <ATen/VmapGeneratedPlumbing.h>
10
+
11
+ // This file contains template metaprogramming things that are used for our
12
+ // batching rules.
13
+ //
14
+ // See NOTE: [vmap plumbing] for more details on why this is necessary.
15
+ // The plumbing has a bunch of metaprogramming hacks for determining the signature
16
+ // of a batching rule from the signature of the operator, many of which use the
17
+ // helper functions in this file.
18
+
19
+ namespace at::functorch {
20
+
21
+ // Metaprogramming things
22
+ template <class... Items> using typelist = c10::guts::typelist::typelist<Items...>;
23
+ template <class TypeList> using head_t = c10::guts::typelist::head_t<TypeList>;
24
+ template <class TL1, class TL2> using concat_t = c10::guts::typelist::concat_t<TL1, TL2>;
25
+ template <typename T> class debug_t;
26
+
27
+ // tail operation
28
+ template<class TypeList>
29
+ struct tail final {
30
+ static_assert(c10::guts::false_t<TypeList>::value,
31
+ "In typelist::tail<T>, the T argument must be typelist<...>.");
32
+ };
33
+ template<class Head, class... Tail>
34
+ struct tail<typelist<Head, Tail...>> final {
35
+ using type = typelist<Tail...>;
36
+ };
37
+ template<class TypeList> using tail_t = typename tail<TypeList>::type;
38
+
39
+ template <class First, class Second, class Next, class Tail>
40
+ struct IfFirstIsTensorAndSecondisBatchDimThenTailElseNext {
41
+ using type = Next;
42
+ };
43
+ template <class Next, class Tail>
44
+ struct IfFirstIsTensorAndSecondisBatchDimThenTailElseNext<Tensor, optional<int64_t>, Next, Tail> {
45
+ using type = Tail;
46
+ };
47
+ template <class Next, class Tail>
48
+ struct IfFirstIsTensorAndSecondisBatchDimThenTailElseNext<const Tensor&, optional<int64_t>, Next, Tail> {
49
+ using type = Tail;
50
+ };
51
+ template <class Next, class Tail>
52
+ struct IfFirstIsTensorAndSecondisBatchDimThenTailElseNext<Tensor&, optional<int64_t>, Next, Tail> {
53
+ using type = Tail;
54
+ };
55
+ template <class Next, class Tail>
56
+ struct IfFirstIsTensorAndSecondisBatchDimThenTailElseNext<optional<Tensor>, optional<int64_t>, Next, Tail> {
57
+ using type = Tail;
58
+ };
59
+ template <class Next, class Tail>
60
+ struct IfFirstIsTensorAndSecondisBatchDimThenTailElseNext<const optional<Tensor>&, optional<int64_t>, Next, Tail> {
61
+ using type = Tail;
62
+ };
63
+ template <class Next, class Tail>
64
+ struct IfFirstIsTensorAndSecondisBatchDimThenTailElseNext<optional<Tensor>&, optional<int64_t>, Next, Tail> {
65
+ using type = Tail;
66
+ };
67
+ template <class Next, class Tail>
68
+ struct IfFirstIsTensorAndSecondisBatchDimThenTailElseNext<std::vector<Tensor>, optional<int64_t>, Next, Tail> {
69
+ using type = Tail;
70
+ };
71
+ template <class TypeList> struct RemoveBatchDimAfterTensor {
72
+ using first = head_t<TypeList>;
73
+ using next = tail_t<TypeList>;
74
+ using second = head_t<next>;
75
+ using tail = tail_t<next>;
76
+
77
+ using type = concat_t<
78
+ typelist<first>,
79
+ typename RemoveBatchDimAfterTensor<
80
+ typename IfFirstIsTensorAndSecondisBatchDimThenTailElseNext<first, second, next, tail>::type
81
+ >::type
82
+ >;
83
+ };
84
+ template <class Type> struct RemoveBatchDimAfterTensor<typelist<Type>> {
85
+ using type = typelist<Type>;
86
+ };
87
+ template <> struct RemoveBatchDimAfterTensor<typelist<>> {
88
+ using type = typelist<>;
89
+ };
90
+ template<class TypeList> using remove_batch_dim_after_tensor_t = typename RemoveBatchDimAfterTensor<TypeList>::type;
91
+
92
+ template <typename T> struct UnpackSingleItemTuple {
93
+ using type = T;
94
+ };
95
+ template <typename T> struct UnpackSingleItemTuple<std::tuple<T>> {
96
+ using type = T;
97
+ };
98
+ template <typename T> using unpack_single_item_tuple_t = typename UnpackSingleItemTuple<T>::type;
99
+
100
+ template <typename Return, typename TupleArgs> struct BuildFunctionHelper;
101
+ template <typename Return, typename... Args> struct BuildFunctionHelper<Return, std::tuple<Args...>> {
102
+ using type = Return(Args...);
103
+ };
104
+ template <typename Return, typename TL>
105
+ struct BuildFunction {
106
+ using type = typename BuildFunctionHelper<Return, c10::guts::typelist::to_tuple_t<TL>>::type;
107
+ };
108
+ template <typename Return, typename TL> using build_function_t = typename BuildFunction<Return, TL>::type;
109
+
110
+
111
+ template <typename batch_rule_t> struct ToOperatorType {
112
+ using batch_rule_return_type = typename c10::guts::function_traits<batch_rule_t>::return_type;
113
+ using batch_rule_parameter_types = typename c10::guts::function_traits<batch_rule_t>::parameter_types;
114
+
115
+ using operator_parameter_types = remove_batch_dim_after_tensor_t<batch_rule_parameter_types>;
116
+ using operator_return_type =
117
+ unpack_single_item_tuple_t<
118
+ c10::guts::typelist::to_tuple_t<
119
+ remove_batch_dim_after_tensor_t<
120
+ c10::guts::typelist::from_tuple_t<batch_rule_return_type>>>>;
121
+
122
+ using type = build_function_t<operator_return_type, operator_parameter_types>;
123
+ };
124
+ template <typename batch_rule_t> using to_operator_t = typename ToOperatorType<batch_rule_t>::type;
125
+
126
+ } // namespace at::functorch
venv/lib/python3.10/site-packages/torch/include/ATen/functorch/DynamicLayer.h ADDED
@@ -0,0 +1,124 @@
1
+ // Copyright (c) Facebook, Inc. and its affiliates.
2
+ // All rights reserved.
3
+ //
4
+ // This source code is licensed under the BSD-style license found in the
5
+ // LICENSE file in the root directory of this source tree.
6
+
7
+ #pragma once
8
+ #include <ATen/functorch/Macros.h>
9
+ #include <c10/core/DispatchKey.h>
10
+ #include <ATen/core/function_schema.h>
11
+ #include <c10/util/Optional.h>
12
+ #include <c10/core/impl/LocalDispatchKeySet.h>
13
+ #include <ATen/functorch/Interpreter.h>
14
+ #include <ATen/functorch/VmapInterpreter.h>
15
+ #include <ATen/functorch/ADInterpreters.h>
16
+ #include <ATen/functorch/FunctionalizeInterpreter.h>
17
+
18
+ // Forward declared
19
+ namespace c10 { struct AutogradMetaInterface; }
20
+
21
+ namespace at::functorch {
22
+
23
+ // This file contains the implementation of functorch's interpreter stack.
24
+ // See NOTE: [functorch interpreter stack] first before reading on.
25
+ //
26
+ // NB: the functorch interpreter stack is also referred to as:
27
+ // - the "dynamic layer stack" -- an older name for "interpreter" was
28
+ // "dynamic layer".
29
+ // - the "functorch mode stack". You can think of each functorch transform as a
30
+ // "mode" (in the same sense as torch_dispatch mode or torch_function mode),
31
+ // and functorch being an implementation of a "mode stack" where the modes
32
+ // may be arbitrary composed.
33
+
34
+ // DynamicLayer is basically the same thing as an Interpreter.
35
+ // It represents a functorch transform and it holds an Interpreter,
36
+ // which contains metadata related to the transform and instructions on
37
+ // how to perform the transform.
38
+ //
39
+ // TODO: we can excise DynamicLayer in favor of Interpreter,
40
+ // But I am going to leave it for now as a compatiblity shim to avoid
41
+ // needing to refactor a lot of callsites...
42
+ struct TORCH_API DynamicLayer {
43
+ explicit DynamicLayer(
44
+ TransformType transform_type,
45
+ int64_t layerId,
46
+ optional<c10::SymInt> batchSize = nullopt,
47
+ optional<RandomnessType> randomness = nullopt,
48
+ optional<bool> prev_grad_mode = nullopt,
49
+ optional<bool> pre_fwd_grad_mode = nullopt,
50
+ optional<bool> functionalize_add_back_views = nullopt);
51
+
52
+ TransformType key() const;
53
+ int64_t layerId() const;
54
+
55
+ const Interpreter& interpreter() const { return interpreter_; }
56
+ Interpreter& interpreter() { return interpreter_; }
57
+
58
+ // Only valid for vmap
59
+ c10::SymInt batchSize() const;
60
+ RandomnessType randomness() const;
61
+
62
+ private:
63
+ Interpreter interpreter_;
64
+ };
65
+
66
+ TORCH_API int64_t initAndPushDynamicLayer(
67
+ TransformType transform_type,
68
+ optional<c10::SymInt> batch_size = nullopt,
69
+ optional<RandomnessType> randomness = nullopt,
70
+ optional<bool> prev_grad_mode = nullopt,
71
+ optional<bool> prev_fwd_grad_mode = nullopt,
72
+ optional<bool> functionalize_add_back_views = nullopt);
73
+ TORCH_API DynamicLayer popDynamicLayerAndDeleteMetadata();
74
+ TORCH_API c10::optional<DynamicLayer> maybeCurrentDynamicLayer();
75
+ TORCH_API const std::vector<DynamicLayer>& getDynamicLayerStack();
76
+ TORCH_API void setDynamicLayerStack(const std::vector<DynamicLayer>& stack);
77
+ TORCH_API void setDynamicLayerFrontBackKeysIncluded(bool included);
78
+
79
+ // NOTE: [Life handles and lexically scoped transforms]
80
+ // functorch transforms are lexically scoped.
81
+ // Given a level, we store a "life handle" that is a boolean that tells us if the
82
+ // transform with that level is active or not.
83
+ //
84
+ // functorch's TensorWrapper (for grad transforms) stores a life handle.
85
+ // If a TensorWrapper escapes from the scope of the transform, then somehow
86
+ // it must know it escaped; it can tell by querying the life handle.
87
+ TORCH_API const std::shared_ptr<bool>& getLifeHandleForLevel(int64_t level);
88
+
89
+ // Returns if an operator is in-place. An operator is inplace if:
90
+ // 1. The first argument is a Tensor and it is being written to
91
+ // 2. The first argument is being returned
92
+ // 3. No other arguments are aliased
93
+ // Here is an example of an in-place operator:
94
+ // add_(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
95
+ TORCH_API bool isInplaceOp(const c10::FunctionSchema& schema);
96
+
97
+ // Given the indices of unwrapped inputs and the schema, this returns the indices of any outputs that should remain unwrapped
98
+ TORCH_API c10::optional<size_t> findAliasedOutput(const FunctionSchema& schema, const int64_t immutable_input);
99
+
100
+ TORCH_API Tensor unwrapIfDead(const Tensor& tensor);
101
+ TORCH_API bool isDeadTensorWrapper(const Tensor& tensor);
102
+
103
+ // Pretty printers
104
+ TORCH_API std::ostream& operator<<(std::ostream& os, const DynamicLayer& layer);
105
+ TORCH_API std::ostream& operator<<(std::ostream& os, const std::vector<DynamicLayer>& dynamicLayerStack);
106
+
107
+ // While a functorch transform is active, torch.autograd.function._SingleLevelFunction
108
+ // is disabled by default. The following two APIs are APIs for enabling
109
+ // it. These are not user-facing APIs. We can delete this in the future, but
110
+ // it is useful for debugging when something goes wrong with the
111
+ // autograd.Function <> functorch interaction, which uses _SingleLevelFunction,
112
+ // because it leads to loud errors if something is incorrect.
113
+ TORCH_API void setSingleLevelAutogradFunctionAllowed(bool allowed);
114
+ TORCH_API bool getSingleLevelAutogradFunctionAllowed();
115
+
116
+ // While a functorch grad transform is active, Tensor.requires_grad_() gets
117
+ // disabled. These two functions are the mechanism to controlling that.
118
+ TORCH_API void setInplaceRequiresGradAllowed(bool allowed);
119
+ TORCH_API bool getInplaceRequiresGradAllowed();
120
+
121
+ TORCH_API DynamicLayer popDynamicLayer();
122
+ TORCH_API int64_t pushDynamicLayer(DynamicLayer&& layer);
123
+
124
+ } // namespace at::functorch
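DynamicLayer.h manages the stack of active transforms; each nested transform pushes one layer (interpreter) with its own level, which is how lexical scoping of transforms is enforced. A user-level illustration of that nesting (assuming PyTorch 2.x with torch.func):

import torch
from torch.func import grad, vmap

def f(x):
    return (x ** 3).sum()

x = torch.randn(5, 1)

# Two transforms compose, so two layers sit on the interpreter stack while this runs:
# the outer vmap at one level and the inner grad at the next.
per_sample_grads = vmap(grad(f))(x)
print(torch.allclose(per_sample_grads, 3 * x ** 2))   # True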
venv/lib/python3.10/site-packages/torch/include/ATen/functorch/FunctionalizeInterpreter.h ADDED
@@ -0,0 +1,22 @@
+ #pragma once
+ #include <ATen/functorch/Interpreter.h>
+
+ namespace at::functorch {
+
+ // This is the interpreter that handles the functionalize() transform.
+ // See NOTE: [functorch interpreter stack] for more details.
+
+ struct FunctionalizeInterpreterPtr {
+ explicit FunctionalizeInterpreterPtr(const Interpreter* base): base_(base) { TORCH_INTERNAL_ASSERT(base->key() == TransformType::Functionalize); }
+ TransformType key() const { return base_->key(); }
+ int64_t level() const { return base_->level(); }
+ void processImpl(const c10::OperatorHandle& op, torch::jit::Stack* stack);
+ void sendToNextInterpreterImpl(const c10::OperatorHandle& op, torch::jit::Stack* stack, bool grad_special_case);
+ bool functionalizeAddBackViews() const {
+ return std::get<FunctionalizeInterpreterMeta>(base_->meta()).functionalizeAddBackViews_;
+ }
+ private:
+ const Interpreter* base_;
+ };
+
+ } // namespace at::functorch
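FunctionalizeInterpreterPtr backs the functionalize() transform, which replaces mutation and view ops with out-of-place equivalents so that the transforms below it only see functional ops. A rough user-level illustration (assuming PyTorch 2.x, where torch.func.functionalize is the public entry point for this interpreter):

import torch
from torch.func import functionalize

def f(x):
    y = x.clone()
    y.add_(1)          # in-place mutation
    return y * 2

x = torch.randn(3)
g = functionalize(f)    # runs f with mutations rewritten to out-of-place ops
print(torch.allclose(g(x), f(x)))   # same values, no mutation visible to later transforms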
venv/lib/python3.10/site-packages/torch/include/ATen/functorch/Interpreter.h ADDED
@@ -0,0 +1,208 @@
1
+ #pragma once
2
+
3
+ #include <ATen/functorch/Macros.h>
4
+ #include <ATen/core/dispatch/Dispatcher.h>
5
+ #include <c10/core/impl/LocalDispatchKeySet.h>
6
+ #include <c10/util/Optional.h>
7
+ #include <bitset>
8
+ #include <variant>
9
+
10
+ namespace at::functorch {
11
+
12
+ // NOTE: [functorch interpreter stack]
13
+ //
14
+ // functorch's dispatching system uses a stack of interpreters.
15
+ // Historically we've referred to this as the "DynamicLayerStack".
16
+ //
17
+ // An interpreter is something that reads in the code it is passed
18
+ // and then executes it. We have a different interpreter per-transform:
19
+ // the "VmapInterpreter" is responsible for reading in operators (like aten::mv)
20
+ // and executing the batched version of it (the batching rule for aten::mv).
21
+ //
22
+ // Concretely, each interpreter is responsible for two things:
23
+ //
24
+ // 1) process(ophandle, stack)
25
+ // Given an operator handle and a stack of arguments, the interpreter is
26
+ // responsible for figuring out how to execute the operation under the semantics
27
+ // of the interpreter. For e.g. VmapInterpreter, this is figuring out how to call
28
+ // the batching rule.
29
+ //
30
+ // The batching rules are stored as kernels on the FuncTorchBatched key, so the way
31
+ // VmapInterpreter calls the batching rule is roughly: (A) exclude all
32
+ // dispatch keys aside from the Batched key, (B) redispatch so we get to the
33
+ // Batched key.
34
+ //
35
+ // 2) sendToNextInterpreter(ophandle, stack)
36
+ // The VmapInterpreter, when it sees aten::mv, will process it into a call to
37
+ // aten::mm. It then needs to send the call to aten::mm to the next interpreter
38
+ // in the interpreter stack.
39
+ //
40
+ // The VmapInterpreter just does this via a call to ophandle.callBoxed(stack)
41
+ // and most Interpreters will implement it this way.
42
+
43
+ enum class RandomnessType {
44
+ Error, // always errors when calling a random function
45
+ Same, // randomness appears the same across batches
46
+ Different, // randomness appears different across batches
47
+ END
48
+ };
49
+
50
+ enum class TransformType {
51
+ Torch, // Unused
52
+ Vmap,
53
+ Grad, // reverse-mode AD, aka vjp
54
+ Jvp, // forward-mode AD
55
+ Functionalize,
56
+ };
57
+
58
+ std::ostream& operator<<(std::ostream& os, const TransformType& t);
59
+
60
+ // NOTE: [Interpreter "subclassing" design]
61
+ //
62
+ // How are various Interpreters for different transforms (vmap, grad, ...)
63
+ // implemented?
64
+ //
65
+ // Accessing interpreters is in the hot-path of functorch so we have a constraint
66
+ // that this code must be as fast as possible.
67
+ //
68
+ // As a result, we stay away from virtual methods and this causes our code
69
+ // to look a little funny.
70
+ //
71
+ // `Interpreter` is the struct for Interpreters. It holds ALL of the
72
+ // relevant information (what type of interpreter it is and the metadata).
73
+ // Metadata for each interpreter is represented as a Union (std::variant)
74
+ // of all possible metadata (VmapInterpreterMeta, GradInterpreterMeta, ...).
75
+ //
76
+ // Given an Interpreter, how do I get a "VmapInterpreter"? You may wish to do this
77
+ // if you want to access the metadata fields (like batchSize and randomness).
78
+ //
79
+ // Each type of interpreter (e.g. Vmap) has a convenience struct
80
+ // (e.g. VmapInterpreterPtr) associated with it.
81
+ //
82
+ // Construct the convenience struct with VmapInterpreterPtr(Interpreter*),
83
+ // and then one can access methods on VmapInterpreterPtr like so:
84
+ // >>> VmapInterpreterPtr(&interpreter).batchSize()
85
+ //
86
+ // Finally, Interpreter::process switches on the type of the interpreter
87
+ // and calls one of {Transform}Interpreter::processImpl under the hood.
88
+ // Same for Interpreter::sendToNextInterpreter :)
89
+
90
+ struct VmapInterpreterMeta {
91
+ explicit VmapInterpreterMeta(c10::SymInt batchSize, RandomnessType randomness) :
92
+ batchSize_(std::move(batchSize)), randomness_(randomness) {}
93
+ c10::SymInt batchSize_;
94
+ RandomnessType randomness_;
95
+ };
96
+
97
+ struct GradInterpreterMeta {
98
+ explicit GradInterpreterMeta(bool prevGradMode): prevGradMode_(prevGradMode) {}
99
+ bool prevGradMode_;
100
+ };
101
+
102
+ struct JvpInterpreterMeta {
103
+ explicit JvpInterpreterMeta(bool prevFwdGradMode) : prevFwdGradMode_(prevFwdGradMode) {}
104
+ bool prevFwdGradMode_;
105
+ };
106
+
107
+ struct FunctionalizeInterpreterMeta {
108
+ explicit FunctionalizeInterpreterMeta(bool functionalizeAddBackViews) :
109
+ functionalizeAddBackViews_(functionalizeAddBackViews) {}
110
+ bool functionalizeAddBackViews_;
111
+ };
112
+
113
+ typedef std::variant<
114
+ int64_t,
115
+ GradInterpreterMeta,
116
+ JvpInterpreterMeta,
117
+ VmapInterpreterMeta,
118
+ FunctionalizeInterpreterMeta
119
+ > InterpreterMeta;
120
+
121
+
122
+ struct Interpreter {
123
+ // factory functions
124
+ static Interpreter Vmap(int64_t level, c10::SymInt batchSize, RandomnessType randomness) {
125
+ return Interpreter(TransformType::Vmap, level, VmapInterpreterMeta(std::move(batchSize), randomness));
126
+ }
127
+ static Interpreter Grad(int64_t level, bool prevGradMode) {
128
+ return Interpreter(TransformType::Grad, level, GradInterpreterMeta(prevGradMode));
129
+ }
130
+ static Interpreter Jvp(int64_t level, bool prevFwdGradMode) {
131
+ return Interpreter(TransformType::Jvp, level, JvpInterpreterMeta(prevFwdGradMode));
132
+ }
133
+ static Interpreter Functionalize(int64_t level, bool functionalizeAddBackViews) {
134
+ return Interpreter(TransformType::Functionalize, level, FunctionalizeInterpreterMeta(functionalizeAddBackViews));
135
+ }
136
+
137
+ // methods
138
+ TransformType key() const { return type_; }
139
+ int64_t level() const { return level_; }
140
+ const InterpreterMeta& meta() const { return meta_; }
141
+
142
+ void process(const c10::OperatorHandle& op, torch::jit::Stack* stack);
143
+ void sendToNextInterpreter(const c10::OperatorHandle& op, torch::jit::Stack* stack, bool grad_special_case);
144
+
145
+ void saveLocalDispatchKeySet(c10::impl::LocalDispatchKeySet keyset) {
146
+ TORCH_INTERNAL_ASSERT(!savedLocalDispatchKeySet_.has_value());
147
+ savedLocalDispatchKeySet_ = std::move(keyset);
148
+ }
149
+ void clearSavedLocalDispatchKeySet() {
150
+ TORCH_INTERNAL_ASSERT(savedLocalDispatchKeySet_.has_value());
151
+ savedLocalDispatchKeySet_ = c10::nullopt;
152
+ }
153
+ c10::impl::LocalDispatchKeySet getSavedLocalDispatchKeySet() const {
154
+ TORCH_INTERNAL_ASSERT(savedLocalDispatchKeySet_.has_value());
155
+ return *savedLocalDispatchKeySet_;
156
+ }
157
+
158
+ // An Interpreter is alive if we are currently inside the ongoing transform
159
+ // for the interpreter. For example, vmap(f)(x); inside of f, the vmap's
160
+ // corresponding Interpreter is alive, even when it is not on the DynamicLayerStack.
161
+ bool is_alive() const {
162
+ return *is_alive_;
163
+ }
164
+ const std::shared_ptr<bool>& is_alive_ptr() const {
165
+ return is_alive_;
166
+ }
167
+ void set_is_alive(bool alive) {
168
+ *is_alive_ = alive;
169
+ }
170
+
171
+ // Please don't use this
172
+ explicit Interpreter() = default;
173
+
174
+ private:
175
+ explicit Interpreter(TransformType type, int64_t level, InterpreterMeta meta):
176
+ type_(type), level_(level), is_alive_(std::make_shared<bool>(false)), meta_(meta) {}
177
+
178
+ // fields
179
+ TransformType type_;
180
+ int64_t level_;
181
+ optional<c10::impl::LocalDispatchKeySet> savedLocalDispatchKeySet_;
182
+ std::shared_ptr<bool> is_alive_;
183
+ InterpreterMeta meta_;
184
+ };
185
+
186
+ // Applies the following for-loop:
187
+ // for i in range(begin, end):
188
+ // args[i] = func(args[i])
189
+ void foreachTensorInplace(std::vector<IValue>& args, int64_t begin, int64_t end,
190
+ std::function<Tensor(const Tensor&)> func);
191
+
192
+ // Applies the following for-loop:
193
+ // for i in range(begin, end):
194
+ // if use_flag_relative[i] == 1: <-- treats use_flag_relative as a bitset
195
+ // args[i] = func(args[i], i - begin, true)
196
+ // args[i] = func(args[i], i - begin)
197
+ void foreachTensorInplaceWithFlag(std::vector<IValue>& args, int64_t begin, int64_t end,
198
+ const std::bitset<64> use_flag_relative, std::function<Tensor(const Tensor&, bool)> func);
199
+
200
+ std::vector<int64_t> findUnwrappedInputs(std::vector<IValue>& args, int64_t begin, int64_t end);
201
+
202
+ DispatchKeySet keysToExcludeWhenEnteringDynamicLayer(TransformType key);
203
+
204
+ void setup_dispatch_key_tls(TransformType key, DispatchKeySet include);
205
+
206
+ void sanityCheckStack(const c10::OperatorHandle& op, torch::jit::Stack* stack);
207
+
208
+ } // namespace at::functorch
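
The two NOTE blocks above (interpreter stack; "subclassing" design) describe a deliberately virtual-free layout: one Interpreter struct, a std::variant of per-transform metadata, a thin per-transform "Ptr" view, and a switch inside process(). A rough standalone sketch of that pattern, with toy names rather than the real functorch classes, might look like this:

#include <cassert>
#include <cstdint>
#include <iostream>
#include <variant>

enum class TransformType { Vmap, Grad };

struct VmapMeta { int64_t batchSize; };
struct GradMeta { bool prevGradMode; };
using Meta = std::variant<VmapMeta, GradMeta>;

// One plain struct for every transform; no virtual methods.
struct Interp {
  TransformType type;
  int64_t level;
  Meta meta;
};

// Per-transform convenience view, mirroring VmapInterpreterPtr.
struct VmapInterpPtr {
  explicit VmapInterpPtr(const Interp* base) : base_(base) {
    assert(base_->type == TransformType::Vmap);
  }
  int64_t batchSize() const { return std::get<VmapMeta>(base_->meta).batchSize; }
  const Interp* base_;
};

// process() dispatches with a switch on the transform type instead of a vtable.
void process(const Interp& interp) {
  switch (interp.type) {
    case TransformType::Vmap:
      std::cout << "vmap at level " << interp.level
                << ", batch size " << VmapInterpPtr(&interp).batchSize() << "\n";
      break;
    case TransformType::Grad:
      std::cout << "grad at level " << interp.level << "\n";
      break;
  }
}

int main() {
  process(Interp{TransformType::Vmap, /*level=*/1, VmapMeta{8}});
  process(Interp{TransformType::Grad, /*level=*/2, GradMeta{true}});
  return 0;
}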
venv/lib/python3.10/site-packages/torch/include/ATen/functorch/LegacyVmapTransforms.h ADDED
@@ -0,0 +1,187 @@
1
+ // Copyright (c) Facebook, Inc. and its affiliates.
2
+ // All rights reserved.
3
+ //
4
+ // This source code is licensed under the BSD-style license found in the
5
+ // LICENSE file in the root directory of this source tree.
6
+
7
+ #pragma once
8
+
9
+ #include <ATen/functorch/Macros.h>
10
+ #include <ATen/functorch/BatchedTensorImpl.h>
11
+
12
+ namespace at::functorch {
13
+
14
+ // This file contains the legacy (now-deprecated) batching rule API.
15
+ // Please try to use the new-style batching rule API (see writing_batch_rules.md)
16
+
17
+ // This file contains abstractions used for transforming *logical* vmap arguments
18
+ // into *physical* arguments. (Keep reading for definitions of these terms).
19
+
20
+ // NOTE: [Logical vs physical args]
21
+ // Consider the following vmap.
22
+ // vmap(vmap(func, in_dims=(2,)), in_dims=(0,))(torch.ones(2, 3, 4))
23
+ // This would produce a BatchedTensor wrapping a Tensor of size [2, 3, 4],
24
+ // with batch dims 0 and 2:
25
+ // BatchedTensor(ones(2, 3, 4), bdims=[(lvl=1,dim=0),(lvl=2,dim=2)])
26
+ //
27
+ // We say the *logical* view of the tensor has size [3] -- tensors inside
28
+ // `func` appear to have size [3].
29
+ // However, the *physical* underlying tensor (the one passed to vmap) has size
30
+ // [2, 3, 4].
31
+ //
32
+ // This notion of logical vs physical also extends to non-tensor arguments.
33
+ // Consider the previous tensor; let's assume the user called
34
+ // `torch.sum(tensor, dim=0)` inside of `func`. Then the logical
35
+ // dimension they are reducing over is dim 0 but the physical dim is dim 1
36
+ // (the first non-batch dimension)
37
+
38
+ // Forward declared; see NOTE: [What is a VmapPhysicalView?]
39
+ struct VmapPhysicalView;
40
+
41
+ // Most PyTorch operators take 4 or fewer inputs.
42
+ constexpr int64_t kVmapTransformStaticInputSize = 4;
43
+ using VmapPhysicalViewVec = SmallVector<VmapPhysicalView, kVmapTransformStaticInputSize>;
44
+
45
+ // PyTorch generally advertises good performance for <= 5 dims.
46
+ // (see ATen/core/DimVector.h). We add a few extra dims (~3) for vmap
47
+ // dimensions to get 8. Adjust this number as necessary.
48
+ constexpr int64_t kVmapStaticDimVecSize = 8;
49
+ using VmapDimVector = SmallVector<int64_t, kVmapStaticDimVecSize>;
50
+ using VmapSymDimVector = SmallVector<c10::SymInt, kVmapStaticDimVecSize>;
51
+
52
+ // NOTE: [What is a VmapTransform?]
53
+ // A *VmapTransform* converts logical views of tensors to physical views.
54
+ //
55
+ // Batching rules use VmapTransforms to convert logical arguments to
56
+ // physical arguments, then call one or more at:: operators that handle the
57
+ // physical arguments, and then convert the physical result back to a logical
58
+ // argument.
59
+
60
+ // VmapTransform for operators that take tensors with multiple batch dims.
61
+ // Given one or more logical views on Tensors, `logicalToPhysical`
62
+ // permutes all of the batch dims to the front of the tensor, aligns
63
+ // and expands the batch dims to match each other (according to their `level`),
64
+ // and returns a VmapPhysicalView on the tensor(s).
65
+ struct TORCH_API MultiBatchVmapTransform {
66
+ static VmapPhysicalView logicalToPhysical(const Tensor& logical_tensor);
67
+ static VmapPhysicalViewVec logicalToPhysical(ITensorListRef logical_tensors);
68
+ };
69
+
70
+ // VmapTransform for operators that broadcast all inputs.
71
+ // Given some logical views on Tensors, `logicalToPhysical`:
72
+ // - permutes all of the batch dims to the front of the tensors
73
+ // - aligns all the batch dims to the collective levels of all of the tensors.
74
+ // If a tensor does not have a batch dim for a vmap level, then it receives
75
+ // a size-one dimension for said level.
76
+ // - aligns the non-batch dims to have the same dimensionality, adding extra
77
+ // size-1 dimensions in between the batch dimensions and the non-batch dimensions
78
+ // so that the batch dimensions are lined up from the right.
79
+ //
80
+ // For example: given inputs of size (B, 2) and (B, 3, 2) where B is the batch
81
+ // dimension, BroadcastingVmapTransform returns VmapPhysicalViews that wrap tensors
82
+ // of size (B, 1, 2) and (B, 3, 2).
83
+ //
84
+ // Given inputs of size (B, 2) and (2,), BroadcastingVmapTransform returns
85
+ // VmapPhysicalViews wrapping tensors of size (B, 2) and (1, 2). We don't
86
+ // actually *need* to return a tensor of size (1, 2) for the second tensor
87
+ // because the broadcasting operation takes care of that for us, but we do
88
+ // it anyways to keep things simple.
89
+ struct TORCH_API BroadcastingVmapTransform {
90
+ static VmapPhysicalViewVec logicalToPhysical(TensorList logical_tensors);
91
+ };
92
+
93
+ // Forward declared; if you're reading this file head to toe, don't worry about
94
+ // it yet.
95
+ struct VmapPhysicalToLogicalMap;
96
+
97
+ // NOTE: [What is a VmapPhysicalView?]
98
+ // VmapPhysicalView represents a physical view on a Tensor.
99
+ //
100
+ // One can use it to further convert logical dimension indices, logical shapes,
101
+ // and more to their physical variants, or convert a new (physical) tensor into
102
+ // a logical BatchedTensor. (TODO(rzou): some of these are not yet implemented).
103
+ //
104
+ // VmapPhysicalView stores a physical tensor with all of its batch dimensions at
105
+ // the front and some levels that correspond to said batch dimensions.
106
+ //
107
+ // The levels bitset specifies which vmap levels correspond to the batch
108
+ // dimensions at the front of the tensor. In particular, the number of set bits
109
+ // corresponds to the number of batch dimensions on `tensor` and the rightmost
110
+ // bit of `levels` specifies the maximum number of nested vmaps we are in at
111
+ // this point in time.
112
+ // For example, given:
113
+ // physical_view = VmapPhysicalView(tensor=ones(2, 3, 4, 5, 6), levels={1, 3})
114
+ //
115
+ // Rightmost bit of `levels` is 3 indicating the number of nested vmaps less
116
+ // than or equal to 3.
117
+ // bitset: 010100
118
+ // ^
119
+ // |
120
+ // levels: 012345
121
+ struct TORCH_API VmapPhysicalView {
122
+ VmapPhysicalView(Tensor&& tensor, std::bitset<kVmapNumLevels> levels)
123
+ : levels_(levels), tensor_(tensor) {
124
+ // TORCH_INTERNAL_ASSERT(!isBatchedTensor(tensor));
125
+ }
126
+
127
+ Tensor& tensor() { return tensor_; }
128
+ const Tensor& tensor() const { return tensor_; }
129
+
130
+ // Maps logical dim indices to physical dim indices. Also does dim wrapping.
131
+ //
132
+ // For example, given:
133
+ // physical_view = VmapPhysicalView(tensor=ones(2, 3, 4, 5), levels={1, 3})
134
+ //
135
+ // Then physical_view.getPhysicalDims({0, 1}) returns {2, 3}.
136
+ // This is because the size of levels tell us that the first two dimensions
137
+ // of `tensor_` are batch dimensions, so a logical dim of `n` is actually
138
+ // a physical dim of `n + 2`.
139
+ VmapDimVector getPhysicalDims(IntArrayRef logical_dims) const;
140
+ int64_t getPhysicalDim(int64_t logical_dim) const;
141
+
142
+ // Returns a VmapPhysicalToLogicalMap object. This can be used for
143
+ // mapping a physical tensor to a new logical tensor (BatchedTensor)
144
+ VmapPhysicalToLogicalMap getPhysicalToLogicalMap() const;
145
+
146
+ // Maps a logical shape to a physical shape by pre-pending the batch
147
+ // sizes to the logical shape.
148
+ VmapDimVector getPhysicalShape(IntArrayRef logical_shape) const;
149
+ SymDimVector getPhysicalShape(c10::SymIntArrayRef logical_shape) const;
150
+
151
+ int64_t numBatchDims() const;
152
+
153
+ private:
154
+ int64_t numLogicalDims() const;
155
+
156
+ std::bitset<kVmapNumLevels> levels_;
157
+ Tensor tensor_;
158
+ };
159
+
160
+ // Convenience struct used for mapping a physical tensor (a non-BatchedTensor)
161
+ // to a logical one (BatchedTensor). It holds some levels that are used to do the
162
+ // mapping and assumes that the batch dimensions in the physical tensor all
163
+ // occur at the front of the tensor.
164
+ struct TORCH_API VmapPhysicalToLogicalMap {
165
+ VmapPhysicalToLogicalMap(std::bitset<kVmapNumLevels> levels): levels_(levels) {}
166
+
167
+ // Maps a physical tensor to a new logical tensor (BatchedTensor).
168
+ // Assumes that all of the "batch dimensions" are at the front
169
+ // of the physical tensor. For example, given:
170
+ // - x = rank-4 Tensor with size 2, 3, 5, 7
171
+ // - levels = (2, 4)
172
+ // Returns:
173
+ // - BatchedTensor(x, bdims=[(dim=0,lvl=2), (dim=1, lvl=4)])
174
+ Tensor apply(const Tensor& physical_tensor) const;
175
+
176
+ // Given a vector of physical tensors,
177
+ // 1. maps each tensor to a new logical tensor. Assumes that all of the
178
+ // "batch dimensions" are at the front of the physical tensors.
179
+ // 2. stores the new logical tensors back into the passed-in vector. This is
180
+ // to avoid additional dynamic allocations.
181
+ void applyInplace(std::vector<Tensor>& physical_tensors) const;
182
+
183
+ std::bitset<kVmapNumLevels> levels_;
184
+ };
185
+
186
+
187
+ } // namespace at::functorch
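
The logical-vs-physical note and the getPhysicalDims comment reduce to simple index arithmetic: wrap negative dims against the logical rank, then shift by the number of leading batch dims. A tiny standalone illustration of that arithmetic (my own sketch, not library code):

#include <cstdint>
#include <iostream>
#include <vector>

// A logical dim `d` (possibly negative) on a logical view of rank `logical_rank`
// maps to physical dim `d + num_batch_dims` once all batch dims sit at the front.
int64_t physical_dim(int64_t d, int64_t logical_rank, int64_t num_batch_dims) {
  if (d < 0) d += logical_rank;  // dim wrapping, as getPhysicalDim does
  return d + num_batch_dims;
}

int main() {
  // Mirrors the comment's example: a physical tensor ones(2, 3, 4, 5) with
  // levels {1, 3} has two batch dims, so logical dims {0, 1} map to {2, 3}.
  const std::vector<int64_t> logical_dims = {0, 1, -1};
  for (int64_t d : logical_dims) {
    std::cout << d << " -> "
              << physical_dim(d, /*logical_rank=*/2, /*num_batch_dims=*/2) << "\n";
  }
  return 0;
}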
venv/lib/python3.10/site-packages/torch/include/ATen/functorch/Macros.h ADDED
@@ -0,0 +1,3 @@
1
+ #pragma once
2
+
3
+ #define SINGLE_ARG(...) __VA_ARGS__
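
SINGLE_ARG is the usual trick for smuggling a comma-containing argument (such as std::map<int, int>) through a macro that expects a single parameter. A small standalone example of why it is needed; DECLARE_VAR is a hypothetical macro of my own, only the SINGLE_ARG definition comes from the header above:

#include <iostream>
#include <map>

#define SINGLE_ARG(...) __VA_ARGS__   // same definition as Macros.h above

// A macro that expects exactly two arguments: a type and a variable name.
#define DECLARE_VAR(type, name) type name

int main() {
  // DECLARE_VAR(std::map<int, int>, m);  // error: the comma inside <int, int>
  //                                      // makes the preprocessor see 3 arguments
  DECLARE_VAR(SINGLE_ARG(std::map<int, int>), m);  // comma hidden inside SINGLE_ARG(...)
  m[1] = 2;
  std::cout << m[1] << "\n";
  return 0;
}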
venv/lib/python3.10/site-packages/torch/include/ATen/functorch/PlumbingHelper.h ADDED
@@ -0,0 +1,63 @@
1
+ // Copyright (c) Facebook, Inc. and its affiliates.
2
+ // All rights reserved.
3
+ //
4
+ // This source code is licensed under the BSD-style license found in the
5
+ // LICENSE file in the root directory of this source tree.
6
+ #pragma once
7
+ #include <ATen/Tensor.h>
8
+ #include <ATen/functorch/BatchedTensorImpl.h>
9
+ #include <ATen/functorch/DynamicLayer.h>
10
+
11
+ // NOTE: [vmap plumbing]
12
+ //
13
+ // Here's how "batching rules" work.
14
+ // - we register kernels to the Batched key
15
+ // - these kernels have the same signatures as the original operators.
16
+ // For example, at::sin(Tensor self) accepts a Tensor, and the batched kernel
17
+ // must also accept a Tensor
18
+ // - However, it is more natural for users to write a batching rule like the
19
+ // following: sin_batch_rule(Tensor self, optional<int> self_bdim)
20
+ // - There is some codegenerated layer (the "plumbing") that wraps the user
21
+ // defined batching rule (e.g. sin_batch_rule) in a kernel that can be
22
+ // registered to the Batched key.
23
+ //
24
+ // The plumbing is responsible for wrapping a batching rule into a form that may
25
+ // be registered as the kernel for the batched key.
26
+
27
+ namespace at::functorch {
28
+
29
+ void vmap_check_escaped(const optional<DynamicLayer> &layer, const char* what);
30
+
31
+ // Create a BatchedTensor given a tensor, bdim, and level
32
+ TORCH_API Tensor makeBatched(const Tensor& tensor, optional<int64_t> bdim, int64_t level);
33
+
34
+ // Given a Tensor that may or may not be a BatchedTensor, unwrap it.
35
+ // If `tensor` is not a BatchedTensor, or is a BatchedTensor but the level
36
+ // doesn't match, then this returns (tensor, nullopt).
37
+ // Otherwise, it returns (unwrap(tensor), bdim).
38
+ TORCH_API std::tuple<Tensor, c10::optional<int64_t>> unwrapTensorAtLevel(const Tensor& tensor, int64_t level);
39
+
40
+ // Creates a vector of BatchedTensor
41
+ TORCH_API std::vector<Tensor> makeBatchedVector(const std::vector<Tensor>& tensors, optional<int64_t> bdim, int64_t level);
42
+
43
+ // Returns True if ANY tensor in tensors is batched at level
44
+ TORCH_API bool isBatchedAtLevel(ITensorListRef tensors, int64_t level);
45
+ TORCH_API bool isBatchedAtLevel(const c10::List<c10::optional<Tensor>>& maybe_tensors, int64_t level);
46
+ TORCH_API bool isBatchedAtLevel(const Tensor& tensor, int64_t level);
47
+ TORCH_API bool isBatchedAtLevel(const c10::optional<Tensor>& maybe_tensor, int64_t level);
48
+
49
+ // Convenience helper. Returns true if any tensor is batched at level
50
+ TORCH_API bool areAnyBatchedAtLevel(ArrayRef<optional<Tensor>> maybe_tensors, int64_t level);
51
+
52
+ inline bool ivalueParticipatesInCurrentLevel(const IValue& ivalue) {
53
+ if (ivalue.isTensor()) {
54
+ auto maybe_level = maybeCurrentDynamicLayer();
55
+ TORCH_INTERNAL_ASSERT(maybe_level.has_value());
56
+ auto current_level = maybe_level->layerId();
57
+ return isBatchedAtLevel(ivalue.toTensor(), current_level);
58
+ }
59
+ // TODO: should really check this
60
+ return false;
61
+ }
62
+
63
+ } // namespace at::functorch
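
The vmap plumbing note describes a three-step wrapper: unwrap each tensor at the current level, call the user-written batch rule, and rewrap the result. A toy standalone sketch of that shape, using scalar stand-ins instead of Tensors (unwrapTensorAtLevel/makeBatched are only mirrored here, not called):

#include <cmath>
#include <cstdint>
#include <iostream>
#include <optional>
#include <utility>

// Toy stand-in for a (possibly batched) tensor: one scalar payload plus an
// optional batch dim and the vmap level that owns it.
struct Toy {
  double value;
  std::optional<int64_t> bdim;
  int64_t level = -1;
};

// The user-written batch rule: operates on the unwrapped value + its bdim.
std::pair<Toy, std::optional<int64_t>> sin_batch_rule(
    const Toy& self, std::optional<int64_t> self_bdim) {
  return {Toy{std::sin(self.value), std::nullopt, -1}, self_bdim};
}

// The "plumbing": unwrap at the current level, call the rule, rewrap the result.
Toy sin_plumbing(const Toy& self, int64_t cur_level) {
  std::optional<int64_t> bdim;
  Toy unwrapped = self;
  if (self.level == cur_level) {  // analogous to unwrapTensorAtLevel
    bdim = self.bdim;
    unwrapped.bdim.reset();
    unwrapped.level = -1;
  }
  auto [res, res_bdim] = sin_batch_rule(unwrapped, bdim);
  if (res_bdim) {                 // analogous to makeBatched
    res.bdim = res_bdim;
    res.level = cur_level;
  }
  return res;
}

int main() {
  Toy batched{0.5, /*bdim=*/0, /*level=*/1};
  Toy out = sin_plumbing(batched, /*cur_level=*/1);
  std::cout << out.value << " level=" << out.level << "\n";
  return 0;
}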
venv/lib/python3.10/site-packages/torch/include/ATen/functorch/TensorWrapper.h ADDED
@@ -0,0 +1,103 @@
1
+ // Copyright (c) Facebook, Inc. and its affiliates.
2
+ // All rights reserved.
3
+ //
4
+ // This source code is licensed under the BSD-style license found in the
5
+ // LICENSE file in the root directory of this source tree.
6
+
7
+ #pragma once
8
+
9
+ #include <ATen/functorch/Macros.h>
10
+ #include <ATen/Tensor.h>
11
+ #include <ATen/functorch/Interpreter.h>
12
+
13
+ namespace at::functorch {
14
+
15
+ // NOTE: [functorch's TensorWrapper]
16
+ //
17
+ // Taking better suggestions for a name. TensorWrapper is the wrapper Tensor
18
+ // Subclass for functorch's grad-based transforms (grad, vjp, jvp). It is
19
+ // analogous to how vmap uses BatchedTensor as the wrapper Tensor subclass.
20
+ //
21
+ // If you're familiar with the Tensor-Variable merge, TensorWrapper is effectively
22
+ // another Variable.
23
+ //
24
+ // Consider grad(grad(torch.sin))(x). This wraps `x` as TensorWrapper(TensorWrapper(x)).
25
+ // The reason why is so that each TensorWrapper can hold its own AutogradMeta and
26
+ // participate in a **separate** autograd graph.
27
+ //
28
+ // There are alternative designs we could have chosen (e.g. each grad transform
29
+ // stores a weak map of Tensor -> AutogradMeta); the benefit of the TensorWrapper
30
+ // design is that we can re-use existing VariableType kernels (i.e. Autograd kernels)
31
+ // without much modification. Since a TensorWrapper looks like a regular Tensor,
32
+ // the VariableType kernel can pull out the AutogradMeta struct from where it
33
+ // expects and extend the autograd graph
34
+
35
+ struct TORCH_API TensorWrapper : public c10::TensorImpl {
36
+ explicit TensorWrapper(
37
+ c10::DispatchKeySet key_set,
38
+ Tensor value,
39
+ int64_t level,
40
+ std::shared_ptr<bool> is_alive,
41
+ bool is_immutable = false, // if true, this came from an operation that aliases an immutable tensor
42
+ bool use_value_sizes_strides = true);
43
+
44
+ void refreshMetadata();
45
+
46
+ const Tensor& value() const {
47
+ return value_;
48
+ }
49
+ optional<int64_t> level() const {
50
+ if (is_alive()) {
51
+ return level_;
52
+ }
53
+ return {};
54
+ }
55
+ bool is_immutable() const {
56
+ return is_immutable_;
57
+ }
58
+ bool is_alive() const;
59
+
60
+ // Overrides necessary for autograd
61
+ c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
62
+ const c10::VariableVersion& version_counter,
63
+ bool allow_tensor_metadata_change) const override;
64
+ c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
65
+ c10::VariableVersion&& version_counter,
66
+ bool allow_tensor_metadata_change) const override;
67
+ void shallow_copy_from(const c10::intrusive_ptr<TensorImpl>& impl) override;
68
+
69
+ private:
70
+ const char* tensorimpl_type_name() const override;
71
+ Tensor value_;
72
+ int64_t level_;
73
+ bool is_immutable_;
74
+
75
+ // TensorWrapper receives a boolean flag on whether or not the Grad Interpreter
76
+ // that created it is still alive or not.
77
+ // If the Grad Interpreter is no longer alive then it attempts to behave like
78
+ // a regular Tensor.
79
+ //
80
+ // When we exit the level, this wrapper may be marked as "not alive".
81
+ // Wrappers that are not alive:
82
+ // 1) May still have autograd metadata on them
83
+ // 2) Forward dispatches to the underlying value()
84
+ std::shared_ptr<bool> is_alive_;
85
+ };
86
+
87
+ // There are two variants of makeTensorWrapper: one that accepts a level
88
+ // and one that accepts an Interpreter.
89
+ //
90
+ // The one that accepts a level tries to automatically get the life handle from the
91
+ // interpreter on the DynamicLayerStack.
92
+ // It needs to be used with caution: if the interpreter is not on the
93
+ // DynamicLayerStack, then we won't be able to find the life handle.
94
+ //
95
+ // In practice this isn't a problem: when we're constructing TensorWrapper in
96
+ // Python, the corresponding interpreter is on the stack.
97
+ TORCH_API Tensor makeTensorWrapper(const Tensor& tensor, int64_t level, bool is_immutable=false);
98
+ TORCH_API Tensor makeTensorWrapper(const Tensor& tensor, const Interpreter& interpreter, bool is_immutable=false);
99
+ TORCH_API TensorWrapper* maybeGetTensorWrapper(const Tensor& tensor);
100
+ TORCH_API void dumpTensor(std::ostream & ss, const Tensor& tensor);
101
+ TORCH_API void dumpTensorCout(const Tensor& tensor);
102
+
103
+ } // namespace at::functorch
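
The TensorWrapper note says each nested grad transform adds one wrapper layer with its own autograd slot and a shared liveness flag. A toy standalone sketch of the nesting and the is_alive handoff (illustrative only, not the real TensorImpl subclass):

#include <cstdint>
#include <iostream>
#include <memory>

// Each nested grad transform adds one wrapper layer that owns its own
// "autograd slot" and shares a liveness flag with the interpreter that made it.
struct ToyWrapper {
  std::shared_ptr<ToyWrapper> inner;  // wrapped value (nullptr == plain tensor)
  int64_t level;                      // which grad transform owns this layer
  std::shared_ptr<bool> is_alive;     // flipped to false when that level exits
};

int main() {
  auto alive1 = std::make_shared<bool>(true);
  auto alive2 = std::make_shared<bool>(true);
  std::shared_ptr<ToyWrapper> x;  // the plain, unwrapped tensor
  auto l1 = std::make_shared<ToyWrapper>(ToyWrapper{x, 1, alive1});   // outer grad
  auto l2 = std::make_shared<ToyWrapper>(ToyWrapper{l1, 2, alive2});  // inner grad
  *alive2 = false;  // the inner (level-2) transform exits first
  std::cout << "level-2 wrapper alive? " << *l2->is_alive
            << ", still wraps level " << l2->inner->level << "\n";
  return 0;
}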
venv/lib/python3.10/site-packages/torch/include/ATen/functorch/VmapInterpreter.h ADDED
@@ -0,0 +1,25 @@
1
+ #pragma once
2
+ #include <ATen/functorch/Interpreter.h>
3
+
4
+ namespace at::functorch {
5
+
6
+ // This is the interpreter that handles the vmap() transform.
7
+ // See NOTE: [functorch interpreter stack] for more details.
8
+
9
+ struct VmapInterpreterPtr {
10
+ explicit VmapInterpreterPtr(const Interpreter* base): base_(base) { TORCH_INTERNAL_ASSERT(base->key() == TransformType::Vmap); }
11
+ TransformType key() const { return base_->key(); }
12
+ int64_t level() const { return base_->level(); }
13
+ void processImpl(const c10::OperatorHandle& op, torch::jit::Stack* stack);
14
+ void sendToNextInterpreterImpl(const c10::OperatorHandle& op, torch::jit::Stack* stack, bool grad_special_case);
15
+ c10::SymInt batchSize() const {
16
+ return std::get<VmapInterpreterMeta>(base_->meta()).batchSize_;
17
+ }
18
+ RandomnessType randomness() const {
19
+ return std::get<VmapInterpreterMeta>(base_->meta()).randomness_;
20
+ }
21
+ private:
22
+ const Interpreter* base_;
23
+ };
24
+
25
+ } // namespace at::functorch
venv/lib/python3.10/site-packages/torch/include/ATen/hip/impl/HIPAllocatorMasqueradingAsCUDA.h ADDED
@@ -0,0 +1,31 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/Allocator.h>
4
+ #include <c10/core/DeviceType.h>
5
+
6
+ // Use of c10::hip namespace here makes hipification easier, because
7
+ // I don't have to also fix namespaces. Sorry!
8
+ namespace c10 { namespace hip {
9
+
10
+ // Takes a valid HIPAllocator (of any sort) and turns it into
11
+ // an allocator pretending to be a CUDA allocator. See
12
+ // Note [Masquerading as CUDA]
13
+ class HIPAllocatorMasqueradingAsCUDA final : public Allocator {
14
+ Allocator* allocator_;
15
+ public:
16
+ explicit HIPAllocatorMasqueradingAsCUDA(Allocator* allocator)
17
+ : allocator_(allocator) {}
18
+ DataPtr allocate(size_t size) override {
19
+ DataPtr r = allocator_->allocate(size);
20
+ r.unsafe_set_device(Device(c10::DeviceType::CUDA, r.device().index()));
21
+ return r;
22
+ }
23
+ DeleterFnPtr raw_deleter() const override {
24
+ return allocator_->raw_deleter();
25
+ }
26
+ void copy_data(void* dest, const void* src, std::size_t count) const final {
27
+ allocator_->copy_data(dest, src, count);
28
+ }
29
+ };
30
+
31
+ }} // namespace c10::hip
venv/lib/python3.10/site-packages/torch/include/ATen/hip/impl/HIPCachingAllocatorMasqueradingAsCUDA.h ADDED
@@ -0,0 +1,18 @@
1
+ #pragma once
2
+
3
+ #include <c10/hip/HIPCachingAllocator.h>
4
+ #include <ATen/hip/impl/HIPAllocatorMasqueradingAsCUDA.h>
5
+ #include <ATen/hip/impl/HIPStreamMasqueradingAsCUDA.h>
6
+
7
+ namespace c10 {
8
+ // forward declaration
9
+ class DataPtr;
10
+ namespace hip {
11
+ namespace HIPCachingAllocatorMasqueradingAsCUDA {
12
+
13
+ C10_HIP_API Allocator* get();
14
+ C10_HIP_API void recordStreamMasqueradingAsCUDA(const DataPtr& ptr, HIPStreamMasqueradingAsCUDA stream);
15
+
16
+ } // namespace HIPCachingAllocatorMasqueradingAsCUDA
17
+ } // namespace hip
18
+ } // namespace c10
venv/lib/python3.10/site-packages/torch/include/ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h ADDED
@@ -0,0 +1,353 @@
1
+ #pragma once
2
+
3
+ #include <ATen/hip/HIPConfig.h>
4
+
5
+ // The includes of HIPGuard.h
6
+ #include <c10/hip/impl/HIPGuardImpl.h>
7
+ #include <c10/hip/HIPMacros.h>
8
+ #include <c10/core/DeviceType.h>
9
+ #include <c10/core/impl/InlineDeviceGuard.h>
10
+ #include <c10/core/impl/InlineStreamGuard.h>
11
+ #include <c10/util/Exception.h>
12
+
13
+ #include <c10/hip/impl/HIPGuardImpl.h>
14
+
15
+ #include <ATen/hip/impl/HIPCachingAllocatorMasqueradingAsCUDA.h>
16
+ #include <ATen/hip/impl/HIPStreamMasqueradingAsCUDA.h>
17
+
18
+ // Use of c10::hip namespace here makes hipification easier, because
19
+ // I don't have to also fix namespaces. Sorry!
20
+ namespace c10 { namespace hip {
21
+
22
+ // Note [Masquerading as CUDA]
23
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~
24
+ // c10_hip is very easy to understand: it is HIPified from c10_cuda,
25
+ // and anywhere you said CUDA, the source code now says HIP. HIPified
26
+ // PyTorch is much harder to understand: it is HIPified from regular
27
+ // PyTorch, yes, but NO source-to-source translation from CUDA to
28
+ // HIP occurs; instead, anywhere we see "CUDA", it actually means "HIP".
29
+ // For example, when you use HIPified PyTorch, you say x.cuda() to
30
+ // move a tensor onto ROCm device. We call this situation "HIP
31
+ // masquerading as CUDA".
32
+ //
33
+ // This leads to a very awkward situation when we want to call c10_hip
34
+ // code from PyTorch, since c10_hip is expecting things to be called
35
+ // HIP, but PyTorch is calling them CUDA (masquerading as HIP). To
36
+ // fix this impedance mismatch, we have MasqueradingAsCUDA variants
37
+ // for all c10_hip classes. These translate between the "HIP" and "CUDA
38
+ // masquerading as HIP" worlds. For example,
39
+ // HIPGuardImplMasqueradingAsCUDA (this file) provides something like a
40
+ // HIPGuardImpl, but it reports its DeviceType as CUDA (e.g., type()
41
+ // returns CUDA, getDevice() reports the current HIP device as a CUDA
42
+ // device.)
43
+ //
44
+ // We should be able to delete all of these classes entirely once
45
+ // we switch PyTorch to calling a HIP a HIP.
46
+ //
47
+ // When you add a new MasqueradingAsCUDA class/function, you need to
48
+ // also update the rewrite rules in torch/utils/hipify/cuda_to_hip_mappings.py
49
+ //
50
+ //
51
+ //
52
+ // By the way, note that the cpp file associated with this also
53
+ // *overwrites* the entry in the DeviceGuardImpl registry for CUDA with
54
+ // this HIP implementation.
55
+
56
+ struct HIPGuardImplMasqueradingAsCUDA final : public c10::impl::DeviceGuardImplInterface {
57
+ static constexpr c10::DeviceType static_type = c10::DeviceType::CUDA;
58
+ HIPGuardImplMasqueradingAsCUDA() {}
59
+ HIPGuardImplMasqueradingAsCUDA(c10::DeviceType t) {
60
+ TORCH_INTERNAL_ASSERT(t == c10::DeviceType::CUDA);
61
+ }
62
+ c10::DeviceType type() const override {
63
+ return c10::DeviceType::CUDA;
64
+ }
65
+ Device exchangeDevice(Device d) const override {
66
+ TORCH_INTERNAL_ASSERT(d.is_cuda());
67
+ Device old_device = getDevice();
68
+ if (old_device.index() != d.index()) {
69
+ C10_HIP_CHECK(hipSetDevice(d.index()));
70
+ }
71
+ return old_device;
72
+ }
73
+ Device getDevice() const override {
74
+ int device;
75
+ C10_HIP_CHECK(hipGetDevice(&device));
76
+ return Device(c10::DeviceType::CUDA, device);
77
+ }
78
+ void setDevice(Device d) const override {
79
+ TORCH_INTERNAL_ASSERT(d.is_cuda());
80
+ C10_HIP_CHECK(hipSetDevice(d.index()));
81
+ }
82
+ void uncheckedSetDevice(Device d) const noexcept override {
83
+ C10_HIP_CHECK_WARN(hipSetDevice(d.index()));
84
+ }
85
+ Stream getStream(Device d) const noexcept override {
86
+ return getCurrentHIPStreamMasqueradingAsCUDA(d.index()).unwrap();
87
+ }
88
+ Stream getDefaultStream(Device d) const override {
89
+ return getDefaultHIPStreamMasqueradingAsCUDA(d.index());
90
+ }
91
+ Stream getStreamFromGlobalPool(Device d, bool isHighPriority = false) const override {
92
+ return getStreamFromPoolMasqueradingAsCUDA(isHighPriority, d.index());
93
+ }
94
+ Stream exchangeStream(Stream s) const noexcept override {
95
+ HIPStreamMasqueradingAsCUDA cs(s);
96
+ auto old_stream = getCurrentHIPStreamMasqueradingAsCUDA(s.device().index());
97
+ setCurrentHIPStreamMasqueradingAsCUDA(cs);
98
+ return old_stream.unwrap();
99
+ }
100
+ DeviceIndex deviceCount() const noexcept override {
101
+ int deviceCnt;
102
+ hipError_t _err;
103
+ _err = hipGetDeviceCount(&deviceCnt);
104
+ #if defined(USE_ROCM) && (ROCM_VERSION < 50201)
105
+ if(_err == hipErrorInvalidDevice)
106
+ return 0;
107
+ #endif
108
+ if(_err != hipErrorNoDevice && _err != hipSuccess)
109
+ C10_HIP_CHECK(_err);
110
+ return deviceCnt;
111
+ }
112
+
113
+ // Event-related functions
114
+ // Note: hipEventCreateWithFlags should be called on the same device as
115
+ // the recording stream's device.
116
+ void createEvent(
117
+ hipEvent_t* hip_event,
118
+ const EventFlag flag) const {
119
+ // Maps PyTorch's Event::Flag to HIP flag
120
+ auto hip_flag = hipEventDefault;
121
+ switch (flag) {
122
+ case EventFlag::PYTORCH_DEFAULT:
123
+ case EventFlag::HIP_EVENT_DISABLE_TIMING:
124
+ hip_flag = hipEventDisableTiming;
125
+ break;
126
+ case EventFlag::BACKEND_DEFAULT:
127
+ case EventFlag::HIP_EVENT_DEFAULT:
128
+ hip_flag = hipEventDefault;
129
+ break;
130
+ default:
131
+ TORCH_CHECK(false, "HIP event received unknown flag");
132
+ }
133
+
134
+ C10_HIP_CHECK(hipEventCreateWithFlags(hip_event, hip_flag));
135
+ }
136
+
137
+ void destroyEvent(
138
+ void* event,
139
+ const DeviceIndex device_index) const noexcept override {
140
+ if (!event) return;
141
+ auto hip_event = static_cast<hipEvent_t>(event);
142
+ int orig_device;
143
+ C10_HIP_CHECK_WARN(hipGetDevice(&orig_device));
144
+ C10_HIP_CHECK_WARN(hipSetDevice(device_index));
145
+ C10_HIP_CHECK_WARN(hipEventDestroy(hip_event));
146
+ C10_HIP_CHECK_WARN(hipSetDevice(orig_device));
147
+ }
148
+
149
+ void record(void** event,
150
+ const Stream& stream,
151
+ const DeviceIndex device_index,
152
+ const EventFlag flag) const override {
153
+ TORCH_CHECK(device_index == -1 || device_index == stream.device_index(),
154
+ "Event device index ",
155
+ device_index,
156
+ " does not match recording stream's device index ",
157
+ stream.device_index(),
158
+ ".");
159
+
160
+ hipEvent_t hip_event = static_cast<hipEvent_t>(*event);
161
+ HIPStreamMasqueradingAsCUDA hip_stream{stream};
162
+
163
+ // Moves to stream's device to record
164
+ const auto orig_device = getDevice();
165
+ setDevice(stream.device());
166
+
167
+ // Creates the event (lazily)
168
+ if (!hip_event) createEvent(&hip_event, flag);
169
+ C10_HIP_CHECK(hipEventRecord(hip_event, hip_stream));
170
+ // Makes the void* point to the (possibly just allocated) HIP event
171
+ *event = hip_event;
172
+
173
+ // Resets device
174
+ setDevice(orig_device);
175
+ }
176
+
177
+ void block(
178
+ void* event,
179
+ const Stream& stream) const override {
180
+ if (!event) return;
181
+ hipEvent_t hip_event = static_cast<hipEvent_t>(event);
182
+ HIPStreamMasqueradingAsCUDA hip_stream{stream};
183
+ const auto orig_device = getDevice();
184
+ setDevice(stream.device());
185
+ C10_HIP_CHECK(hipStreamWaitEvent(
186
+ hip_stream,
187
+ hip_event,
188
+ /*flags (must be zero)=*/ 0));
189
+ setDevice(orig_device);
190
+ }
191
+
192
+ bool queryEvent(void* event) const override {
193
+ if (!event) return true;
194
+ hipEvent_t hip_event = static_cast<hipEvent_t>(event);
195
+ const hipError_t err = hipEventQuery(hip_event);
196
+ if (err != hipErrorNotReady) C10_HIP_CHECK(err);
197
+ else {
198
+ // ignore and clear the error if not ready
199
+ (void)hipGetLastError();
200
+ }
201
+ return (err == hipSuccess);
202
+ }
203
+
204
+ // Stream-related functions
205
+ bool queryStream(const Stream& stream) const override {
206
+ HIPStreamMasqueradingAsCUDA hip_stream{stream};
207
+ return hip_stream.query();
208
+ }
209
+
210
+ void synchronizeStream(const Stream& stream) const override {
211
+ HIPStreamMasqueradingAsCUDA hip_stream{stream};
212
+ hip_stream.synchronize();
213
+ }
214
+
215
+ void recordDataPtrOnStream(
216
+ const c10::DataPtr& data_ptr,
217
+ const Stream& stream) const override {
218
+ HIPStreamMasqueradingAsCUDA hip_stream{stream};
219
+ HIPCachingAllocatorMasqueradingAsCUDA::recordStreamMasqueradingAsCUDA(data_ptr, hip_stream);
220
+ }
221
+ };
222
+
223
+ // All of the guards which have HIPGuardImpl burned in need to also have
224
+ // variants using HIPGuardImplMasqueradingAsCUDA.
225
+
226
+ /// This code is all a direct copy from c10/cuda/CUDAGuard.h, but with
227
+ /// the correct InlineDeviceGuard burned in. Sorry about the
228
+ /// copy-pasting.
229
+
230
+ struct HIPGuardMasqueradingAsCUDA {
231
+ explicit HIPGuardMasqueradingAsCUDA() = delete;
232
+ explicit HIPGuardMasqueradingAsCUDA(DeviceIndex device_index) : guard_(device_index) {}
233
+ explicit HIPGuardMasqueradingAsCUDA(Device device) : guard_(device) {}
234
+
235
+ HIPGuardMasqueradingAsCUDA(const HIPGuardMasqueradingAsCUDA&) = delete;
236
+ HIPGuardMasqueradingAsCUDA& operator=(const HIPGuardMasqueradingAsCUDA&) = delete;
237
+ HIPGuardMasqueradingAsCUDA(HIPGuardMasqueradingAsCUDA&& other) = delete;
238
+ HIPGuardMasqueradingAsCUDA& operator=(HIPGuardMasqueradingAsCUDA&& other) = delete;
239
+
240
+ void set_device(Device device) { guard_.set_device(device); }
241
+ void reset_device(Device device) { guard_.reset_device(device); }
242
+ void set_index(DeviceIndex device_index) { guard_.set_index(device_index); }
243
+ Device original_device() const { return guard_.original_device(); }
244
+ Device current_device() const { return guard_.current_device(); }
245
+
246
+ private:
247
+ c10::impl::InlineDeviceGuard<HIPGuardImplMasqueradingAsCUDA> guard_;
248
+ };
249
+
250
+ struct OptionalHIPGuardMasqueradingAsCUDA {
251
+ explicit OptionalHIPGuardMasqueradingAsCUDA() : guard_() {}
252
+ explicit OptionalHIPGuardMasqueradingAsCUDA(optional<Device> device_opt) : guard_(device_opt) {}
253
+ explicit OptionalHIPGuardMasqueradingAsCUDA(optional<DeviceIndex> device_index_opt) : guard_(device_index_opt) {}
254
+
255
+ OptionalHIPGuardMasqueradingAsCUDA(const OptionalHIPGuardMasqueradingAsCUDA&) = delete;
256
+ OptionalHIPGuardMasqueradingAsCUDA& operator=(const OptionalHIPGuardMasqueradingAsCUDA&) = delete;
257
+ OptionalHIPGuardMasqueradingAsCUDA(OptionalHIPGuardMasqueradingAsCUDA&& other) = delete;
258
+ OptionalHIPGuardMasqueradingAsCUDA& operator=(OptionalHIPGuardMasqueradingAsCUDA&& other) = delete;
259
+
260
+ void set_device(Device device) { guard_.set_device(device); }
261
+ void reset_device(Device device) { guard_.reset_device(device); }
262
+ void set_index(DeviceIndex device_index) { guard_.set_index(device_index); }
263
+ optional<Device> original_device() const { return guard_.original_device(); }
264
+ optional<Device> current_device() const { return guard_.current_device(); }
265
+ void reset() { guard_.reset(); }
266
+
267
+ private:
268
+ c10::impl::InlineOptionalDeviceGuard<HIPGuardImplMasqueradingAsCUDA> guard_;
269
+ };
270
+
271
+ struct HIPStreamGuardMasqueradingAsCUDA {
272
+ explicit HIPStreamGuardMasqueradingAsCUDA() = delete;
273
+ explicit HIPStreamGuardMasqueradingAsCUDA(Stream stream) : guard_(stream) {}
274
+ HIPStreamGuardMasqueradingAsCUDA(const HIPStreamGuardMasqueradingAsCUDA&) = delete;
275
+ HIPStreamGuardMasqueradingAsCUDA& operator=(const HIPStreamGuardMasqueradingAsCUDA&) = delete;
276
+ HIPStreamGuardMasqueradingAsCUDA(HIPStreamGuardMasqueradingAsCUDA&& other) = delete;
277
+ HIPStreamGuardMasqueradingAsCUDA& operator=(HIPStreamGuardMasqueradingAsCUDA&& other) = delete;
278
+
279
+ void reset_stream(Stream stream) { guard_.reset_stream(stream); }
280
+
281
+ HIPStreamMasqueradingAsCUDA original_stream() const {
282
+ return HIPStreamMasqueradingAsCUDA(HIPStreamMasqueradingAsCUDA::UNCHECKED, guard_.original_stream());
283
+ }
284
+ HIPStreamMasqueradingAsCUDA current_stream() const {
285
+ return HIPStreamMasqueradingAsCUDA(HIPStreamMasqueradingAsCUDA::UNCHECKED, guard_.current_stream());
286
+ }
287
+
288
+ Device current_device() const { return guard_.current_device(); }
289
+ Device original_device() const { return guard_.original_device(); }
290
+
291
+ private:
292
+ c10::impl::InlineStreamGuard<HIPGuardImplMasqueradingAsCUDA> guard_;
293
+ };
294
+
295
+ struct OptionalHIPStreamGuardMasqueradingAsCUDA {
296
+ explicit OptionalHIPStreamGuardMasqueradingAsCUDA() : guard_() {}
297
+ explicit OptionalHIPStreamGuardMasqueradingAsCUDA(Stream stream) : guard_(stream) {}
298
+ explicit OptionalHIPStreamGuardMasqueradingAsCUDA(optional<Stream> stream_opt) : guard_(stream_opt) {}
299
+
300
+ OptionalHIPStreamGuardMasqueradingAsCUDA(const OptionalHIPStreamGuardMasqueradingAsCUDA&) = delete;
301
+ OptionalHIPStreamGuardMasqueradingAsCUDA& operator=(const OptionalHIPStreamGuardMasqueradingAsCUDA&) = delete;
302
+ OptionalHIPStreamGuardMasqueradingAsCUDA(OptionalHIPStreamGuardMasqueradingAsCUDA&& other) = delete;
303
+ OptionalHIPStreamGuardMasqueradingAsCUDA& operator=(OptionalHIPStreamGuardMasqueradingAsCUDA&& other) = delete;
304
+
305
+ void reset_stream(Stream stream) { guard_.reset_stream(stream); }
306
+
307
+ optional<HIPStreamMasqueradingAsCUDA> original_stream() const {
308
+ auto r = guard_.original_stream();
309
+ if (r.has_value()) {
310
+ return make_optional(HIPStreamMasqueradingAsCUDA(HIPStreamMasqueradingAsCUDA::UNCHECKED, r.value()));
311
+ } else {
312
+ return nullopt;
313
+ }
314
+ }
315
+
316
+ optional<HIPStreamMasqueradingAsCUDA> current_stream() const {
317
+ auto r = guard_.current_stream();
318
+ if (r.has_value()) {
319
+ return make_optional(HIPStreamMasqueradingAsCUDA(HIPStreamMasqueradingAsCUDA::UNCHECKED, r.value()));
320
+ } else {
321
+ return nullopt;
322
+ }
323
+ }
324
+
325
+ void reset() { guard_.reset(); }
326
+
327
+ private:
328
+ c10::impl::InlineOptionalStreamGuard<HIPGuardImplMasqueradingAsCUDA> guard_;
329
+ };
330
+
331
+ struct HIPMultiStreamGuardMasqueradingAsCUDA {
332
+ explicit HIPMultiStreamGuardMasqueradingAsCUDA(ArrayRef<HIPStreamMasqueradingAsCUDA> streams)
333
+ : guard_(unwrapStreams(streams)) {}
334
+
335
+ HIPMultiStreamGuardMasqueradingAsCUDA(const HIPMultiStreamGuardMasqueradingAsCUDA&) = delete;
336
+ HIPMultiStreamGuardMasqueradingAsCUDA& operator=(const HIPMultiStreamGuardMasqueradingAsCUDA&) = delete;
337
+ HIPMultiStreamGuardMasqueradingAsCUDA(HIPMultiStreamGuardMasqueradingAsCUDA&& other) = delete;
338
+ HIPMultiStreamGuardMasqueradingAsCUDA& operator=(HIPMultiStreamGuardMasqueradingAsCUDA&& other) = delete;
339
+
340
+ private:
341
+ c10::impl::InlineMultiStreamGuard<HIPGuardImplMasqueradingAsCUDA> guard_;
342
+
343
+ static std::vector<Stream> unwrapStreams(ArrayRef<HIPStreamMasqueradingAsCUDA> hipStreams) {
344
+ std::vector<Stream> streams;
345
+ streams.reserve(hipStreams.size());
346
+ for (const HIPStreamMasqueradingAsCUDA& hipStream : hipStreams) {
347
+ streams.push_back(hipStream);
348
+ }
349
+ return streams;
350
+ }
351
+ };
352
+
353
+ }} // namespace c10::hip
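
Note [Masquerading as CUDA] boils down to a wrapper that performs HIP work but reports DeviceType::CUDA to callers. A standalone toy of that shape (simplified interface, not the real c10::impl::DeviceGuardImplInterface, and string-returning stubs instead of real driver calls):

#include <iostream>
#include <string>

enum class DeviceType { CUDA, HIP };

// A toy guard-impl interface.
struct GuardImpl {
  virtual ~GuardImpl() = default;
  virtual DeviceType type() const = 0;
  virtual std::string setDevice(int index) const = 0;
};

struct HipGuardImpl : GuardImpl {
  DeviceType type() const override { return DeviceType::HIP; }
  std::string setDevice(int index) const override {
    return "hipSetDevice(" + std::to_string(index) + ")";
  }
};

// The masquerading variant: does HIP work but reports itself as CUDA.
struct HipGuardImplMasqueradingAsCuda : GuardImpl {
  DeviceType type() const override { return DeviceType::CUDA; }
  std::string setDevice(int index) const override {
    return HipGuardImpl{}.setDevice(index);
  }
};

int main() {
  HipGuardImplMasqueradingAsCuda impl;
  std::cout << "reports CUDA? " << (impl.type() == DeviceType::CUDA)
            << ", does: " << impl.setDevice(0) << "\n";
  return 0;
}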
venv/lib/python3.10/site-packages/torch/include/ATen/hip/impl/HIPStreamMasqueradingAsCUDA.h ADDED
@@ -0,0 +1,130 @@
1
+ #pragma once
2
+
3
+ #include <c10/hip/HIPStream.h>
4
+
5
+ // Use of c10::hip namespace here makes hipification easier, because
6
+ // I don't have to also fix namespaces. Sorry!
7
+ namespace c10 { namespace hip {
8
+
9
+ // See Note [Masquerading as CUDA] for motivation
10
+
11
+ class HIPStreamMasqueradingAsCUDA {
12
+ public:
13
+
14
+ enum Unchecked { UNCHECKED };
15
+
16
+ explicit HIPStreamMasqueradingAsCUDA(Stream stream)
17
+ : HIPStreamMasqueradingAsCUDA(UNCHECKED, stream) {
18
+ // We did the coercion unchecked; check that it was right.
19
+ TORCH_CHECK(stream.device().is_cuda() /* !!! */);
20
+ }
21
+
22
+ explicit HIPStreamMasqueradingAsCUDA(Unchecked, Stream stream)
23
+ // Unsafely coerce the "CUDA" stream into a HIP stream
24
+ : stream_(
25
+ HIPStream(
26
+ Stream(
27
+ Stream::UNSAFE,
28
+ Device(c10::DeviceType::HIP, stream.device_index()),
29
+ stream.id())
30
+ )
31
+ ) {}
32
+
33
+ // New constructor, just for this. Does NOT coerce.
34
+ explicit HIPStreamMasqueradingAsCUDA(HIPStream stream) : stream_(stream) {}
35
+
36
+ bool operator==(const HIPStreamMasqueradingAsCUDA& other) const noexcept {
37
+ return stream_ == other.stream_;
38
+ }
39
+
40
+ bool operator!=(const HIPStreamMasqueradingAsCUDA& other) const noexcept {
41
+ return stream_ != other.stream_;
42
+ }
43
+
44
+ operator hipStream_t() const { return stream_.stream(); }
45
+
46
+ operator Stream() const {
47
+ // Unsafely coerce HIP stream into a "CUDA" stream
48
+ return Stream(Stream::UNSAFE, device(), id());
49
+ }
50
+
51
+ DeviceIndex device_index() const { return stream_.device_index(); }
52
+
53
+ // Unsafely coerce HIP device into CUDA device
54
+ c10::DeviceType device_type() const { return c10::DeviceType::CUDA; }
55
+
56
+ Device device() const {
57
+ // Unsafely coerce HIP device into CUDA device
58
+ return Device(c10::DeviceType::CUDA, stream_.device_index());
59
+ }
60
+
61
+ StreamId id() const { return stream_.id(); }
62
+ bool query() const { return stream_.query(); }
63
+ void synchronize() const { stream_.synchronize(); }
64
+ int priority() const { return stream_.priority(); }
65
+ hipStream_t stream() const { return stream_.stream(); }
66
+
67
+ Stream unwrap() const {
68
+ // Unsafely coerce HIP stream into "CUDA" stream
69
+ return Stream(Stream::UNSAFE, device(), id());
70
+ }
71
+
72
+ c10::StreamData3 pack3() const noexcept {
73
+ // Unsafely coerce HIP stream into "CUDA" stream before packing
74
+ return unwrap().pack3();
75
+ }
76
+
77
+ static HIPStreamMasqueradingAsCUDA unpack3(StreamId stream_id,
78
+ DeviceIndex device_index,
79
+ c10::DeviceType device_type) {
80
+ // NB: constructor manages CUDA->HIP translation for us
81
+ return HIPStreamMasqueradingAsCUDA(Stream::unpack3(
82
+ stream_id, device_index, device_type));
83
+ }
84
+
85
+ static std::tuple<int, int> priority_range() { return HIPStream::priority_range(); }
86
+
87
+ // New method, gets the underlying HIPStream
88
+ HIPStream hip_stream() const { return stream_; }
89
+
90
+ private:
91
+ HIPStream stream_;
92
+ };
93
+
94
+ HIPStreamMasqueradingAsCUDA
95
+ inline getStreamFromPoolMasqueradingAsCUDA(const bool isHighPriority = false, DeviceIndex device = -1) {
96
+ return HIPStreamMasqueradingAsCUDA(getStreamFromPool(isHighPriority, device));
97
+ }
98
+
99
+ HIPStreamMasqueradingAsCUDA
100
+ inline getStreamFromExternalMasqueradingAsCUDA(hipStream_t ext_stream, DeviceIndex device) {
101
+ return HIPStreamMasqueradingAsCUDA(getStreamFromExternal(ext_stream, device));
102
+ }
103
+
104
+ inline HIPStreamMasqueradingAsCUDA getDefaultHIPStreamMasqueradingAsCUDA(DeviceIndex device_index = -1) {
105
+ return HIPStreamMasqueradingAsCUDA(getDefaultHIPStream(device_index));
106
+ }
107
+
108
+ inline HIPStreamMasqueradingAsCUDA getCurrentHIPStreamMasqueradingAsCUDA(DeviceIndex device_index = -1) {
109
+ return HIPStreamMasqueradingAsCUDA(getCurrentHIPStream(device_index));
110
+ }
111
+
112
+ inline void setCurrentHIPStreamMasqueradingAsCUDA(HIPStreamMasqueradingAsCUDA stream) {
113
+ setCurrentHIPStream(stream.hip_stream());
114
+ }
115
+
116
+ inline std::ostream& operator<<(std::ostream& stream, const HIPStreamMasqueradingAsCUDA& s) {
117
+ stream << s.hip_stream() << " (masquerading as CUDA)";
118
+ return stream;
119
+ }
120
+
121
+ }} // namespace c10::hip
122
+
123
+ namespace std {
124
+ template <>
125
+ struct hash<c10::hip::HIPStreamMasqueradingAsCUDA> {
126
+ size_t operator()(c10::hip::HIPStreamMasqueradingAsCUDA s) const noexcept {
127
+ return std::hash<c10::Stream>{}(s.unwrap());
128
+ }
129
+ };
130
+ } // namespace std
venv/lib/python3.10/site-packages/torch/include/ATen/native/quantized/Copy.h ADDED
@@ -0,0 +1,10 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/Tensor.h>
4
+
5
+ namespace at {
6
+ namespace native {
7
+
8
+ Tensor& quantized_copy_from_float_(Tensor& self, const Tensor& src);
9
+ }
10
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/native/quantized/PackedParams.h ADDED
@@ -0,0 +1,147 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/Tensor.h>
4
+ #include <ATen/core/ivalue.h>
5
+
6
+ struct LinearPackedParamsBase : public torch::jit::CustomClassHolder {
7
+ virtual at::Tensor apply(
8
+ at::Tensor input,
9
+ double output_scale,
10
+ int64_t output_zero_point) = 0;
11
+ virtual at::Tensor apply_relu(
12
+ at::Tensor input,
13
+ double output_scale,
14
+ int64_t output_zero_point) = 0;
15
+
16
+ // out variant of LinearPackedParamsBase::apply
17
+ virtual at::Tensor& apply_out(
18
+ const at::Tensor& /*input*/,
19
+ double /*output_scale*/,
20
+ int64_t /*output_zero_point*/,
21
+ at::Tensor& output) {
22
+ throw std::runtime_error(
23
+ "apply_out is not implemented for this packed "
24
+ "parameter type");
25
+ return output;
26
+ }
27
+
28
+ virtual at::Tensor& apply_relu_out(
29
+ const at::Tensor& /*input*/,
30
+ double /*output_scale*/,
31
+ int64_t /*output_zero_point*/,
32
+ at::Tensor& output) {
33
+ throw std::runtime_error(
34
+ "apply_relu_out is not implemented for this packed "
35
+ "parameter type");
36
+ return output;
37
+ }
38
+
39
+ // Corresponding pattern (the ops with `*` are part of the pattern that
40
+ // represents the computation of quantized::linear_with_input_q_dq_qweight_dq_output_fp32):
41
+ // input -> q* -> dq* -> linear* ->
42
+ // qweight -> dq* /
43
+ //
44
+ // After fusion:
45
+ // input -> quantized::linear_with_input_q_dq_qweight_dq_output_fp32* ->
46
+ // qweight /
47
+ //
48
+ // Additional Note: the weight is packed as well
49
+ // Params:
50
+ // X: float32 Tensor, will be quantized to quint8 in the op
51
+ // W_prepack: packed qint8 quantized weight and bias
52
+ // Returns:
53
+ // Y: float32 Tensor
54
+ virtual at::Tensor apply_with_input_q_dq_qweight_dq_output_fp32(
55
+ at::Tensor input,
56
+ double input_scale,
57
+ int64_t input_zero_point) {
58
+ throw std::runtime_error(
59
+ "apply_with_input_q_dq_qweight_dq_output_fp32 is not implemented for this packed "
60
+ "parameter type");
61
+ return {};
62
+ }
63
+
64
+ // Corresponding pattern (the ops with `*` are part of the pattern that
65
+ // represents the computation of quantized::linear_with_input_q_dq_qweight_dq_relu_output_fp32):
66
+ // input -> q* -> dq* -> linear* -> relu* ->
67
+ // qweight -> dq* /
68
+ //
69
+ // After fusion:
70
+ // input -> quantized::linear_with_input_q_dq_qweight_dq_relu_output_fp32* ->
71
+ // qweight /
72
+ //
73
+ // Additional Note: the weight is packed as well
74
+ // Params:
75
+ // input: float32 Tensor, will be quantized to quint8 in the op
76
+ // Returns:
77
+ // float32 Tensor
78
+ virtual at::Tensor apply_with_input_q_dq_qweight_dq_relu_output_fp32(
79
+ at::Tensor input,
80
+ double input_scale,
81
+ int64_t input_zero_point) {
82
+ throw std::runtime_error(
83
+ "apply_with_input_q_dq_qweight_dq_relu_output_fp32 is not implemented for this packed "
84
+ "parameter type");
85
+ return {};
86
+ }
87
+
88
+ virtual at::Tensor apply_dynamic(
89
+ at::Tensor input,
90
+ bool reduce_range = false) = 0;
91
+ virtual at::Tensor apply_dynamic_relu(
92
+ at::Tensor input,
93
+ bool reduce_range = false) = 0;
94
+
95
+ virtual at::Tensor& apply_dynamic_out(
96
+ const at::Tensor& /* input */,
97
+ at::Tensor& output,
98
+ bool /* reduce_range */) {
99
+ throw std::runtime_error(
100
+ "apply_dynamic_out is not implemented for this packed "
101
+ "parameter type");
102
+ return output;
103
+ }
104
+ virtual at::Tensor& apply_dynamic_relu_out(
105
+ const at::Tensor& /* input */,
106
+ at::Tensor& output,
107
+ bool /* reduce_range */) {
108
+ throw std::runtime_error(
109
+ "apply_dynamic_relu_out is not implemented for this packed "
110
+ "parameter type");
111
+ return output;
112
+ }
113
+
114
+ virtual std::tuple<at::Tensor, c10::optional<at::Tensor>> unpack() = 0;
115
+
116
+ virtual c10::optional<at::Tensor> bias() = 0;
117
+
118
+ virtual void set_bias(c10::optional<at::Tensor> /*bias*/) {
119
+ throw std::runtime_error(
120
+ "set_bias is not implemented for this packed "
121
+ "parameter type");
122
+ }
123
+ };
124
+
125
+ template <int kSpatialDim = 2>
126
+ struct ConvPackedParamsBase : public torch::jit::CustomClassHolder {
127
+ virtual at::Tensor apply(
128
+ const at::Tensor& input,
129
+ double output_scale,
130
+ int64_t output_zero_point) = 0;
131
+ virtual at::Tensor apply_relu(
132
+ const at::Tensor& input,
133
+ double output_scale,
134
+ int64_t output_zero_point) = 0;
135
+ virtual at::Tensor apply_dynamic(
136
+ const at::Tensor& input,
137
+ bool reduce_range) = 0;
138
+
139
+ virtual std::tuple<at::Tensor, c10::optional<at::Tensor>> unpack() = 0;
140
+
141
+ virtual torch::List<int64_t> stride() const = 0;
142
+ virtual torch::List<int64_t> padding() const = 0;
143
+ virtual torch::List<int64_t> output_padding() const = 0;
144
+ virtual torch::List<int64_t> dilation() const = 0;
145
+ virtual int64_t groups() const = 0;
146
+ virtual bool transpose() const = 0;
147
+ };
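
The packed-params classes above follow one pattern: required entry points are pure virtual, while optional ones (apply_out, set_bias, ...) get throwing defaults so a backend can implement only what it supports. A standalone toy of that pattern, using plain float vectors instead of quantized Tensors:

#include <iostream>
#include <stdexcept>
#include <vector>

// Required entry points are pure virtual; optional ones get throwing defaults,
// so a backend only overrides what it actually supports.
struct PackedLinearBase {
  virtual ~PackedLinearBase() = default;
  virtual std::vector<float> apply(const std::vector<float>& input) = 0;
  virtual std::vector<float>& apply_out(const std::vector<float>& /*input*/,
                                        std::vector<float>& /*out*/) {
    throw std::runtime_error("apply_out is not implemented for this backend");
  }
};

struct NaivePackedLinear : PackedLinearBase {
  explicit NaivePackedLinear(float scale) : scale_(scale) {}
  std::vector<float> apply(const std::vector<float>& input) override {
    std::vector<float> out(input);
    for (auto& v : out) v *= scale_;  // stand-in for the real quantized matmul
    return out;
  }
  float scale_;
};

int main() {
  NaivePackedLinear packed(2.0f);
  for (float v : packed.apply({1.0f, 2.0f, 3.0f})) std::cout << v << " ";
  std::cout << "\n";
  return 0;
}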
venv/lib/python3.10/site-packages/torch/include/ATen/native/quantized/cpu/BinaryOps.h ADDED
@@ -0,0 +1,8 @@
1
+ #include <ATen/core/Tensor.h>
2
+
3
+ namespace at {
4
+ namespace native {
5
+ TORCH_API Tensor
6
+ quantized_add(Tensor qa, Tensor qb, double scale, int64_t zero_point);
7
+ }
8
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/native/quantized/cpu/EmbeddingPackedParams.h ADDED
@@ -0,0 +1,29 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/Tensor.h>
4
+ #include <ATen/core/ivalue.h>
5
+
6
+ struct EmbeddingPackedParamsBase : public torch::jit::CustomClassHolder {
7
+ virtual at::Tensor embeddingbag_byte(
8
+ const at::Tensor& indices,
9
+ const c10::optional<at::Tensor>& offsets,
10
+ bool pruned_weights,
11
+ const c10::optional<at::Tensor>& per_sample_weights_,
12
+ const c10::optional<at::Tensor>& compressed_indices_mapping,
13
+ bool include_last_offset,
14
+ bool is_embedding_op) = 0;
15
+
16
+ virtual at::Tensor embeddingbag_4bit(
17
+ const at::Tensor& indices,
18
+ const c10::optional<at::Tensor>& offsets,
19
+ bool pruned_weights,
20
+ const c10::optional<at::Tensor>& per_sample_weights_,
21
+ const c10::optional<at::Tensor>& compressed_indices_mapping,
22
+ bool include_last_offset,
23
+ bool is_embedding_op) = 0;
24
+
25
+ virtual at::Tensor unpack() = 0;
26
+
27
+ virtual int64_t bit_rate() const = 0;
28
+ virtual int64_t version() const = 0;
29
+ };
venv/lib/python3.10/site-packages/torch/include/ATen/native/quantized/cpu/OnednnUtils.h ADDED
@@ -0,0 +1,445 @@
1
+ #pragma once
2
+
3
+ #include <ATen/Config.h>
4
+ #if AT_MKLDNN_ENABLED()
5
+ #include <ATen/Tensor.h>
6
+ #include <ATen/native/quantized/PackedParams.h>
7
+ #include <ideep.hpp>
8
+ #include <cpuinfo.h>
9
+
10
+ #include <c10/util/CallOnce.h>
11
+
12
+ using PrimitiveCacheKey = std::tuple<
13
+ double, // input_scale
14
+ int64_t, // input_zero_point
15
+ std::vector<int64_t>, // input_shape
16
+ double, // output_scale
17
+ int64_t, // output_zero_point
18
+ int64_t, // OMP_number_of_threads
19
+ double, // accum_scale
20
+ int64_t>; // accum_zero_point
21
+
22
+ enum CacheKeyIndex {
23
+ InputScale,
24
+ InputZeroPoint,
25
+ InputShape,
26
+ OutputScale,
27
+ OutputZeroPoint,
28
+ NumOfThreads,
29
+ };
30
+
31
+ // Base class of primitive cache
32
+ struct PrimitiveCache {
33
+ PrimitiveCacheKey key;
34
+
35
+ bool hit(const PrimitiveCacheKey& key) {
36
+ return this->key == key;
37
+ }
38
+ };
39
+
40
+ using LinearParams = ideep::matmul_forward_params;
41
+ using Conv = dnnl::convolution_forward;
42
+ using ConvDesc = dnnl::convolution_forward::primitive_desc;
43
+ using ConvParams = ideep::convolution_forward_params;
44
+ using Deconv = dnnl::deconvolution_forward;
45
+ using DeconvDesc = dnnl::deconvolution_forward::primitive_desc;
46
+ using DeconvParams = ideep::deconv_forward_params;
47
+
48
+ struct LinearPrimitiveCache : PrimitiveCache {
49
+ LinearPrimitiveCache() {}
50
+
51
+ LinearPrimitiveCache(
52
+ const PrimitiveCacheKey& key,
53
+ const LinearParams& param) {
54
+ this->key = key;
55
+ this->param = param;
56
+ }
57
+
58
+ LinearParams param;
59
+
60
+ // For dynamic qlinear, scale and zero point
61
+ // are set at execution time. So we only need to compare
62
+ // the rest part of key.
63
+ bool hit_dynamic(const PrimitiveCacheKey& new_key) {
64
+ auto cached_input_shape = std::get<InputShape>(this->key);
65
+ auto new_input_shape = std::get<InputShape>(new_key);
66
+ return (
67
+ cached_input_shape == new_input_shape &&
68
+ std::get<NumOfThreads>(this->key) == std::get<NumOfThreads>(new_key));
69
+ }
70
+
71
+ LinearParams& get_param() {
72
+ return param;
73
+ }
74
+ };
75
+
76
+ struct ConvPrimitiveCache : PrimitiveCache {
77
+ ConvPrimitiveCache() {}
78
+
79
+ ConvPrimitiveCache(
80
+ const PrimitiveCacheKey& key,
81
+ const ConvParams& params) {
82
+ this->key = key;
83
+ this->params = params;
84
+ }
85
+
86
+ ConvParams params;
87
+
88
+ ConvParams& get_params() {
89
+ return params;
90
+ }
91
+ };
92
+
93
+ struct DeconvPrimitiveCache : PrimitiveCache {
94
+ DeconvPrimitiveCache() {}
95
+
96
+ DeconvPrimitiveCache(
97
+ const PrimitiveCacheKey& key,
98
+ const DeconvParams& params) {
99
+ this->key = key;
100
+ this->params = params;
101
+ }
102
+
103
+ DeconvParams params;
104
+
105
+ DeconvParams& get_params() {
106
+ return params;
107
+ }
108
+ };
109
+
110
+ enum PostOps {
111
+ NoPostOp,
112
+ Relu,
113
+ LeakyRelu,
114
+ Tanh,
115
+ Gelu
116
+ };
117
+
118
+ static std::unordered_map<std::string, PostOps> POST_OP_TABLE = {
119
+ {"none", NoPostOp},
120
+ {"relu", Relu},
121
+ {"leaky_relu", LeakyRelu},
122
+ {"tanh", Tanh},
123
+ {"gelu", Gelu}
124
+ };
125
+
126
+ struct PackedLinearWeightsOnednn : public LinearPackedParamsBase {
127
+ PackedLinearWeightsOnednn(
128
+ std::unique_ptr<ideep::tensor> weight,
129
+ c10::optional<ideep::tensor> bias,
130
+ at::Tensor orig_weight,
131
+ c10::optional<at::Tensor> orig_bias)
132
+ : weight_(std::move(weight)),
133
+ bias_(std::move(bias)),
134
+ orig_weight_(std::move(orig_weight)),
135
+ orig_bias_(std::move(orig_bias)) {
136
+ cache_initialized_flag = std::make_unique<c10::once_flag>();
137
+ }
138
+ std::unique_ptr<ideep::tensor> weight_;
139
+ c10::optional<ideep::tensor> bias_;
140
+ at::Tensor orig_weight_;
141
+ c10::optional<at::Tensor> orig_bias_;
142
+
143
+ at::Tensor apply(
144
+ at::Tensor input,
145
+ double output_scale,
146
+ int64_t output_zero_point) override;
147
+ at::Tensor apply_relu(
148
+ at::Tensor input,
149
+ double output_scale,
150
+ int64_t output_zero_point) override;
151
+
152
+ at::Tensor apply_dynamic(at::Tensor input, bool reduce_range=false) override;
153
+ at::Tensor apply_dynamic_relu(at::Tensor input, bool reduce_range=false) override;
154
+
155
+ at::Tensor apply_leaky_relu(
156
+ at::Tensor input,
157
+ double output_scale,
158
+ int64_t output_zero_point,
159
+ double negative_slope);
160
+
161
+ at::Tensor apply_tanh(
162
+ at::Tensor input,
163
+ double output_scale,
164
+ int64_t output_zero_point);
165
+
166
+ std::tuple<at::Tensor, c10::optional<at::Tensor>> unpack() override;
167
+
168
+ c10::optional<at::Tensor> bias() override {
169
+ return orig_bias_;
170
+ }
171
+
172
+ static c10::intrusive_ptr<LinearPackedParamsBase> prepack(
173
+ at::Tensor weight,
174
+ c10::optional<at::Tensor> bias);
175
+
176
+ private:
177
+ LinearPrimitiveCache prim_cache;
178
+ std::unique_ptr<c10::once_flag> cache_initialized_flag;
179
+
180
+ template <PostOps post_op>
181
+ at::Tensor apply_impl(
182
+ at::Tensor input,
183
+ double output_scale,
184
+ int64_t output_zero_point,
185
+ torch::List<at::Scalar> post_op_args = torch::List<at::Scalar>());
186
+
187
+ template <bool ReluFused>
188
+ at::Tensor apply_dynamic_impl(at::Tensor input, bool reduce_range=false);
189
+
190
+ LinearPrimitiveCache& get_cache() {
191
+ return prim_cache;
192
+ }
193
+ };
194
+
195
+ template <int kSpatialDim = 2>
196
+ struct PackedConvWeightsOnednn : public ConvPackedParamsBase<kSpatialDim> {
197
+ PackedConvWeightsOnednn(
198
+ std::unique_ptr<ideep::tensor> weight,
199
+ c10::optional<ideep::tensor> bias,
200
+ at::Tensor orig_weight,
201
+ c10::optional<at::Tensor> orig_bias,
202
+ torch::List<int64_t> stride,
203
+ torch::List<int64_t> padding,
204
+ torch::List<int64_t> output_padding,
205
+ torch::List<int64_t> dilation,
206
+ int64_t groups,
207
+ uint8_t transpose)
208
+ : weight_(std::move(weight)),
209
+ bias_(std::move(bias)),
210
+ orig_weight_(std::move(orig_weight)),
211
+ orig_bias_(std::move(orig_bias)),
212
+ stride_(std::move(stride)),
213
+ padding_(std::move(padding)),
214
+ output_padding_(std::move(output_padding)),
215
+ dilation_(std::move(dilation)),
216
+ groups_(groups),
217
+ transpose_(transpose) {
218
+ cache_initialized_flag = std::make_unique<c10::once_flag>();
219
+ }
220
+
221
+ std::unique_ptr<ideep::tensor> weight_;
222
+ c10::optional<ideep::tensor> bias_;
223
+ at::Tensor orig_weight_;
224
+ c10::optional<at::Tensor> orig_bias_;
225
+ torch::List<int64_t> stride_;
226
+ torch::List<int64_t> padding_;
227
+ torch::List<int64_t> output_padding_;
228
+ torch::List<int64_t> dilation_;
229
+ int64_t groups_;
230
+ uint8_t transpose_;
231
+
232
+ at::Tensor apply(
233
+ const at::Tensor& input,
234
+ double output_scale,
235
+ int64_t output_zero_point) override;
236
+
237
+ at::Tensor apply_relu(
238
+ const at::Tensor& input,
239
+ double output_scale,
240
+ int64_t output_zero_point) override;
241
+
242
+ at::Tensor apply_dynamic(
243
+ const at::Tensor& input,
244
+ bool reduce_range) override;
245
+
246
+ at::Tensor apply_add(
247
+ const at::Tensor& input,
248
+ const at::Tensor& accum,
249
+ double output_scale,
250
+ int64_t output_zero_point);
251
+
252
+ at::Tensor apply_add_relu(
253
+ const at::Tensor& input,
254
+ const at::Tensor& accum,
255
+ double output_scale,
256
+ int64_t output_zero_point);
257
+
258
+ std::tuple<at::Tensor, c10::optional<at::Tensor>> unpack() override;
259
+
260
+ static c10::intrusive_ptr<ConvPackedParamsBase<kSpatialDim>> prepack(
261
+ at::Tensor weight,
262
+ c10::optional<at::Tensor> bias,
263
+ torch::List<int64_t> stride,
264
+ torch::List<int64_t> padding,
265
+ torch::List<int64_t> output_padding,
266
+ torch::List<int64_t> dilation,
267
+ int64_t groups,
268
+ bool transpose);
269
+
270
+ torch::List<int64_t> stride() const override {
271
+ return stride_;
272
+ }
273
+
274
+ torch::List<int64_t> padding() const override {
275
+ return padding_;
276
+ }
277
+
278
+ torch::List<int64_t> output_padding() const override {
279
+ return output_padding_;
280
+ }
281
+
282
+ torch::List<int64_t> dilation() const override {
283
+ return dilation_;
284
+ }
285
+
286
+ int64_t groups() const override {
287
+ return groups_;
288
+ }
289
+
290
+ bool transpose() const override {
291
+ return (bool)transpose_;
292
+ }
293
+
294
+ private:
295
+ ConvPrimitiveCache conv_prim_cache;
296
+ DeconvPrimitiveCache deconv_prim_cache;
297
+ std::unique_ptr<c10::once_flag> cache_initialized_flag;
298
+
299
+ template <bool ReluFused>
300
+ at::Tensor apply_impl(
301
+ const at::Tensor& input,
302
+ const c10::optional<at::Tensor>& accum,
303
+ double output_scale,
304
+ int64_t output_zero_point);
305
+
306
+ ConvPrimitiveCache& get_conv_cache() {
307
+ assert(!transpose());
308
+ return conv_prim_cache;
309
+ }
310
+
311
+ DeconvPrimitiveCache& get_deconv_cache() {
312
+ assert(transpose());
313
+ return deconv_prim_cache;
314
+ }
315
+ };
316
+
317
+ namespace onednn_utils {
318
+
319
+ static ideep::attr_t create_attr_by_post_op(
320
+ const std::string& post_op_name,
321
+ const torch::List<c10::optional<at::Scalar>>& post_op_args,
322
+ const dnnl::algorithm post_algorithm) {
323
+ using ideep::tensor;
324
+ PostOps post_op = POST_OP_TABLE[post_op_name];
325
+ if (post_op == Relu) {
326
+ return ideep::attr_t::fuse_relu();
327
+ } else if (post_op == LeakyRelu) {
328
+ return ideep::attr_t::fuse_relu_v2(/*alpha=*/post_op_args[0].value().to<float>());
329
+ } else if (post_op == Tanh) {
330
+ return ideep::attr_t::fuse_tanh();
331
+ } else if (post_op == Gelu) {
332
+ return ideep::attr_t::fuse_gelu_v2(0.f, 0.f, post_algorithm);
333
+ }
334
+ return ideep::attr_t();
335
+ }
336
+
337
+ // Try to reorder tensor to expected desc at runtime
338
+ // Do it in a `try...catch...` manner to avoid oneDNN's errors
339
+ // TODO: Move it to third_party/ideep
340
+ static void try_reorder(
341
+ ideep::tensor& t,
342
+ const ideep::tensor::desc&& desc,
343
+ ideep::scale_t scales) {
344
+ if (t.get_desc() != desc) {
345
+ try {
346
+ t = t.reorder_if_differ_in(desc);
347
+ } catch (...) {
348
+ ideep::tensor&& plain = t.to_public(nullptr, t.get_data_type());
349
+ t = plain.reorder_if_differ_in(desc);
350
+ }
351
+ t.set_scale(scales);
352
+ }
353
+ }
354
+
355
+ // ONEDNN requires symmetric quantization of weights.
356
+ // Use this util function to check.
357
+ static bool is_weight_symmetric_quant(
358
+ const at::Tensor& weight,
359
+ bool is_transposed_conv) {
360
+ bool is_symmetric = true;
361
+ const auto qtype = weight.qscheme();
362
+ if (qtype == c10::kPerTensorAffine) {
363
+ is_symmetric &= (weight.q_zero_point() == 0);
364
+ } else if (qtype == c10::kPerChannelAffine) {
365
+ if (is_transposed_conv) {
366
+ // This case is currently not supported in PyTorch
367
+ // but we do not want to raise an error in this util function.
368
+ is_symmetric = false;
369
+ } else {
370
+ auto output_channels = weight.size(0);
371
+ for (int i = 0; i < output_channels; ++i) {
372
+ auto zp = weight.q_per_channel_zero_points()[i].item<int32_t>();
373
+ is_symmetric &= (zp == 0);
374
+ }
375
+ }
376
+ } else {
377
+ // This case is currently not supported in PyTorch
378
+ // but we do not want to raise an error in this util function.
379
+ is_symmetric = false;
380
+ }
381
+ return is_symmetric;
382
+ }
383
+
384
+ // When qengine is x86, use this util func to check if onednn kernel
385
+ // is preferred over fbgemm's for better performance.
386
+ static bool should_use_onednn_quant(
387
+ const at::Tensor& weight,
388
+ bool is_transposed_conv,
389
+ int groups,
390
+ torch::List<int64_t> output_padding) {
391
+ // Performance of onednn is only validated on Linux right now.
392
+ // Also, the heuristics for dispatching are based on perf data on Linux.
393
+ // So, for x86 qengine, we always use fbgemm kernels if OS is not Linux.
394
+ // TODO Support more OSs.
395
+ #if !defined(__linux__)
396
+ return false;
397
+ #else
398
+ bool vnni_available = cpuinfo_has_x86_avx512vnni();
399
+ bool w_sym_quant =
400
+ is_weight_symmetric_quant(weight, is_transposed_conv);
401
+ bool opad_all_zero =
402
+ std::all_of(output_padding.begin(), output_padding.end(), [](int i) { return i==0; });
403
+ return vnni_available && (groups <= 100) && w_sym_quant && opad_all_zero;
404
+ #endif
405
+ }
406
+
407
+ } // onednn_utils
408
+
409
+ at::Tensor _qconv_prepack_onednn(
410
+ at::Tensor weight, // from CPU backend instead of QuantizedCPU
411
+ at::Tensor weight_scales, // Weight zero points must be 0 for onednn
412
+ double input_scale,
413
+ int64_t input_zero_point,
414
+ torch::List<int64_t> stride,
415
+ torch::List<int64_t> padding,
416
+ torch::List<int64_t> dilation,
417
+ int64_t groups,
418
+ c10::optional<torch::List<int64_t>> input_shape=c10::nullopt);
419
+
420
+ static at::Tensor _quantized_convolution_onednn(
421
+ at::Tensor act, // contains quantized values but not QTensor
422
+ double act_scale,
423
+ int64_t act_zero_point,
424
+ at::Tensor weight, // MKLDNN tensor with quantized values
425
+ at::Tensor weight_scales,
426
+ at::Tensor weight_zero_points,
427
+ c10::optional<at::Tensor> bias, // Bias is packed if not None
428
+ torch::List<int64_t> stride,
429
+ torch::List<int64_t> padding,
430
+ torch::List<int64_t> dilation,
431
+ bool transposed,
432
+ int64_t groups,
433
+ double inv_output_scale,
434
+ int64_t output_zero_point,
435
+ c10::optional<at::Tensor> accum=c10::nullopt, // accum to fused with conv add
436
+ double accum_scale=1.0,
437
+ int64_t accum_zero_point=0,
438
+ bool fp32_output=false,
439
+ c10::optional<c10::string_view> binary_attr=c10::nullopt,
440
+ c10::optional<at::Scalar> binary_alpha=c10::nullopt,
441
+ c10::optional<c10::string_view> unary_attr=c10::nullopt,
442
+ torch::List<c10::optional<at::Scalar>> unary_scalars=torch::List<c10::optional<at::Scalar>>(),
443
+ c10::optional<c10::string_view> unary_algorithm=c10::nullopt);
444
+
445
+ #endif // #if AT_MKLDNN_ENABLED()
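A standalone sketch of the tuple-keyed caching idea behind PrimitiveCache::hit() above: std::tuple comparison is element-wise, so a single == decides whether a prepared primitive can be reused. The key fields and values here are simplified stand-ins for PrimitiveCacheKey:

    #include <cassert>
    #include <cstdint>
    #include <tuple>
    #include <vector>

    int main() {
      // Simplified stand-in for PrimitiveCacheKey: scale, zero point, input shape.
      using Key = std::tuple<double, int64_t, std::vector<int64_t>>;
      Key cached{0.05, 128, {1, 3, 224, 224}};
      Key incoming{0.05, 128, {1, 3, 224, 224}};
      assert(cached == incoming);      // cache hit: reuse the prepared primitive
      std::get<2>(incoming) = {2, 3, 224, 224};
      assert(!(cached == incoming));   // shape changed: rebuild the primitive
      return 0;
    }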
venv/lib/python3.10/site-packages/torch/include/ATen/native/quantized/cpu/QnnpackUtils.h ADDED
@@ -0,0 +1,527 @@
1
+ #pragma once
2
+
3
+ #ifdef USE_PYTORCH_QNNPACK
4
+ #include <ATen/core/Tensor.h>
5
+ #include <c10/util/irange.h>
6
+ #include <pytorch_qnnpack.h>
7
+ #include <qnnpack_func.h>
8
+ #include <ATen/native/quantized/cpu/XnnpackUtils.h>
9
+ #include <ATen/native/quantized/PackedParams.h>
10
+ #include <ATen/native/utils/Factory.h>
11
+
12
+ #ifndef AT_PER_OPERATOR_HEADERS
13
+ #include <ATen/Functions.h>
14
+ #else
15
+ #include <ATen/ops/empty.h>
16
+ #endif
17
+
18
+ #include <utility>
19
+ inline int kPaddingChannels = 8;
20
+ struct QnnpackOperatorDeleter {
21
+ void operator()(pytorch_qnnp_operator_t op) {
22
+ pytorch_qnnp_delete_operator(op);
23
+ }
24
+ };
25
+
26
+ // PackedWeight struct for QNNPACK stores the original Weight and Bias because
27
+ // QNNPACK currently does not support an unpack function.
28
+ // For PyTorch Mobile, once the model is scripted and serialized we don't need
29
+ // to call unpack, so we can save some memory by checking for this case and freeing
30
+ // the original weights after packing.
31
+ // Input scale is set to null in pre-pack step. QNNPACK needs bias quantized
32
+ // with input scale which is available at runtime in pytorch. During runtime if
33
+ // input scale value changes then we requantize bias with the updated scale. For
34
+ // inference we expect the graph to be static so the input scale should not
35
+ // change across consecutive inference calls.
36
+ struct PackedLinearWeightsQnnp : public LinearPackedParamsBase {
37
+ PackedLinearWeightsQnnp(
38
+ std::unique_ptr<qnnpack::PackBMatrix> w,
39
+ at::Tensor orig_weight,
40
+ at::Tensor bias,
41
+ c10::optional<double> input_scale,
42
+ at::Tensor w_scales,
43
+ std::vector<uint8_t>&& w_zps)
44
+ : w(std::move(w)),
45
+ orig_weight(std::move(orig_weight)),
46
+ bias_(at::native::mobile::allocate_padded_contiguous_if_needed(
47
+ bias, bias.suggest_memory_format())),
48
+ per_channel_(this->orig_weight.qscheme() == at::kPerChannelAffine),
49
+ input_scale(std::move(input_scale)),
50
+ w_scales(std::move(w_scales)),
51
+ w_zero_points(std::move(w_zps)),
52
+ q_scheme(this->orig_weight.qscheme()) {
53
+ weight_sizes = this->orig_weight.sizes().vec();
54
+ }
55
+
56
+ std::unique_ptr<qnnpack::PackBMatrix> w;
57
+ at::Tensor orig_weight;
58
+ at::Tensor bias_;
59
+ bool per_channel_;
60
+ c10::optional<double> input_scale;
61
+ at::Tensor w_scales;
62
+ std::vector<uint8_t> w_zero_points;
63
+ std::vector<float> requantization_scales;
64
+ std::vector<int64_t> weight_sizes;
65
+ c10::QScheme q_scheme;
66
+
67
+ at::Tensor apply(
68
+ at::Tensor input,
69
+ double output_scale,
70
+ int64_t output_zero_point) override;
71
+ at::Tensor apply_relu(
72
+ at::Tensor input,
73
+ double output_scale,
74
+ int64_t output_zero_point) override;
75
+
76
+ at::Tensor apply_dynamic(at::Tensor input, bool reduce_range=false) override;
77
+ at::Tensor apply_dynamic_relu(at::Tensor input, bool reduce_range=false) override;
78
+
79
+ std::tuple<at::Tensor, c10::optional<at::Tensor>> unpack() override;
80
+
81
+ c10::optional<at::Tensor> bias() override {
82
+ return bias_;
83
+ }
84
+
85
+ static c10::intrusive_ptr<LinearPackedParamsBase> prepack(
86
+ at::Tensor weight,
87
+ c10::optional<at::Tensor> bias);
88
+
89
+ bool per_channel() const {
90
+ return per_channel_;
91
+ }
92
+
93
+ private:
94
+ std::mutex qnnp_mutex_;
95
+
96
+ #ifdef USE_XNNPACK
97
+ xnnpack_operator xnnp_linear_op;
98
+
99
+ template <typename scalar_t, bool kReluFused>
100
+ at::Tensor apply_impl_xnnp(
101
+ const at::Tensor& input,
102
+ double output_scale,
103
+ int64_t output_zero_point);
104
+ #endif // USE_XNNPACK
105
+
106
+ template <bool ReluFused>
107
+ at::Tensor apply_impl(
108
+ at::Tensor input,
109
+ double output_scale,
110
+ int64_t output_zero_point);
111
+
112
+ template <bool ReluFused>
113
+ at::Tensor apply_dynamic_impl(at::Tensor input, bool reduce_range);
114
+ };
115
+
116
+ template <int kSpatialDim = 2>
117
+ struct PackedConvWeightsQnnp : public ConvPackedParamsBase<kSpatialDim> {
118
+ PackedConvWeightsQnnp(
119
+ std::unique_ptr<qnnpack::PrePackConvWeights> w,
120
+ at::Tensor orig_weight,
121
+ at::Tensor bias,
122
+ torch::List<int64_t> stride,
123
+ torch::List<int64_t> padding,
124
+ torch::List<int64_t> output_padding,
125
+ torch::List<int64_t> dilation,
126
+ int64_t groups,
127
+ bool transpose,
128
+ c10::optional<double> input_scale,
129
+ std::vector<int64_t> kernel,
130
+ at::Tensor w_scale,
131
+ std::vector<uint8_t>&& w_zps,
132
+ bool is_per_channel)
133
+ : w(std::move(w)),
134
+ orig_weight(std::move(orig_weight)),
135
+ bias(std::move(bias)),
136
+ stride_(std::move(stride)),
137
+ padding_(std::move(padding)),
138
+ output_padding_(std::move(output_padding)),
139
+ dilation_(std::move(dilation)),
140
+ groups_(groups),
141
+ transpose_(transpose),
142
+ is_per_channel_(is_per_channel),
143
+ input_scale(input_scale),
144
+ kernel_(std::move(kernel)),
145
+ w_scales(std::move(w_scale)),
146
+ w_zero_points(std::move(w_zps)) {
147
+ const bool any_padding = std::any_of(
148
+ padding_.begin(), padding_.end(), [](const auto& e) { return e != 0; });
149
+ const size_t kernel_size =
150
+ std::accumulate(kernel_.begin(), kernel_.end(), 1, std::multiplies<>());
151
+
152
+ const size_t group_input_channels = transpose
153
+ ? this->orig_weight.size(0) / groups
154
+ : this->orig_weight.size(1);
155
+ const size_t group_output_channels = transpose
156
+ ? this->orig_weight.size(1)
157
+ : this->orig_weight.size(0) / groups;
158
+
159
+ const size_t kernel_depth = kSpatialDim == 3 ? kernel_[0] : 1;
160
+ const size_t kernel_height = kernel_[kSpatialDim - 2];
161
+ const size_t kernel_width = kernel_[kSpatialDim - 1];
162
+
163
+ pytorch_qnnp_ukernel_type ukernel_type;
164
+ if (transpose_) {
165
+ ukernel_type = pytorch_qnnp_ukernel_type_conv;
166
+ } else {
167
+ ukernel_type = pytorch_qnnp_ukernel_type_none;
168
+
169
+ const bool has_depthwise_dimensions =
170
+ (kSpatialDim == 2 &&
171
+ ((kernel_height == 3 && kernel_width == 3) ||
172
+ (kernel_height == 5 && kernel_width == 5))) ||
173
+ (kSpatialDim == 3 && kernel_height == 3 && kernel_width == 3 &&
174
+ kernel_depth == 3);
175
+ const bool has_depthwise_grouping =
176
+ group_input_channels == 1 && group_output_channels == 1 && groups > 1;
177
+
178
+ if (has_depthwise_dimensions && has_depthwise_grouping) {
179
+ ukernel_type = pytorch_qnnp_ukernel_type_dwconv;
180
+ } else if (
181
+ kernel_size == 1 &&
182
+ std::all_of(
183
+ stride_.begin(),
184
+ stride_.end(),
185
+ [](const auto& e) { return e == 1; }) &&
186
+ !any_padding) {
187
+ ukernel_type = group_input_channels >= SIZE_MAX
188
+ ? pytorch_qnnp_ukernel_type_xzp_gemm
189
+ : pytorch_qnnp_ukernel_type_gemm;
190
+ } else {
191
+ ukernel_type = pytorch_qnnp_ukernel_type_conv;
192
+ }
193
+ }
194
+
195
+ if (is_per_channel && ukernel_type == pytorch_qnnp_ukernel_type_xzp_gemm) {
196
+ TORCH_INTERNAL_ASSERT(
197
+ false, "Per channel quantized weights are not supported for XZP kernels");
198
+ }
199
+
200
+ pytorch_qnnp_operator_t convolution{nullptr};
201
+ // Initially all the params are set to zero.
202
+ convolution = static_cast<pytorch_qnnp_operator_t>(
203
+ calloc(1, sizeof(struct pytorch_qnnp_operator)));
204
+ if (convolution == nullptr) {
205
+ TORCH_INTERNAL_ASSERT(
206
+ false, "failed to allocate %zu bytes for pytorch_qnnp_operator structure",
207
+ sizeof(struct pytorch_qnnp_operator));
208
+ }
209
+
210
+ convolution_op =
211
+ std::unique_ptr<pytorch_qnnp_operator, QnnpackOperatorDeleter>(
212
+ convolution);
213
+
214
+ // NOLINTNEXTLINE(clang-analyzer-core.NullDereference)
215
+ convolution->ukernel_type = ukernel_type;
216
+ convolution->groups = groups;
217
+ convolution->group_input_channels = group_input_channels;
218
+ convolution->group_output_channels = group_output_channels;
219
+ convolution->kernel_depth = kernel_depth;
220
+ convolution->kernel_height = kernel_height;
221
+ convolution->kernel_width = kernel_width;
222
+ convolution->stride_depth = kSpatialDim == 3 ? stride_[0] : 1;
223
+ convolution->stride_height = stride_[kSpatialDim - 2];
224
+ convolution->stride_width = stride_[kSpatialDim - 1];
225
+ convolution->dilation_depth = kSpatialDim == 3 ? dilation_[0] : 1;
226
+ convolution->dilation_height = dilation_[kSpatialDim - 2];
227
+ convolution->dilation_width = dilation_[kSpatialDim - 1];
228
+ convolution->input_padding_height = padding_[kSpatialDim - 2];
229
+ convolution->input_padding_width = padding_[kSpatialDim - 1];
230
+ convolution->input_padding_depth = kSpatialDim == 3 ? padding_[0] : 0;
231
+ convolution->per_channel = is_per_channel_;
232
+ convolution->transpose = transpose_;
233
+
234
+ const uint32_t kr = pytorch_qnnp_params.q8conv.kr;
235
+ const size_t k_stride = (group_input_channels + (kr - 1)) & -kr;
236
+
237
+ size_t zero_size = sizeof(uint8_t) * k_stride;
238
+ size_t zero_offset = 0;
239
+
240
+ if (transpose_) {
241
+ convolution->adjustment_width = output_padding_[1];
242
+ convolution->adjustment_height = output_padding_[0];
243
+ if (group_input_channels < 8) {
244
+ zero_size += 8;
245
+ zero_offset = 8;
246
+ }
247
+ } else {
248
+ zero_buffer_size = 0;
249
+ if (any_padding) {
250
+ zero_size = 0;
251
+ zero_offset = 0;
252
+ if (ukernel_type == pytorch_qnnp_ukernel_type_dwconv) {
253
+ const uint32_t cr = pytorch_qnnp_params.q8dw9.cr;
254
+ const size_t group_stride = (groups + (cr - 1)) & -cr;
255
+ if (groups >= 8) {
256
+ zero_size = sizeof(uint8_t) * group_stride;
257
+ zero_offset = 0;
258
+ } else {
259
+ zero_size = sizeof(uint8_t) * group_stride + 8;
260
+ zero_offset = sizeof(uint8_t) * 8;
261
+ }
262
+ } else if (
263
+ ukernel_type == pytorch_qnnp_ukernel_type_conv ||
264
+ ukernel_type == pytorch_qnnp_ukernel_type_gemm) {
265
+ if (group_input_channels >= 8) {
266
+ zero_size = sizeof(uint8_t) * k_stride;
267
+ zero_offset = 0;
268
+ } else {
269
+ zero_size = sizeof(uint8_t) * k_stride + 8;
270
+ zero_offset = 8;
271
+ }
272
+ }
273
+ }
274
+ }
275
+
276
+ // NOLINTNEXTLINE(clang-analyzer-optin.portability.UnixAPI)
277
+ void* zero_buffer = malloc(zero_size);
278
+ if (zero_buffer == nullptr) {
279
+ pytorch_qnnp_delete_operator(convolution);
280
+ TORCH_INTERNAL_ASSERT(
281
+ false, "failed to allocate %zu bytes for zero padding",
282
+ zero_size);
283
+ }
284
+ // Need to set to input zero point
285
+ // memset(zero_buffer, input_zero_point, zero_size);
286
+ zero_buffer_size = zero_size;
287
+ convolution->zero_buffer = zero_buffer;
288
+ convolution->zero_pointer = (void*)((uintptr_t)zero_buffer + zero_offset);
289
+ }
290
+
291
+ std::unique_ptr<pytorch_qnnp_operator, QnnpackOperatorDeleter> convolution_op;
292
+ #ifdef USE_XNNPACK
293
+ xnnpack_operator xnnp_convolution_op;
294
+ #endif // USE_XNNPACK
295
+ std::unique_ptr<qnnpack::PrePackConvWeights> w;
296
+ at::Tensor orig_weight;
297
+ at::Tensor bias;
298
+ torch::List<int64_t> stride_;
299
+ torch::List<int64_t> padding_;
300
+ torch::List<int64_t> output_padding_;
301
+ torch::List<int64_t> dilation_;
302
+ int64_t groups_;
303
+ bool transpose_;
304
+ bool is_per_channel_;
305
+ c10::optional<double> input_scale;
306
+ std::vector<int64_t> kernel_;
307
+ at::Tensor w_scales;
308
+ std::vector<uint8_t> w_zero_points;
309
+ std::vector<float> requantization_scales;
310
+ size_t zero_buffer_size;
311
+
312
+ at::Tensor apply(
313
+ const at::Tensor& input,
314
+ double output_scale,
315
+ int64_t output_zero_point) override;
316
+
317
+ at::Tensor apply_relu(
318
+ const at::Tensor& input,
319
+ double output_scale,
320
+ int64_t output_zero_point) override;
321
+
322
+ at::Tensor apply_dynamic(
323
+ const at::Tensor& input,
324
+ bool reduce_range=false) override;
325
+
326
+ std::tuple<at::Tensor, c10::optional<at::Tensor>> unpack() override;
327
+
328
+ static c10::intrusive_ptr<ConvPackedParamsBase<kSpatialDim>> prepack(
329
+ at::Tensor weight,
330
+ c10::optional<at::Tensor> bias,
331
+ torch::List<int64_t> stride,
332
+ torch::List<int64_t> padding,
333
+ torch::List<int64_t> output_padding,
334
+ torch::List<int64_t> dilation,
335
+ int64_t groups,
336
+ bool transpose);
337
+
338
+ torch::List<int64_t> stride() const override {
339
+ return stride_;
340
+ }
341
+
342
+ torch::List<int64_t> padding() const override {
343
+ return padding_;
344
+ }
345
+
346
+ torch::List<int64_t> output_padding() const override {
347
+ return output_padding_;
348
+ }
349
+
350
+ torch::List<int64_t> dilation() const override {
351
+ return dilation_;
352
+ }
353
+
354
+ int64_t groups() const override {
355
+ return groups_;
356
+ }
357
+
358
+ bool transpose() const override {
359
+ return transpose_;
360
+ }
361
+
362
+ bool per_channel() const {
363
+ return is_per_channel_;
364
+ }
365
+
366
+ private:
367
+ std::mutex qnnp_mutex_;
368
+ template <bool ReluFused>
369
+ at::Tensor apply_impl(
370
+ const at::Tensor& input,
371
+ double output_scale,
372
+ int64_t output_zero_point);
373
+
374
+ #ifdef USE_XNNPACK
375
+ template <typename scalar_t, bool ReluFused>
376
+ at::Tensor apply_impl_xnnp(
377
+ const at::Tensor& input,
378
+ double output_scale,
379
+ int64_t output_zero_point);
380
+ #endif // USE_XNNPACK
381
+ };
382
+
383
+ enum class Activation : uint8_t { NONE = 0, RELU = 1 };
384
+
385
+ #if defined(__ANDROID__) && !defined(__NDK_MAJOR__)
386
+ template <class T>
387
+ inline float Round(const float x) {
388
+ return ::nearbyintf(x);
389
+ }
390
+ inline double Round(const double x) {
391
+ return ::nearbyint(x);
392
+ }
393
+ #else
394
+ template <class T>
395
+ inline T Round(const T x) {
396
+ return std::nearbyint(x);
397
+ }
398
+ #endif
399
+
400
+ template<typename T>
401
+ inline T QuantizeValue(float scale, int32_t zero_point, float value) {
402
+ const int32_t qmin = std::numeric_limits<T>::min();
403
+ const int32_t qmax = std::numeric_limits<T>::max();
404
+ auto r = zero_point + static_cast<int32_t>(Round(value / scale));
405
+ r = std::max(r, qmin);
406
+ r = std::min(r, qmax);
407
+ return static_cast<T>(r);
408
+ }
409
+
410
+ template<typename T>
411
+ inline std::pair<T, T> activationLimits(
412
+ float scale,
413
+ int32_t zero_point,
414
+ Activation Ac) {
415
+ switch (Ac) {
416
+ case Activation::NONE:
417
+ return {std::numeric_limits<T>::min(),
418
+ std::numeric_limits<T>::max()};
419
+ case Activation::RELU:
420
+ return {QuantizeValue<T>(scale, zero_point, 0.0),
421
+ std::numeric_limits<T>::max()};
422
+ default:
423
+ #ifdef _MSC_VER
424
+ __assume(0);
425
+ #else
426
+ __builtin_unreachable();
427
+ #endif
428
+ }
429
+ }
430
+
431
+ namespace at {
432
+ namespace native {
433
+ namespace qnnp_avgpool_helper {
434
+ Tensor qnnpack_avg_pool2d(
435
+ Tensor input,
436
+ IntArrayRef kernel_size,
437
+ IntArrayRef stride,
438
+ IntArrayRef padding,
439
+ bool ceil_mode,
440
+ bool count_include_pad,
441
+ c10::optional<int64_t> divisor_override);
442
+ } // qnnp_avgpool_helper
443
+ } // namespace native
444
+ } // namespace at
445
+
446
+ namespace {
447
+ C10_UNUSED std::vector<float> generate_requantization_scales(
448
+ const at::Tensor& weight_scales,
449
+ const float input_scale,
450
+ const float output_scale,
451
+ std::vector<float>& requant_scales) {
452
+ // Since weight scale is allocated with padding
453
+ // weight_scales.numel() gives us padded num elements.
454
+ const auto num_output_channels_padded = weight_scales.numel();
455
+ float *const weight_scales_data = weight_scales.data_ptr<float>();
456
+ if (static_cast<int64_t>(requant_scales.size()) < num_output_channels_padded) {
457
+ requant_scales.resize(num_output_channels_padded);
458
+ }
459
+ for (const auto i : c10::irange(num_output_channels_padded)) {
460
+ const auto inverse_output_scale = 1.f / output_scale;
461
+ requant_scales[i] = (weight_scales_data[i] * input_scale) * inverse_output_scale;
462
+ TORCH_CHECK(
463
+ (requant_scales[i] > 0.0f && std::isnormal(requant_scales[i])),
464
+ "failed to create op with requantization scale: ",
465
+ requant_scales[i],
466
+ ": requantization scale must be finite and positive");
467
+ }
468
+ return requant_scales;
469
+ }
470
+
471
+ C10_UNUSED std::pair<std::vector<uint8_t>, at::Tensor> make_zero_points_and_scales_tensor(
472
+ const at::Tensor& weight_contig,
473
+ bool transpose = false,
474
+ uint32_t groups = 1
475
+ ) {
476
+ const int out_ch_idx = transpose ? 1 : 0;
477
+ const auto num_output_channels = weight_contig.size(out_ch_idx) * (transpose ? groups : 1);
478
+ // Add 8 to account for buffering needed by QNNPACK.
479
+ const auto num_output_channels_padded = num_output_channels + kPaddingChannels;
480
+ const auto qtype = weight_contig.qscheme();
481
+ std::vector<uint8_t> weight_zp(num_output_channels_padded, 0);
482
+ // Adjust weight zero point, similar to weight data.
483
+ if (qtype == at::kPerTensorAffine) {
484
+ for (const auto i : c10::irange(num_output_channels)) {
485
+ weight_zp[i] = (uint8_t)(weight_contig.q_zero_point() + 128);
486
+ }
487
+ } else if (qtype == at::kPerChannelAffine) {
488
+ TORCH_CHECK(
489
+ weight_contig.q_per_channel_zero_points().scalar_type() == at::kLong,
490
+ "Per channel zero points dtype must be long int.");
491
+ const int64_t* per_channel_zero_points =
492
+ weight_contig.q_per_channel_zero_points().data_ptr<int64_t>();
493
+ for (const auto i : c10::irange(num_output_channels)) {
494
+ weight_zp[i] = (uint8_t)(per_channel_zero_points[i] + 128);
495
+ }
496
+ } else {
497
+ TORCH_INTERNAL_ASSERT(false, "Unsupported quantization scheme.");
498
+ }
499
+ at::Tensor weight_scales =
500
+ at::empty(
501
+ {num_output_channels_padded},
502
+ at::device(at::kCPU).dtype(at::kFloat));
503
+ float *const weight_scales_data = weight_scales.data_ptr<float>();
504
+ if (qtype == at::kPerTensorAffine) {
505
+ for (const auto i : c10::irange(num_output_channels)) {
506
+ weight_scales_data[i] = weight_contig.q_scale();
507
+ }
508
+ } else if (qtype == at::kPerChannelAffine) {
509
+ TORCH_CHECK(
510
+ weight_contig.q_per_channel_scales().scalar_type() == at::kDouble,
511
+ "Per channel scales dtype must be double.");
512
+ const double *const per_channel_scales =
513
+ weight_contig.q_per_channel_scales().data_ptr<double>();
514
+ for (const auto i : c10::irange(num_output_channels)) {
515
+ weight_scales_data[i] = static_cast<float>(per_channel_scales[i]);
516
+ }
517
+ } else {
518
+ TORCH_INTERNAL_ASSERT(false, "Unsupported quantization scheme.");
519
+ }
520
+ for (const auto i : c10::irange(num_output_channels, num_output_channels_padded)) {
521
+ weight_scales_data[i] = 1.f;
522
+ }
523
+ return {weight_zp, weight_scales};
524
+ }
525
+ } // namespace
526
+
527
+ #endif
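A usage sketch for the inline QuantizeValue and activationLimits helpers defined above (only meaningful when USE_PYTORCH_QNNPACK is defined; the driver function is hypothetical):

    #include <cstdint>
    #include <iostream>

    // Hypothetical driver: quantize 1.0f and compute the clamp range a fused ReLU
    // imposes on uint8 outputs.
    void qnnpack_helpers_example() {
      const float scale = 0.1f;
      const int32_t zero_point = 128;
      const uint8_t q = QuantizeValue<uint8_t>(scale, zero_point, 1.0f);  // 128 + 10 = 138
      const auto limits = activationLimits<uint8_t>(scale, zero_point, Activation::RELU);
      // ReLU clamps at the quantized representation of 0.0f, i.e. the zero point.
      std::cout << int(q) << " in [" << int(limits.first) << ", " << int(limits.second) << "]\n";
    }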
venv/lib/python3.10/site-packages/torch/include/ATen/native/quantized/cpu/QuantUtils.h ADDED
@@ -0,0 +1,239 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/Tensor.h>
4
+ #include <ATen/core/List.h>
5
+ #include <ATen/TensorOperators.h>
6
+ #include <c10/util/irange.h>
7
+ #include <algorithm>
8
+ #include <cmath>
9
+
10
+ #ifndef AT_PER_OPERATOR_HEADERS
11
+ #include <ATen/Functions.h>
12
+ #include <ATen/NativeFunctions.h>
13
+ #else
14
+ #include <ATen/ops/quantize_per_tensor_native.h>
15
+ #include <ATen/ops/quantize_per_channel_native.h>
16
+ #include <ATen/ops/zeros.h>
17
+ #endif
18
+
19
+ namespace quant_utils {
20
+ namespace {
21
+ float RawUint16ToFp16(unsigned short value) {
22
+ // Convert a raw 16-bit half-precision floating point number
23
+ // to a single-precision floating point number.
24
+ const unsigned short sign_bits = value >> 15;
25
+ const unsigned short exponent_bits = value >> 10 & 0x1f;
26
+ const unsigned short significand_bits = value & 0x3ff;
27
+
28
+ const float sign = sign_bits ? -1 : 1;
29
+ const float significand =
30
+ 1 + significand_bits * 0.0009765625f; // 0.0009765625f = 0x1p-10 = 2^-10;
31
+ const float exponent = exponent_bits - 0xf;
32
+
33
+ return sign * std::ldexp(significand, exponent);
34
+ }
35
+
36
+ template <typename T>
37
+ bool CheckAndSaturate(T max_val, T* element) {
38
+ if (*element > max_val) {
39
+ *element = max_val;
40
+ return true;
41
+ }
42
+ if (*element < -max_val) {
43
+ *element = -max_val;
44
+ return true;
45
+ }
46
+ return false;
47
+ }
48
+ }
49
+ using namespace std;
50
+ // A structure to hold quantization parameters 'scale' and 'zero_point'.
51
+ // The meaning of these values is as the constants in the quantization equation
52
+ //
53
+ // real_value = scale * (quantized_value - zero_point)
54
+ //
55
+ // In other words, 'zero_point' is the quantized value that corresponds
56
+ // to the real value 0, and 'scale' is the difference of real values
57
+ // corresponding to consecutive quantized values.
58
+ struct TensorQuantizationParams {
59
+ double scale;
60
+ std::int32_t zero_point;
61
+ int precision;
62
+ };
63
+
64
+ // Use fp16_min as the small scale cutoff because we don't want to use scales in
65
+ // fp16 subnormal range. This is to be consistent with Glow and FakeLowP
66
+ // implementation for NNPI.
67
+ constexpr float SMALL_SCALE_THRESHOLD = 6.1e-5f;
68
+
69
+ // The following implementation should be identical to fbgemm::ChooseQuantizationParams
70
+ inline TensorQuantizationParams ChooseQuantizationParams(
71
+ float min,
72
+ float max,
73
+ int32_t qmin,
74
+ int32_t qmax,
75
+ bool preserve_sparsity = false,
76
+ bool force_scale_power_of_two = false,
77
+ bool reduce_range = false) {
78
+ TORCH_CHECK(
79
+ min <= max,
80
+ "In ChooseQuantizationParams, min should be less than or equal to max");
81
+
82
+ if (reduce_range) {
83
+ qmin = qmin/2;
84
+ qmax = qmax/2;
85
+ }
86
+ if (min < 0 && max > 0 && preserve_sparsity) {
87
+ int symmetric_qmin = -((qmax - qmin) / 2 + 1);
88
+ int symmetric_qmax = (qmax - qmin) / 2;
89
+ double max_scale =
90
+ std::max(fabs(min / symmetric_qmin), fabs(max / symmetric_qmax));
91
+ min = max_scale * symmetric_qmin;
92
+ max = max_scale * symmetric_qmax;
93
+ }
94
+
95
+ // We extend the [min, max] interval to ensure that it contains 0.
96
+ // Otherwise, we would not meet the requirement that 0 be an exactly
97
+ // representable value.
98
+ min = std::min(min, 0.f);
99
+ max = std::max(max, 0.f);
100
+
101
+ TORCH_CHECK(
102
+ qmin < qmax,
103
+ "In ChooseQuantizationParams, qmin should be less than qmax");
104
+
105
+ // Use double precision for intermediate computation but use single precision
106
+ // in final number to reflect the actual number used during quantization.
107
+ double scale = (static_cast<double>(max) - min) / (qmax - qmin);
108
+ // If scale is 0 or so small that its reciprocal is infinity, we arbitrarily
109
+ // adjust the scale to 0.1. We want to avoid the scale's reciprocal being
110
+ // infinity because some of the fbgemm code pre-computes the scale's reciprocal to do
111
+ // multiplication instead of division in the time critical part of code.
112
+ if (float(scale) == 0.0f || std::isinf(1.0f / float(scale))) {
113
+ scale = 0.1;
114
+ }
115
+ TORCH_CHECK(scale > 0, "quantization scale should be > 0");
116
+
117
+ if (force_scale_power_of_two) {
118
+ if (scale < 1) {
119
+ scale = 1.0 / (1 << static_cast<int>(floor(log(1.0 / scale) / log(2))));
120
+ } else {
121
+ scale = 1 << static_cast<int>(ceil(log(scale) / log(2)));
122
+ }
123
+ }
124
+
125
+ // Cut off small scale
126
+ if (scale < SMALL_SCALE_THRESHOLD) {
127
+ float org_scale = scale;
128
+ scale = SMALL_SCALE_THRESHOLD;
129
+ // Adjust the min and max based on the new scale
130
+ if (min == 0.0f) {
131
+ max = SMALL_SCALE_THRESHOLD * (qmax - qmin);
132
+ } else if (max == 0.0f) {
133
+ min = -SMALL_SCALE_THRESHOLD * (qmax - qmin);
134
+ } else {
135
+ float amplifier = SMALL_SCALE_THRESHOLD / org_scale;
136
+ min *= amplifier;
137
+ max *= amplifier;
138
+ }
139
+ }
140
+
141
+ // Zero-point computation.
142
+ // First the initial floating-point computation. The zero-point can be
143
+ // determined from solving an affine equation for any known pair
144
+ // (real value, corresponding quantized value).
145
+ // We know two such pairs: (rmin, qmin) and (rmax, qmax).
146
+ // The arithmetic error on the zero point computed from either pair
147
+ // will be roughly machine_epsilon * (sum of absolute values of terms)
148
+ // so we want to use the variant that adds the smaller terms.
149
+ double zero_point_from_min = qmin - min / static_cast<double>(scale);
150
+ double zero_point_from_max = qmax - max / static_cast<double>(scale);
151
+ double zero_point_from_min_error =
152
+ std::abs(qmin) - std::abs(min / static_cast<double>(scale));
153
+ double zero_point_from_max_error =
154
+ std::abs(qmax) - std::abs(max / static_cast<double>(scale));
155
+ double initial_zero_point =
156
+ zero_point_from_min_error < zero_point_from_max_error
157
+ ? zero_point_from_min
158
+ : zero_point_from_max;
159
+
160
+ // for symmetric quantization (preserve_sparsity == true), we force zero_point
161
+ // to be a middle value between qmin and qmax.
162
+ // If either min or max is 0, then we just use 0 as zero_point.
163
+ if (min < 0 && max > 0 && preserve_sparsity) {
164
+ initial_zero_point = static_cast<double>(qmin + qmax) / 2;
165
+ }
166
+
167
+ // Now we need to nudge the zero point to be an integer
168
+ // (our zero points are integer, and this is motivated by the requirement
169
+ // to be able to represent the real value "0" exactly as a quantized value,
170
+ // which is required in multiple places, for example in Im2col with zero
171
+ // padding).
172
+ int32_t nudged_zero_point = 0;
173
+ if (initial_zero_point < qmin) {
174
+ nudged_zero_point = qmin;
175
+ } else if (initial_zero_point > qmax) {
176
+ nudged_zero_point = qmax;
177
+ } else {
178
+ nudged_zero_point = nearbyint(initial_zero_point);
179
+ }
180
+
181
+ TensorQuantizationParams result;
182
+ result.scale = scale;
183
+ result.zero_point = nudged_zero_point;
184
+ return result;
185
+ }
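As a worked example of the affine mapping computed above (an illustrative sketch, not part of the header; assumes this file's quant_utils namespace is available):

    // ChooseQuantizationParams(min=-1.0f, max=2.0f, qmin=0, qmax=255):
    //   scale               = (2.0 - (-1.0)) / (255 - 0) ~= 0.011765
    //   zero_point_from_min = 0   - (-1.0 / scale)        = 85.0
    //   zero_point_from_max = 255 - ( 2.0 / scale)        = 85.0
    //   nudged zero_point   = nearbyint(85.0)             = 85
    // so the real value 0.0 maps exactly to the quantized value 85.
    const auto qparams = quant_utils::ChooseQuantizationParams(-1.0f, 2.0f, 0, 255);
    // qparams.scale ~= 0.011765, qparams.zero_point == 85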
186
+
187
+ // This function helps convert Conv1D dimension arguments into a form usable by the Conv2d op.
188
+ constexpr int64_t kConv1dSqueezeDim = 0;
189
+ static C10_UNUSED torch::List<int64_t> MakeArgForConv1d(const torch::List<int64_t>& arg,
190
+ int64_t base_value) {
191
+ TORCH_CHECK(!arg.empty(), "Argument must have elements.");
192
+ torch::List<int64_t> result({arg.get(0), base_value});
193
+ if (arg.size() == 1) {
194
+ result[1] = arg.get(0);
195
+ } else {
196
+ result[1] = arg.get(1);
197
+ }
198
+ result[kConv1dSqueezeDim] = base_value;
199
+ return result;
200
+ }
201
+
202
+ // FP16 quantization of weights requires that the elements be in the range
203
+ // [5.96e-8, 65504]. Elements outside this range are saturated to the max or
204
+ // min value representable in FP16.
205
+ inline void HandleWeightsSaturation(int64_t N, float* weight) {
206
+ const float kFp16Max = RawUint16ToFp16(0x7BFF);
207
+ bool found_out_of_range = false;
208
+ for (const auto i : c10::irange(N)) {
209
+ bool saturate = CheckAndSaturate<float>(kFp16Max, weight + i);
210
+ if (saturate) {
211
+ found_out_of_range = true;
212
+ }
213
+ }
214
+ if (found_out_of_range) {
215
+ TORCH_WARN("FOUND weight out of range ");
216
+ }
217
+ }
218
+
219
+ // Util function for quantizing bias.
220
+ inline at::Tensor QuantizeBias(
221
+ bool is_per_channel,
222
+ const at::Tensor& bias,
223
+ const at::Tensor& weight_contig,
224
+ double input_scale) {
225
+ at::Tensor qbias;
226
+ if (is_per_channel) {
227
+ auto bias_quant_scales =
228
+ weight_contig.q_per_channel_scales() * input_scale;
229
+ auto bias_zp = at::zeros(bias_quant_scales.sizes(), c10::kInt);
230
+ qbias = at::native::quantize_per_channel(
231
+ bias, bias_quant_scales, bias_zp, 0, c10::kQInt32);
232
+ } else {
233
+ qbias = at::native::quantize_per_tensor(
234
+ bias, weight_contig.q_scale() * input_scale, 0, c10::kQInt32);
235
+ }
236
+ return qbias;
237
+ }
238
+
239
+ } // namespace quant_utils
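A small sketch of how MakeArgForConv1d above expands a 1-d argument list so Conv1d can reuse the Conv2d kernels (assumes this header is included):

    // A single Conv1d stride of 2 becomes {1, 2}: the squeezed dummy dimension
    // (kConv1dSqueezeDim == 0) gets base_value, the width keeps the original stride.
    torch::List<int64_t> stride_1d({2});
    torch::List<int64_t> stride_2d = quant_utils::MakeArgForConv1d(stride_1d, /*base_value=*/1);
    // stride_2d == {1, 2}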
venv/lib/python3.10/site-packages/torch/include/ATen/native/quantized/cpu/QuantizedOps.h ADDED
@@ -0,0 +1,258 @@
1
+ #pragma once
2
+ #include <ATen/core/Tensor.h>
3
+ #include <ATen/core/IListRef.h>
4
+ #include <ATen/Dispatch.h>
5
+ #include <ATen/TensorIterator.h>
6
+ #include <ATen/native/Activation.h>
7
+ #include <ATen/native/DispatchStub.h>
8
+
9
+ namespace at {
10
+ namespace native {
11
+
12
+ using qrelu_fn = void (*)(const at::Tensor& /*qx*/, at::Tensor& /*qy*/);
13
+ using qrelu_leaky_fn = void (*)(Tensor& /*out*/, const Tensor& /*qx*/,
14
+ const Scalar& /*negval_*/);
15
+ using qgelu_fn = void (*)(const at::Tensor& /*qx*/, at::Tensor& /*qy*/, GeluType /* approximate */);
16
+ using qsigmoid_fn = void (*)(const at::Tensor& /*qx*/, at::Tensor& /*qy*/, double output_scale, int64_t output_zero_point);
17
+ using qhardsigmoid_fn = void (*)(const at::Tensor& /*qx*/, at::Tensor& /*qy*/);
18
+ using qclamp_fn = void (*)(
19
+ const at::Tensor& /*qx*/,
20
+ const Scalar& min,
21
+ const Scalar& max,
22
+ at::Tensor& /*qy*/);
23
+ using qclamp_minmax_fn = void (*)(
24
+ const at::Tensor& /*qx*/,
25
+ const Scalar& /*min or max*/,
26
+ at::Tensor& /*qy*/);
27
+ using qthreshold_fn = void (*)(
28
+ const at::Tensor& /*qx*/,
29
+ const Scalar& threshold,
30
+ const Scalar& value,
31
+ at::Tensor& /*qy*/);
32
+ using qtanh_fn = void (*)(const at::Tensor& /*qx*/, at::Tensor& /*qy*/);
33
+ using qelu_fn = void(*)(
34
+ const at::Tensor& /*qx*/,
35
+ const Scalar& /*alpha*/,
36
+ const Scalar& /*scale*/,
37
+ const Scalar& /*input_scale*/,
38
+ at::Tensor& /*qy*/);
39
+ using qbinary_fn =
40
+ void (*)(Tensor& /*out*/, const Tensor& /*self*/, const Tensor& /*other*/);
41
+ using qadd_scalar_fn =
42
+ void (*)(Tensor& /*out*/, const Tensor& /*self*/, const Scalar& other /*other*/);
43
+ using qhardswish_fn = void (*)(const at::Tensor& /*qx*/, at::Tensor& /*qy*/);
44
+ using qdropout_fn = void(*)(
45
+ const at::Tensor& /*qx*/,
46
+ const Scalar& /*p*/,
47
+ bool training /*training*/,
48
+ at::Tensor& /*qy*/);
49
+ using qmaxpool_2d_fn = void (*)(
50
+ const Tensor& qx,
51
+ int64_t iC, // input/output channels
52
+ int64_t iH,
53
+ int64_t iW, // input sizes
54
+ int64_t oH,
55
+ int64_t oW, // output sizes
56
+ int64_t kH,
57
+ int64_t kW, // kernel size
58
+ int64_t sH,
59
+ int64_t sW, // strides
60
+ int64_t pH,
61
+ int64_t pW, // padding
62
+ int64_t dH,
63
+ int64_t dW, // dilation
64
+ Tensor& qy);
65
+ using qmaxpool_3d_fn = void (*)(
66
+ const Tensor& qx,
67
+ int64_t iC, // input/output channels
68
+ int64_t iT,
69
+ int64_t iH,
70
+ int64_t iW, // input sizes
71
+ int64_t oT,
72
+ int64_t oH,
73
+ int64_t oW, // output sizes
74
+ int64_t kT,
75
+ int64_t kH,
76
+ int64_t kW, // kernel size
77
+ int64_t sT,
78
+ int64_t sH,
79
+ int64_t sW, // strides
80
+ int64_t pT,
81
+ int64_t pH,
82
+ int64_t pW, // padding
83
+ int64_t dT,
84
+ int64_t dH,
85
+ int64_t dW, // dilation
86
+ Tensor& qy);
87
+ using qadaptive_avg_pool2d_fn = void (*)(
88
+ const Tensor& qx,
89
+ Tensor& qy,
90
+ int64_t sizeB,
91
+ int64_t sizeC,
92
+ int64_t isizeH,
93
+ int64_t isizeW,
94
+ int64_t osizeH,
95
+ int64_t osizeW,
96
+ int64_t istrideB,
97
+ int64_t istrideC,
98
+ int64_t istrideH,
99
+ int64_t istrideW);
100
+ using qadaptive_avg_pool3d_fn = void (*)(
101
+ const Tensor& qx,
102
+ Tensor& qy,
103
+ int64_t sizeB,
104
+ int64_t sizeC,
105
+ int64_t isizeD,
106
+ int64_t isizeH,
107
+ int64_t isizeW,
108
+ int64_t osizeD,
109
+ int64_t osizeH,
110
+ int64_t osizeW,
111
+ int64_t istrideB,
112
+ int64_t istrideC,
113
+ int64_t istrideD,
114
+ int64_t istrideH,
115
+ int64_t istrideW);
116
+ using qavg_pool2d_fn = void (*)(
117
+ const Tensor& qx,
118
+ Tensor& qy,
119
+ int64_t nBatch,
120
+ int64_t nInputPlane,
121
+ int64_t inputWidth,
122
+ int64_t inputHeight,
123
+ int64_t outputWidth,
124
+ int64_t outputHeight,
125
+ int kW,
126
+ int kH,
127
+ int dW,
128
+ int dH,
129
+ int padW,
130
+ int padH,
131
+ bool count_include_pad,
132
+ c10::optional<int64_t> divisor_override);
133
+
134
+ using qavg_pool3d_fn = void (*)(
135
+ const Tensor& qx,
136
+ Tensor& qy,
137
+ int64_t nBatch,
138
+ int64_t nInputPlane,
139
+ int64_t inputWidth,
140
+ int64_t inputHeight,
141
+ int64_t inputDepth,
142
+ int64_t outputWidth,
143
+ int64_t outputHeight,
144
+ int64_t outputDepth,
145
+ int kW,
146
+ int kH,
147
+ int kD,
148
+ int dW,
149
+ int dH,
150
+ int dD,
151
+ int padW,
152
+ int padH,
153
+ int padD,
154
+ bool count_include_pad,
155
+ c10::optional<int64_t> divisor_override);
156
+
157
+ using qupsample_bilinear2d_fn = void (*)(
158
+ Tensor& output,
159
+ const Tensor& input,
160
+ int64_t input_height,
161
+ int64_t input_width,
162
+ int64_t output_height,
163
+ int64_t output_width,
164
+ int64_t nbatch,
165
+ int64_t channels,
166
+ bool align_corners,
167
+ c10::optional<double> scales_h,
168
+ c10::optional<double> scales_w);
169
+
170
+ using qcat_nhwc_fn = Tensor (*)(
171
+ const MaterializedITensorListRef& qxs,
172
+ int64_t dim,
173
+ double scale,
174
+ int64_t zero_point);
175
+ using qtopk_fn = void(*)(Tensor&, Tensor&, const Tensor&, int64_t, int64_t, bool, bool);
176
+
177
+ using qbatch_norm_fn = void(*)(int64_t, int64_t, int64_t, int64_t, int64_t, const Tensor&, const Tensor&, const Tensor&, Tensor&);
178
+
179
+ using qnormalize_fn = void (*)(
180
+ const Tensor& /* X */,
181
+ const Tensor& /* gamma */,
182
+ const Tensor& /* beta */,
183
+ bool /* affine_per_channel */,
184
+ int /* num_channels */,
185
+ int /* num_groups */,
186
+ int64_t /* M */,
187
+ int64_t /* N */,
188
+ double /* eps */,
189
+ Tensor* /* Y */);
190
+
191
+ using qmean_inner_dim_fn = void (*)(
192
+ const Tensor& /* X */,
193
+ OptionalIntArrayRef /* opt_dim */,
194
+ bool /* keepdim */,
195
+ c10::optional<ScalarType> /* opt_dtype */,
196
+ Tensor& /* Y */);
197
+
198
+ using qstd_inner_dim_fn = void (*)(
199
+ const Tensor& /* X */,
200
+ OptionalIntArrayRef /* dim */,
201
+ const c10::optional<Scalar>& /* correction */,
202
+ bool /* keepdim */,
203
+ Tensor& /* Y */);
204
+
205
+ using qnormalize_nhwc_fn = void (*)(
206
+ const Tensor& /* X */,
207
+ const Tensor& /* gamma */,
208
+ const Tensor& /* beta */,
209
+ bool /* affine_per_channel */,
210
+ int /* num_channels */,
211
+ int /* num_groups */,
212
+ int64_t /* M */,
213
+ int64_t /* N */,
214
+ double /* eps */,
215
+ Tensor* /* Y */);
216
+
217
+ using qprelu_fn = void (*)(Tensor& /*out*/, const Tensor& /*qx*/,
218
+ const Tensor& /*qw*/);
219
+
220
+ DECLARE_DISPATCH(qadaptive_avg_pool2d_fn, qadaptive_avg_pool2d_nhwc_stub);
221
+ DECLARE_DISPATCH(qadaptive_avg_pool3d_fn, qadaptive_avg_pool3d_ndhwc_stub);
222
+ DECLARE_DISPATCH(qadd_scalar_fn, qadd_scalar_relu_stub);
223
+ DECLARE_DISPATCH(qadd_scalar_fn, qadd_scalar_stub);
224
+ DECLARE_DISPATCH(qavg_pool2d_fn, qavg_pool2d_nhwc_stub);
225
+ DECLARE_DISPATCH(qavg_pool3d_fn, qavg_pool3d_nhwc_stub);
226
+ DECLARE_DISPATCH(qbatch_norm_fn, qbatch_norm_relu_stub);
227
+ DECLARE_DISPATCH(qbatch_norm_fn, qbatch_norm_stub);
228
+ DECLARE_DISPATCH(qbinary_fn, qadd_relu_stub);
229
+ DECLARE_DISPATCH(qbinary_fn, qadd_stub);
230
+ DECLARE_DISPATCH(qbinary_fn, qmul_relu_stub);
231
+ DECLARE_DISPATCH(qbinary_fn, qmul_stub);
232
+ DECLARE_DISPATCH(qcat_nhwc_fn, qcat_nhwc_stub);
233
+ DECLARE_DISPATCH(qcat_nhwc_fn, qcat_relu_nhwc_stub);
234
+ DECLARE_DISPATCH(qclamp_fn, qclamp_stub);
235
+ DECLARE_DISPATCH(qclamp_minmax_fn, qclamp_min_stub);
236
+ DECLARE_DISPATCH(qclamp_minmax_fn, qclamp_max_stub);
237
+ DECLARE_DISPATCH(qelu_fn, qelu_stub);
238
+ DECLARE_DISPATCH(qhardsigmoid_fn, qhardsigmoid_stub);
239
+ DECLARE_DISPATCH(qhardswish_fn, qhardswish_stub);
240
+ DECLARE_DISPATCH(qdropout_fn, qdropout_stub);
241
+ DECLARE_DISPATCH(qmaxpool_2d_fn, qmaxpool_2d_nhwc_stub);
242
+ DECLARE_DISPATCH(qmaxpool_3d_fn, qmaxpool_3d_nthwc_stub);
243
+ DECLARE_DISPATCH(qnormalize_fn, quantized_normalize_stub);
244
+ DECLARE_DISPATCH(qnormalize_nhwc_fn, quantized_groupnorm_nhwc_stub);
245
+ DECLARE_DISPATCH(qrelu_fn, qrelu_stub);
246
+ DECLARE_DISPATCH(qrelu_leaky_fn, qrelu_leaky_stub);
247
+ DECLARE_DISPATCH(qgelu_fn, qgelu_stub);
248
+ DECLARE_DISPATCH(qsigmoid_fn, qsigmoid_stub);
249
+ DECLARE_DISPATCH(qtanh_fn, qtanh_stub);
250
+ DECLARE_DISPATCH(qthreshold_fn, qthreshold_stub);
251
+ DECLARE_DISPATCH(qtopk_fn, qtopk_stub);
252
+ DECLARE_DISPATCH(qupsample_bilinear2d_fn, qupsample_bilinear2d_nhwc_stub);
253
+ DECLARE_DISPATCH(qmean_inner_dim_fn, qmean_inner_dim_stub);
254
+ DECLARE_DISPATCH(qstd_inner_dim_fn, qstd_inner_dim_stub);
255
+ DECLARE_DISPATCH(qprelu_fn, qprelu_stub);
256
+
257
+ } // namespace native
258
+ } // namespace at
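A standalone sketch of the function-pointer dispatch pattern the q*_fn aliases and DECLARE_DISPATCH entries above follow; in ATen the stub is filled in per architecture via REGISTER_DISPATCH from DispatchStub.h, while the type and kernel below are simplified stand-ins:

    #include <iostream>

    // Stand-in for a q*_fn alias: a plain function-pointer type.
    using relu_like_fn = void (*)(const int& /*qx*/, int& /*qy*/);

    // Stand-in kernel, analogous to a CPU implementation registered for qrelu_stub.
    static void cpu_relu_like(const int& qx, int& qy) { qy = qx > 0 ? qx : 0; }

    int main() {
      relu_like_fn stub = &cpu_relu_like;  // REGISTER_DISPATCH plays this role in ATen
      int qx = -3, qy = 0;
      stub(qx, qy);
      std::cout << qy << "\n";  // prints 0
      return 0;
    }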
venv/lib/python3.10/site-packages/torch/include/ATen/native/quantized/cpu/RuyUtils.h ADDED
@@ -0,0 +1,21 @@
1
+ #pragma once
2
+
3
+ #ifdef USE_RUY_QMATMUL
4
+
5
+ #include <ruy/ruy.h>
6
+
7
+ namespace at {
8
+ namespace native {
9
+ namespace ruy_utils {
10
+
11
+ ruy::Context* get_ruy_context();
12
+
13
+ void quantize_multiplier(double scale,
14
+ int* multiplier_fixedpoint,
15
+ int* multiplier_exponent);
16
+
17
+ } // namespace ruy_utils
18
+ } // namespace native
19
+ } // namespace at
20
+
21
+ #endif // USE_RUY_QMATMUL
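An illustrative sketch of the kind of fixed-point decomposition quantize_multiplier above is expected to perform (scale ~= multiplier * 2^exponent / 2^31); the helper below is a hypothetical stand-in, not the Ruy-backed implementation:

    #include <cmath>
    #include <cstdint>
    #include <iostream>

    // Split a positive double scale into a Q31 fixed-point multiplier and an exponent.
    void decompose_scale(double scale, std::int32_t* fixedpoint, int* exponent) {
      const double mantissa = std::frexp(scale, exponent);  // scale = mantissa * 2^exponent, mantissa in [0.5, 1)
      std::int64_t q = static_cast<std::int64_t>(std::round(mantissa * (1ll << 31)));
      if (q == (1ll << 31)) {  // mantissa rounded up to 1.0: renormalize
        q /= 2;
        ++*exponent;
      }
      *fixedpoint = static_cast<std::int32_t>(q);
    }

    int main() {
      std::int32_t multiplier = 0;
      int exponent = 0;
      decompose_scale(0.0117647, &multiplier, &exponent);
      std::cout << "scale ~= " << multiplier << " * 2^" << exponent << " / 2^31\n";
      return 0;
    }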
venv/lib/python3.10/site-packages/torch/include/ATen/native/quantized/cpu/XnnpackUtils.h ADDED
@@ -0,0 +1,335 @@
1
+ #pragma once
2
+
3
+ #ifdef USE_XNNPACK
4
+ #include <cstdint>
5
+
6
+ #include <ATen/core/Tensor.h>
7
+ #include <ATen/native/xnnpack/Common.h>
8
+
9
+ using xnnpack_operator = at::native::xnnpack::Operator;
10
+
11
+ namespace at {
12
+ namespace native {
13
+ namespace xnnp_utils {
14
+
15
+ /*
16
+ * Return shape in the same order as the memory format
17
+ * e.g. channels_last will return NHWC instead of NCHW
18
+ */
19
+ std::vector<size_t> get_mem_format_aware_shape(const at::Tensor& in);
20
+
21
+ /*
22
+ * Input is always int8_t, output can be [int8_t, uint8_t].
23
+ * input + offset = output
24
+ * int8_t + 128 = uint8_t
25
+ * int8_t + 0 = int8_t
26
+ */
27
+ template <typename PT>
28
+ void q8_copy_int8_weight_and_add_offset(const at::Tensor& in, at::Tensor& out);
29
+
30
+ template <int kSpatialDim>
31
+ Tensor convert_conv_weights_to_channel_last_tensor(
32
+ const at::Tensor& src,
33
+ int groups,
34
+ bool transpose);
35
+
36
+ /*
37
+ * Series of create wrapper functions to call xnn_create_[de]conv* functions.
38
+ */
39
+ C10_ALWAYS_INLINE
40
+ enum xnn_status xnnp_create_convolution2d_nhwc(
41
+ uint32_t pad_top,
42
+ uint32_t pad_right,
43
+ uint32_t pad_bottom,
44
+ uint32_t pad_left,
45
+ uint32_t kernel_h,
46
+ uint32_t kernel_w,
47
+ uint32_t stride_h,
48
+ uint32_t stride_w,
49
+ uint32_t dilation_h,
50
+ uint32_t dilation_w,
51
+ uint32_t groups,
52
+ size_t group_input_channels,
53
+ size_t group_output_channels,
54
+ size_t ip_chan_stride,
55
+ size_t op_chan_stride,
56
+ int8_t izp,
57
+ float ip_scale,
58
+ int8_t kzp,
59
+ const float* k_scales,
60
+ const int8_t* kernel,
61
+ const int32_t* bias,
62
+ int8_t ozp,
63
+ float op_scale,
64
+ int8_t op_min,
65
+ int8_t op_max,
66
+ uint32_t flags,
67
+ xnn_operator_t* op,
68
+ bool per_channel,
69
+ bool transpose) {
70
+ /* Symmetric quantization forces kzp = 0 */
71
+ TORCH_CHECK(!kzp, "XNNPACK Q[SC]8 conv kernels expects kernel zero point to be zero."
72
+ "But got: ", kzp);
73
+
74
+ if (transpose) {
75
+ TORCH_CHECK(!per_channel, "XNNPACK Q[SC]8 does not have a per channel deconvolution!");
76
+ return xnn_create_deconvolution2d_nhwc_qs8(
77
+ pad_top, /* uint32_t output_padding_top */
78
+ pad_right, /* uint32_t output_padding_right */
79
+ pad_bottom, /* uint32_t output_padding_bottom */
80
+ pad_left, /* uint32_t output_padding_left */
81
+ kernel_h, /* uint32_t kernel_height */
82
+ kernel_w, /* uint32_t kernel_width */
83
+ stride_h, /* uint32_t stride_height */
84
+ stride_w, /* uint32_t stride_width */
85
+ dilation_h, /* uint32_t dilation_height */
86
+ dilation_w, /* uint32_t dilation_width */
87
+ groups, /* uint32_t groups */
88
+ group_input_channels, /* size_t group_input_channels */
89
+ group_output_channels, /* size_t group_output_channels */
90
+ ip_chan_stride, /* size_t input_pixel_stride */
91
+ op_chan_stride, /* size_t output_pixel_stride */
92
+ izp, /* int8_t input_zero_point */
93
+ ip_scale, /* float input_scale */
94
+ k_scales[0], /* float kernel_scale */
95
+ kernel, /* const int8_t* kernel */
96
+ bias, /* const int32_t* bias */
97
+ ozp, /* int8_t output_zero_point */
98
+ op_scale, /* float output_scale */
99
+ op_min, /* int8_t output_min */
100
+ op_max, /* int8_t output_max */
101
+ flags, /* uint32_t flags */
102
+ nullptr, /* xnn_caches_t caches */
103
+ nullptr, /* xnn_weights_cache_t weights_cache */
104
+ op); /* xnn_operator_t* deconvolution_op_out */
105
+
106
+ }
107
+
108
+ if (!per_channel) {
109
+ return xnn_create_convolution2d_nhwc_qs8(
110
+ pad_top, /* uint32_t input_padding_top */
111
+ pad_right, /* uint32_t input_padding_right */
112
+ pad_bottom, /* uint32_t input_padding_bottom */
113
+ pad_left, /* uint32_t input_padding_left */
114
+ kernel_h, /* uint32_t kernel_height */
115
+ kernel_w, /* uint32_t kernel_width */
116
+ stride_h, /* uint32_t subsampling_height */
117
+ stride_w, /* uint32_t subsampling_width */
118
+ dilation_h, /* uint32_t dilation_height */
119
+ dilation_w, /* uint32_t dilation_width */
120
+ groups, /* uint32_t groups */
121
+ group_input_channels, /* size_t group_input_channels */
122
+ group_output_channels, /* size_t group_output_channels*/
123
+ ip_chan_stride, /* size_t input_channel_stride */
124
+ op_chan_stride, /* size_t output_channel_stride */
125
+ izp, /* int8_t input_zero_point */
126
+ ip_scale, /* float input_scale */
127
+ k_scales[0], /* float kernel_scale */
128
+ kernel, /* const int8_t* kernel */
129
+ bias, /* const int32_t* bias */
130
+ ozp, /* int8_t output_zero_point */
131
+ op_scale, /* float output_scale */
132
+ op_min, /* int8_t output_min */
133
+ op_max, /* int8_t output_max */
134
+ flags, /* uint32_t flags */
135
+ nullptr, /* xnn_caches_t caches */
136
+ nullptr, /* xnn_weights_cache_t weights_cache */
137
+ op); /* xnn_operator_t* convolution_op_out */
138
+ } else { /* per_channel */
139
+ return xnn_create_convolution2d_nhwc_qs8_qc8w(
140
+ pad_top, /* uint32_t input_padding_top */
141
+ pad_right, /* uint32_t input_padding_right */
142
+ pad_bottom, /* uint32_t input_padding_bottom */
143
+ pad_left, /* uint32_t input_padding_left */
144
+ kernel_h, /* uint32_t kernel_height */
145
+ kernel_w, /* uint32_t kernel_width */
146
+ stride_h, /* uint32_t subsampling_height */
147
+ stride_w, /* uint32_t subsampling_width */
148
+ dilation_h, /* uint32_t dilation_height */
149
+ dilation_w, /* uint32_t dilation_width */
150
+ groups, /* uint32_t groups */
151
+ group_input_channels, /* size_t group_input_channels */
152
+ group_output_channels, /* size_t group_output_channels*/
153
+ ip_chan_stride, /* size_t input_channel_stride */
154
+ op_chan_stride, /* size_t output_channel_stride */
155
+ izp, /* int8_t input_zero_point */
156
+ ip_scale, /* float input_scale */
157
+ k_scales, /* const float* kernel_scale */
158
+ kernel, /* const int8_t* kernel */
159
+ bias, /* const int32_t* bias */
160
+ ozp, /* int8_t output_zero_point */
161
+ op_scale, /* float output_scale */
162
+ op_min, /* int8_t output_min */
163
+ op_max, /* int8_t output_max */
164
+ flags, /* uint32_t flags */
165
+ nullptr, /* xnn_caches_t caches */
166
+ nullptr, /* xnn_weights_cache_t weights_cache */
167
+ op); /* xnn_operator_t* convolution_op_out */
168
+ }
169
+ }
170
+
171
+ /*
172
+ * Series of reshape wrapper functions to call xnn_reshape_[de]conv* functions.
173
+ */
174
+ C10_ALWAYS_INLINE
175
+ enum xnn_status xnnp_reshape_convolution2d_nhwc(
176
+ xnn_operator_t op,
177
+ size_t batch,
178
+ size_t in_h,
179
+ size_t in_w,
180
+ pthreadpool_t pt_pool,
181
+ bool per_channel = false,
182
+ bool transpose = false,
183
+ uint32_t adj_h = 0,
184
+ uint32_t adj_w = 0) {
185
+ if(transpose) {
186
+ TORCH_CHECK(!per_channel, "XNNPACK Q[SC]8 does not have a per channel deconvolution!");
187
+ return xnn_reshape_deconvolution2d_nhwc_qs8(
188
+ op, /* xnn_operator_t deconvolution_op */
189
+ batch, /* size_t batch_size */
190
+ in_h, /* size_t input_height */
191
+ in_w, /* size_t input_width */
192
+ adj_h, /* uint32_t adjustment_height */
193
+ adj_w, /* uint32_t adjustment_width */
194
+ nullptr, /* size_t* output_height_out */
195
+ nullptr, /* size_t* output_width_out */
196
+ pt_pool); /* pthreadpool_t threadpool */
197
+ }
198
+
199
+ size_t workspace_size = SIZE_MAX;
200
+ size_t workspace_alignment = SIZE_MAX;
201
+
202
+ if (!per_channel) {
203
+ return xnn_reshape_convolution2d_nhwc_qs8(
204
+ op, /* xnn_operator_t convolution_op */
205
+ batch, /* size_t batch_size */
206
+ in_h, /* size_t input_height */
207
+ in_w, /* size_t input_width */
208
+ &workspace_size, /* size_t* workspace_size */
209
+ &workspace_alignment, /* size_t* workspace_alignment */
210
+ nullptr, /* size_t* output_height_out */
211
+ nullptr, /* size_t* output_width_out */
212
+ pt_pool); /* pthreadpool_t threadpool */
213
+ } else { /* per_channel */
214
+ return xnn_reshape_convolution2d_nhwc_qs8_qc8w(
215
+ op, /* xnn_operator_t convolution_op */
216
+ batch, /* size_t batch_size */
217
+ in_h, /* size_t input_height */
218
+ in_w, /* size_t input_width */
219
+ &workspace_size, /* size_t* workspace_size */
220
+ &workspace_alignment, /* size_t* workspace_alignment */
221
+ nullptr, /* size_t* output_height_out */
222
+ nullptr, /* size_t* output_width_out */
223
+ pt_pool); /* pthreadpool_t threadpool */
224
+ }
225
+ }
226
+
227
+
228
+ /*
229
+ * Series of setup wrapper functions to call xnn_setup_[de]conv* functions.
230
+ */
231
+ C10_ALWAYS_INLINE
232
+ enum xnn_status xnnp_setup_convolution2d_nhwc(
233
+ xnn_operator_t op,
234
+ const int8_t* inp,
235
+ int8_t* outp,
236
+ bool per_channel = false,
237
+ bool transpose = false) {
238
+ if(transpose) {
239
+ TORCH_CHECK(!per_channel, "XNNPACK Q[SC]8 does not have a per channel deconvolution!");
240
+
241
+ return xnn_setup_deconvolution2d_nhwc_qs8(
242
+ op, /* xnn_operator_t deconvolution_op */
243
+ inp, /* const int8_t* input */
244
+ outp); /* int8_t* output */
245
+ }
246
+
247
+ if (!per_channel) {
248
+ return xnn_setup_convolution2d_nhwc_qs8(
249
+ op, /* xnn_operator_t deconvolution_op */
250
+ nullptr, /* void workspace */
251
+ inp, /* const int8_t* input */
252
+ outp); /* int8_t* output */
253
+ } else { /* per_channel */
254
+ return xnn_setup_convolution2d_nhwc_qs8_qc8w(
255
+ op, /* xnn_operator_t deconvolution_op */
256
+ nullptr, /* void workspace */
257
+ inp, /* const int8_t* input */
258
+ outp); /* int8_t* output */
259
+ }
260
+ }
261
+
262
+
263
+ /*
264
+ * Series of wrapper functions to call xnn_create* and xnn_setup*
265
+ * functions for linear
266
+ */
267
+ C10_ALWAYS_INLINE
268
+ enum xnn_status xnnp_create_fully_connected_nc(
269
+ size_t input_channels,
270
+ size_t output_channels,
271
+ size_t input_stride,
272
+ size_t output_stride,
273
+ int8_t input_zero_point,
274
+ float input_scale,
275
+ int8_t kernel_zero_point,
276
+ float kernel_scale,
277
+ const int8_t* kernel,
278
+ const int32_t* bias,
279
+ int8_t output_zero_point,
280
+ float output_scale,
281
+ int8_t output_min,
282
+ int8_t output_max,
283
+ uint32_t flags,
284
+ xnn_operator_t* fully_connected_op_out) {
285
+ /* Symmetric quantization forces kzp = 0 */
286
+ TORCH_CHECK(!kernel_zero_point, "XNNPACK QS8 linear kernel expects the kernel zero point to be zero. "
287
+ "But got: ", kernel_zero_point);
288
+ return xnn_create_fully_connected_nc_qs8(
289
+ input_channels, /* size_t input_channels */
290
+ output_channels, /* size_t output_channels */
291
+ input_stride, /* size_t input_stride */
292
+ output_stride, /* size_t output_stride */
293
+ input_zero_point, /* int8_t input_zero_point */
294
+ input_scale, /* float input_scale */
295
+ kernel_scale, /* float kernel_scale */
296
+ kernel, /* const int8_t* kernel */
297
+ bias, /* const int32_t* bias */
298
+ output_zero_point, /* int8_t output_zero_point */
299
+ output_scale, /* float output_scale */
300
+ output_min, /* int8_t output_min */
301
+ output_max, /* int8_t output_max */
302
+ flags, /* uint32_t flags */
303
+ nullptr, /* xnn_caches_t caches */
304
+ nullptr, /* xnn_weights_cache_t */
305
+ fully_connected_op_out); /* xnn_operator_t* fully_connected_op_out */
306
+ }
307
+
308
+ C10_ALWAYS_INLINE
309
+ enum xnn_status xnnp_reshape_fully_connected_nc(
310
+ xnn_operator_t fully_connected_op,
311
+ size_t batch_size,
312
+ pthreadpool_t threadpool) {
313
+ return xnn_reshape_fully_connected_nc_qs8(
314
+ fully_connected_op, /* xnn_operator_t fully_connected_op */
315
+ batch_size, /* size_t batch_size */
316
+ threadpool); /* pthreadpool_t threadpool */
317
+ }
318
+
319
+ C10_ALWAYS_INLINE
320
+ enum xnn_status xnnp_setup_fully_connected_nc(
321
+ xnn_operator_t fully_connected_op,
322
+ const int8_t* input,
323
+ int8_t* output) {
324
+ return xnn_setup_fully_connected_nc_qs8(
325
+ fully_connected_op, /* xnn_operator_t fully_connected_op */
326
+ input, /* const int8_t* input */
327
+ output /* int8_t* output */
328
+ );
329
+ }
330
+
331
+ } // namespace xnnp_utils
332
+ } // namespace native
333
+ } // namespace at
334
+
335
+ #endif // USE_XNNPACK
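For orientation, a hedged sketch of how the create/reshape/setup wrappers above are typically chained for a per-tensor, non-transposed convolution. Every value is a placeholder, the function name is hypothetical, and the final xnn_run_operator call is the standard XNNPACK execution entry point rather than something declared in this header:

    #include <xnnpack.h>
    #include <pthreadpool.h>
    enum xnn_status run_qs8_conv_once(
        size_t N, size_t H, size_t W, size_t Cin, size_t Cout,
        int8_t izp, float ip_scale, const float* k_scales,
        const int8_t* packed_kernel, const int32_t* bias,
        int8_t ozp, float op_scale,
        const int8_t* input, int8_t* output, pthreadpool_t pool) {
      xnn_operator_t op = nullptr;
      // 1. Create the operator once from weights and quantization parameters.
      enum xnn_status st = at::native::xnnp_utils::xnnp_create_convolution2d_nhwc(
          /*pad_top=*/1, /*pad_right=*/1, /*pad_bottom=*/1, /*pad_left=*/1,
          /*kernel_h=*/3, /*kernel_w=*/3, /*stride_h=*/1, /*stride_w=*/1,
          /*dilation_h=*/1, /*dilation_w=*/1, /*groups=*/1,
          /*group_input_channels=*/Cin, /*group_output_channels=*/Cout,
          /*ip_chan_stride=*/Cin, /*op_chan_stride=*/Cout,
          izp, ip_scale, /*kzp=*/0, k_scales, packed_kernel, bias,
          ozp, op_scale, /*op_min=*/-128, /*op_max=*/127, /*flags=*/0, &op,
          /*per_channel=*/false, /*transpose=*/false);
      if (st != xnn_status_success) return st;
      // 2. Reshape for the current batch/spatial size, 3. bind pointers, 4. run.
      st = at::native::xnnp_utils::xnnp_reshape_convolution2d_nhwc(op, N, H, W, pool);
      if (st != xnn_status_success) return st;
      st = at::native::xnnp_utils::xnnp_setup_convolution2d_nhwc(op, input, output);
      if (st != xnn_status_success) return st;
      return xnn_run_operator(op, pool);
    }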
venv/lib/python3.10/site-packages/torch/include/ATen/native/quantized/cpu/conv_serialization.h ADDED
@@ -0,0 +1,414 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/Tensor.h>
4
+ #include <ATen/core/List.h>
5
+ #include <ATen/native/quantized/cpu/fbgemm_utils.h>
6
+ #include <ATen/native/quantized/cpu/QnnpackUtils.h>
7
+ #include <ATen/native/quantized/cpu/OnednnUtils.h>
8
+ #include <c10/util/irange.h>
9
+ #if !defined(__s390x__) && !defined(__powerpc__)
10
+ #include <cpuinfo.h>
11
+ #endif
12
+
13
+ #ifndef AT_PER_OPERATOR_HEADERS
14
+ #include <ATen/Functions.h>
15
+ #else
16
+ #include <ATen/ops/from_blob.h>
17
+ #endif
18
+
19
+
20
+ #include <tuple>
21
+
22
+ /* Convolution prepacked parameters serialization.
23
+ *
24
+ * Version 1
25
+ *
26
+ * - Fields:
27
+ * 1. weight
28
+ * 2. bias
29
+ * 3. stride x kSpatialDim
30
+ * 4. padding x kSpatialDim
31
+ * 5. dilation x kSpatialDim
32
+ * 6. groups
33
+ *
34
+ * Version 2
35
+ *
36
+ * - Fields:
37
+ * 0. version (string)
38
+ * 1. list of non-optional tensors
39
+ * 0: packed parameters (int16_t)
40
+ * - kSpatialDim
41
+ * - stride x kSpatialDim
42
+ * - padding x kSpatialDim
43
+ * - dilation x kSpatialDim
44
+ * - output_padding x kSpatialDim
45
+ * - groups
46
+ * - transpose (0 or 1)
47
+ * 1: weight
48
+ * 2. list of optional tensors
49
+ * 0: bias
50
+ *
51
+ * Version 3
52
+ *
53
+ * - Fields:
54
+ * 0. version (int64_t)
55
+ * 1. list of int64_t configuration values
56
+ * - kSpatialDim
57
+ * - stride x kSpatialDim
58
+ * - padding x kSpatialDim
59
+ * - dilation x kSpatialDim
60
+ * - output_padding x kSpatialDim
61
+ * - groups
62
+ * - flags (bitmask)
63
+ * - (1 << 0) transpose (1 = yes)
64
+ * 2. list of optional tensors
65
+ * 0: None (helps with type inference)
66
+ * 1: weight (this must be present)
67
+ * 2: bias
68
+ */
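As a concrete illustration (the values are just an example, not taken from any particular model), the Version 3 config_vals for an ordinary 3x3, stride-1 Conv2d with padding 1, dilation 1, groups 1 and no transpose would be laid out as:

    std::vector<int64_t> config_vals = {
        2,      // kSpatialDim
        1, 1,   // stride x kSpatialDim
        1, 1,   // padding x kSpatialDim
        1, 1,   // dilation x kSpatialDim
        0, 0,   // output_padding x kSpatialDim
        1,      // groups
        0       // flags: bit 0 (transpose) not set
    };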
69
+
70
+ using ConvParamsSerializationTypeV2 = std::tuple<
71
+ // version, for versions 2 and up
72
+ std::string,
73
+ // non-optional tensors
74
+ std::vector<at::Tensor>,
75
+ // optional tensors
76
+ std::vector<c10::optional<at::Tensor>>>;
77
+
78
+ using ConvParamsSerializationTypeV3 = std::tuple<
79
+ // version, int for versions 3 and up
80
+ int64_t,
81
+ // configuration values
82
+ std::vector<int64_t>,
83
+ // optional tensors
84
+ std::vector<c10::optional<at::Tensor>>>;
85
+
86
+ // Parses any historical conv packed params format into
87
+ // the current format.
88
+ template <uint32_t kSpatialDim>
89
+ ConvParamsSerializationTypeV3 parse_conv_serialized_state(c10::IValue v) {
90
+
91
+ // determine the version based on IValue contents
92
+ int version = -1;
93
+ if (v.isTuple()) {
94
+ const auto& elements = v.toTupleRef().elements();
95
+ if (!elements.empty()) {
96
+ auto firstElement = elements[0];
97
+ if (firstElement.isTensor()) {
98
+ version = 1;
99
+ } else if (firstElement.isString()) {
100
+ const std::string& version_str = firstElement.toStringRef();
101
+ // note: not parsing the string to automatically handle bad
102
+ // inputs
103
+ if (version_str == "2") {
104
+ version = 2;
105
+ }
106
+ } else if (firstElement.isInt()) {
107
+ auto raw_version = firstElement.toInt();
108
+ if (raw_version == 3) {
109
+ version = 3;
110
+ }
111
+ }
112
+ }
113
+ }
114
+ TORCH_INTERNAL_ASSERT(version != -1, "Unable to parse serialization version");
115
+
116
+ if (version == 1) {
117
+ // version 1 - convert to version 3 manually
118
+
119
+ const auto& elements = v.toTupleRef().elements();
120
+
121
+ at::Tensor weight = elements[0].toTensor();
122
+ c10::optional<at::Tensor> bias = elements[1].toOptional<at::Tensor>();
123
+ torch::List<at::Tensor> stride_x_kSpatialDim = elements[2].toTensorList();
124
+ torch::List<at::Tensor> padding_x_kSpatialDim = elements[3].toTensorList();
125
+ torch::List<at::Tensor> dilation_x_kSpatialDim = elements[4].toTensorList();
126
+ at::Tensor groups = elements[5].toTensor();
127
+
128
+ std::vector<int64_t> config_vals;
129
+ config_vals.reserve(
130
+ stride_x_kSpatialDim.size() + padding_x_kSpatialDim.size() +
131
+ dilation_x_kSpatialDim.size() + kSpatialDim + 3);
132
+ config_vals.push_back(kSpatialDim);
133
+ for (const auto i : c10::irange(stride_x_kSpatialDim.size())) {
134
+ auto stride = stride_x_kSpatialDim.get(i);
135
+ config_vals.push_back(stride[0].item<int16_t>());
136
+ }
137
+ for (const auto i : c10::irange(padding_x_kSpatialDim.size())) {
138
+ auto padding = padding_x_kSpatialDim.get(i);
139
+ config_vals.push_back(padding[0].item<int16_t>());
140
+ }
141
+ for (const auto i : c10::irange(dilation_x_kSpatialDim.size())) {
142
+ auto dilation = dilation_x_kSpatialDim.get(i);
143
+ config_vals.push_back(dilation[0].item<int16_t>());
144
+ }
145
+ // output_padding does not exist in v1, so we fill in a default value
146
+ for (C10_UNUSED const auto i : c10::irange(kSpatialDim)) {
147
+ config_vals.push_back(0);
148
+ }
149
+ config_vals.push_back(groups[0].item<int16_t>());
150
+ // transpose does not exist in v1, so we fill in a default value
151
+ config_vals.push_back(0);
152
+
153
+ std::vector<c10::optional<at::Tensor>> tensors;
154
+ tensors.emplace_back();
155
+ tensors.emplace_back(weight);
156
+ tensors.emplace_back(bias);
157
+
158
+ int64_t version = 3;
159
+ return std::tie(version, config_vals, tensors);
160
+ } else if (version == 2) {
161
+ // version 2
162
+ const auto& elements = v.toTupleRef().elements();
163
+ std::vector<at::Tensor> non_optional = elements[1].toTensorList().vec();
164
+ std::vector<c10::optional<at::Tensor>> optional;
165
+
166
+ if (elements[2].isTensorList()) {
167
+ for (const auto& elem : elements[2].toTensorList()) {
168
+ optional.emplace_back(static_cast<at::Tensor>(elem));
169
+ }
170
+ } else {
171
+ for (const auto& elem : elements[2].toList()) {
172
+ optional.emplace_back(static_cast<c10::IValue>(elem).toOptional<at::Tensor>());
173
+ }
174
+ }
175
+ // create default optional value for bias
176
+ if (optional.empty()) {
177
+ optional.emplace_back();
178
+ }
179
+
180
+ auto config_a = non_optional[0].accessor<int16_t, 1>();
181
+ std::vector<int64_t> config_vals;
182
+ config_vals.reserve(config_a.size(0));
183
+ for (const auto i : c10::irange(config_a.size(0))) {
184
+ config_vals.emplace_back(config_a[i]);
185
+ }
186
+
187
+ auto weight = non_optional[1];
188
+ auto bias = optional[0];
189
+
190
+ std::vector<c10::optional<at::Tensor>> tensors;
191
+ tensors.emplace_back();
192
+ tensors.emplace_back(weight);
193
+ tensors.emplace_back(bias);
194
+
195
+ int64_t version = 3;
196
+ return std::tie(version, config_vals, tensors);
197
+ } else if (version == 3) {
198
+ return v.to<ConvParamsSerializationTypeV3>();
199
+ } else {
200
+ TORCH_INTERNAL_ASSERT(false, "Unexpected serialized qconv version: ",
201
+ version);
202
+ }
203
+ }
204
+
205
+ #define QCONV_SERIALIZATION_VERSION 2
206
+
207
+ #if QCONV_SERIALIZATION_VERSION == 2
208
+ using ConvParamsSerializationType = ConvParamsSerializationTypeV2;
209
+
210
+ template <uint32_t kSpatialDim>
211
+ ConvParamsSerializationTypeV2 serialize_conv(
212
+ const c10::intrusive_ptr<ConvPackedParamsBase<kSpatialDim>>& params) {
213
+
214
+ std::string version = "2";
215
+ std::vector<at::Tensor> non_optional;
216
+ std::vector<c10::optional<at::Tensor>> optional;
217
+
218
+ // create a packed int8_t tensor for conv params
219
+ std::vector<int16_t> params_vec;
220
+ params_vec.push_back(kSpatialDim);
221
+ auto stride = params->stride().vec();
222
+ params_vec.insert(params_vec.end(), stride.begin(), stride.end());
223
+ auto padding = params->padding().vec();
224
+ params_vec.insert(params_vec.end(), padding.begin(), padding.end());
225
+ auto dilation = params->dilation().vec();
226
+ params_vec.insert(params_vec.end(), dilation.begin(), dilation.end());
227
+ auto output_padding = params->output_padding().vec();
228
+ params_vec.insert(params_vec.end(), output_padding.begin(),
229
+ output_padding.end());
230
+ params_vec.push_back(params->groups());
231
+ params_vec.push_back(params->transpose());
232
+ int64_t vec_size = params_vec.size();
233
+ at::Tensor params_tensor = at::from_blob(
234
+ params_vec.data(), {vec_size},
235
+ at::TensorOptions().dtype(at::kShort))
236
+ // clone to retain ownership of the data
237
+ .clone();
238
+
239
+ auto [weight, bias] = params->unpack();
240
+
241
+ non_optional.emplace_back(std::move(params_tensor));
242
+ non_optional.emplace_back(std::move(weight));
243
+ optional.emplace_back(std::move(bias));
244
+
245
+ return std::tie(version, non_optional, optional);
246
+ }
247
+
248
+ #elif QCONV_SERIALIZATION_VERSION == 3
249
+ using ConvParamsSerializationType = ConvParamsSerializationTypeV3;
250
+
251
+ template <uint32_t kSpatialDim>
252
+ ConvParamsSerializationTypeV3 serialize_conv(
253
+ const c10::intrusive_ptr<ConvPackedParamsBase<kSpatialDim>>& params) {
254
+ std::vector<int64_t> config_vals;
255
+ config_vals.push_back(kSpatialDim);
256
+ auto stride = params->stride().vec();
257
+ config_vals.insert(config_vals.end(), stride.begin(), stride.end());
258
+ auto padding = params->padding().vec();
259
+ config_vals.insert(config_vals.end(), padding.begin(), padding.end());
260
+ auto dilation = params->dilation().vec();
261
+ config_vals.insert(config_vals.end(), dilation.begin(), dilation.end());
262
+ auto output_padding = params->output_padding().vec();
263
+ config_vals.insert(config_vals.end(), output_padding.begin(),
264
+ output_padding.end());
265
+ config_vals.push_back(params->groups());
266
+ config_vals.push_back(params->transpose());
267
+
268
+ auto [weight, bias] = params->unpack();
269
+
270
+ std::vector<c10::optional<at::Tensor>> tensors;
271
+ tensors.emplace_back();
272
+ tensors.emplace_back(weight);
273
+ tensors.emplace_back(bias);
274
+
275
+ int64_t version = 3;
276
+ return std::tie(version, config_vals, tensors);
277
+ }
278
+
279
+ #else
280
+ #error "Invalid qconv serialization version."
281
+ #endif
282
+
283
+ template <uint32_t kSpatialDim>
284
+ c10::intrusive_ptr<ConvPackedParamsBase<kSpatialDim>> deserialize_conv(
285
+ ConvParamsSerializationTypeV3 state) {
286
+ auto [version, config_vals, tensors] = state;
287
+ TORCH_INTERNAL_ASSERT(version == 3, "Unexpected serialized qconv version: ", version);
288
+
289
+ TORCH_CHECK(tensors.size() == 3, "Wrong number of tensors", tensors.size());
290
+ c10::optional<at::Tensor> weight = tensors[1];
291
+ c10::optional<at::Tensor> bias = tensors[2];
292
+ TORCH_INTERNAL_ASSERT(weight, "Weight should always be present in serialized qconv.");
293
+
294
+ torch::List<int64_t> stride, padding, output_padding, dilation;
295
+ // skip kSpatialDim
296
+ int idx = 1;
297
+ for (C10_UNUSED const auto i : c10::irange(kSpatialDim)) {
298
+ stride.emplace_back(config_vals.at(idx));
299
+ idx++;
300
+ }
301
+ for (C10_UNUSED const auto i : c10::irange(kSpatialDim)) {
302
+ padding.emplace_back(config_vals.at(idx));
303
+ idx++;
304
+ }
305
+ for (C10_UNUSED const auto i : c10::irange(kSpatialDim)) {
306
+ dilation.emplace_back(config_vals.at(idx));
307
+ idx++;
308
+ }
309
+ for (C10_UNUSED const auto i : c10::irange(kSpatialDim)) {
310
+ TORCH_INTERNAL_ASSERT(idx < static_cast<int64_t>(config_vals.size()),
311
+ "Unexpected index = ", idx, " for config_vals of size ",
312
+ config_vals.size());
313
+ output_padding.emplace_back(config_vals.at(idx));
314
+ idx++;
315
+ }
316
+ int64_t groups = config_vals.at(idx);
317
+ idx++;
318
+ int64_t flags = config_vals.at(idx);
319
+ idx++;
320
+ TORCH_INTERNAL_ASSERT(idx == static_cast<int64_t>(config_vals.size()),
321
+ "Unexpected length of config_vals, expected ",
322
+ idx,
323
+ " got ",
324
+ config_vals.size());
325
+
326
+ bool transpose = flags & (1 << 0);
327
+
328
+ int64_t other_flags = flags & ~(1 << 0);
329
+ TORCH_INTERNAL_ASSERT(other_flags == 0, "Unexpected flags set in ", flags, ".");
330
+
331
+ auto& ctx = at::globalContext();
332
+
333
+ #ifdef USE_FBGEMM
334
+ if (ctx.qEngine() == at::QEngine::X86) {
335
+ #if AT_MKLDNN_ENABLED()
336
+ bool use_onednn = onednn_utils::should_use_onednn_quant(
337
+ weight.value(), transpose, groups, output_padding);
338
+ if (use_onednn) {
339
+ return PackedConvWeightsOnednn<kSpatialDim>::prepack(
340
+ weight.value(),
341
+ bias,
342
+ stride,
343
+ padding,
344
+ output_padding,
345
+ dilation,
346
+ groups,
347
+ transpose
348
+ );
349
+ }
350
+ #endif
351
+ return PackedConvWeight<kSpatialDim>::prepack(
352
+ weight.value(),
353
+ bias,
354
+ stride,
355
+ padding,
356
+ output_padding,
357
+ dilation,
358
+ groups,
359
+ transpose
360
+ );
361
+ } // x86
362
+ #endif
363
+
364
+ #ifdef USE_FBGEMM
365
+ if (ctx.qEngine() == at::QEngine::FBGEMM) {
366
+ return PackedConvWeight<kSpatialDim>::prepack(
367
+ weight.value(),
368
+ bias,
369
+ stride,
370
+ padding,
371
+ output_padding,
372
+ dilation,
373
+ groups,
374
+ transpose
375
+ );
376
+ }
377
+ #endif // USE_FBGEMM
378
+ #ifdef USE_PYTORCH_QNNPACK
379
+ if (ctx.qEngine() == at::QEngine::QNNPACK) {
380
+ TORCH_CHECK(
381
+ kSpatialDim == 2,
382
+ "prepack/__setstate__: QNNPACK only supports Conv2d "
383
+ "now.");
384
+ return PackedConvWeightsQnnp<kSpatialDim>::prepack(
385
+ weight.value(),
386
+ bias,
387
+ stride,
388
+ padding,
389
+ output_padding,
390
+ dilation,
391
+ groups,
392
+ transpose
393
+ );
394
+ }
395
+ #endif // USE_PYTORCH_QNNPACK
396
+ #if AT_MKLDNN_ENABLED()
397
+ if (ctx.qEngine() == at::QEngine::ONEDNN) {
398
+ return PackedConvWeightsOnednn<kSpatialDim>::prepack(
399
+ weight.value(),
400
+ bias,
401
+ stride,
402
+ padding,
403
+ output_padding,
404
+ dilation,
405
+ groups,
406
+ transpose
407
+ );
408
+ }
409
+ #endif // AT_MKLDNN_ENABLED()
410
+ TORCH_CHECK(
411
+ false,
412
+ "Didn't find engine for when deserializing ConvPackedParams: ",
413
+ toString(ctx.qEngine()));
414
+ }
venv/lib/python3.10/site-packages/torch/include/ATen/native/quantized/cpu/fbgemm_utils.h ADDED
@@ -0,0 +1,411 @@
1
+ #pragma once
2
+
3
+ #include <ATen/Tensor.h>
4
+ #include <ATen/native/quantized/PackedParams.h>
5
+ #include <ATen/native/quantized/cpu/EmbeddingPackedParams.h>
6
+ #include <c10/core/QScheme.h>
7
+ #include <c10/util/irange.h>
8
+
9
+ #ifdef USE_FBGEMM
10
+ #include <fbgemm/Fbgemm.h>
11
+ #include <fbgemm/FbgemmFP16.h>
12
+ #include <fbgemm/QuantUtils.h>
13
+
14
+ // The struct for the packed weight matrix (PackBMatrix) and the corresponding
15
+ // column offsets used for the fully connected layer, which are both prepared in
16
+ // the prepacking step to save the computations in the inference. Note the
17
+ // column offsets include the sum of the B columns as well as the scalar term
18
+ // B_zero_point * K, whereas the row offsets created by
19
+ // PackAWithQuantRowOffset/PackAWithIm2Col/PackAWithRowOffset are only the sum
20
+ // of the A rows. The column offsets are needed for the asymmetric quantization
21
+ // (affine quantization) of input matrix.
22
+ // Note that in JIT mode we can think of a way to fuse col_offsets with bias.
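A conceptual sketch of the column-offset computation described above, for a K x N int8 weight matrix B stored row-major; the row-major layout and the sign convention for the B_zero_point term are assumptions here, not taken from FBGEMM:

    #include <cstdint>
    #include <vector>
    std::vector<int32_t> make_col_offsets(
        const int8_t* B, int K, int N, int32_t B_zero_point) {
      std::vector<int32_t> col_offsets(N);
      for (int j = 0; j < N; ++j) {
        int32_t col_sum = 0;
        for (int k = 0; k < K; ++k) {
          col_sum += static_cast<int32_t>(B[k * N + j]);  // sum of column j of B
        }
        // combine the column sum with the B_zero_point * K term
        col_offsets[j] = col_sum - B_zero_point * K;
      }
      return col_offsets;
    }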
23
+ struct TORCH_API PackedLinearWeight : public LinearPackedParamsBase {
24
+ PackedLinearWeight(
25
+ std::unique_ptr<fbgemm::PackBMatrix<int8_t>> w,
26
+ c10::optional<at::Tensor> bias,
27
+ std::vector<int32_t> col_offsets,
28
+ std::vector<float> w_scale,
29
+ std::vector<int32_t> w_zp,
30
+ c10::QScheme q_scheme)
31
+ : w(std::move(w)),
32
+ bias_(std::move(bias)),
33
+ col_offsets(std::move(col_offsets)),
34
+ w_scale(std::move(w_scale)),
35
+ w_zp(std::move(w_zp)),
36
+ q_scheme(std::move(q_scheme)) {}
37
+ std::unique_ptr<fbgemm::PackBMatrix<int8_t>> w;
38
+ c10::optional<at::Tensor> bias_;
39
+ std::vector<int32_t> col_offsets;
40
+ std::vector<float> w_scale;
41
+ std::vector<int32_t> w_zp;
42
+ c10::QScheme q_scheme;
43
+
44
+ at::Tensor apply(
45
+ at::Tensor input,
46
+ double output_scale,
47
+ int64_t output_zero_point) override;
48
+
49
+ at::Tensor apply_relu(
50
+ at::Tensor input,
51
+ double output_scale,
52
+ int64_t output_zero_point) override;
53
+
54
+ at::Tensor& apply_out(
55
+ const at::Tensor& input,
56
+ double output_scale,
57
+ int64_t output_zero_point,
58
+ at::Tensor& output) override;
59
+
60
+ at::Tensor& apply_relu_out(
61
+ const at::Tensor& input,
62
+ double output_scale,
63
+ int64_t output_zero_point,
64
+ at::Tensor& output) override;
65
+
66
+ at::Tensor apply_with_input_q_dq_qweight_dq_output_fp32(
67
+ at::Tensor input,
68
+ double input_scale,
69
+ int64_t input_zero_point) override;
70
+
71
+ at::Tensor apply_with_input_q_dq_qweight_dq_relu_output_fp32(
72
+ at::Tensor input,
73
+ double input_scale,
74
+ int64_t input_zero_point) override;
75
+
76
+ at::Tensor apply_dynamic(at::Tensor input, bool reduce_range = false)
77
+ override;
78
+
79
+ at::Tensor apply_dynamic_relu(at::Tensor input, bool reduce_range = false)
80
+ override;
81
+
82
+ std::tuple<at::Tensor, c10::optional<at::Tensor>> unpack() override;
83
+
84
+ c10::optional<at::Tensor> bias() override {
85
+ return bias_;
86
+ }
87
+
88
+ static c10::intrusive_ptr<LinearPackedParamsBase> prepack(
89
+ at::Tensor weight,
90
+ c10::optional<at::Tensor> bias);
91
+
92
+ private:
93
+ template <bool ReluFused>
94
+ at::Tensor& apply_impl(
95
+ const at::Tensor& input,
96
+ double output_scale,
97
+ int64_t output_zero_point,
98
+ at::Tensor& output);
99
+
100
+ template <bool ReluFused>
101
+ at::Tensor apply_with_input_q_dq_qweight_dq_output_fp32_impl(
102
+ const at::Tensor& input,
103
+ double input_scale,
104
+ int64_t input_zero_point);
105
+
106
+ template <bool ReluFused>
107
+ at::Tensor apply_dynamic_impl(at::Tensor input, bool reduce_range = false);
108
+ };
109
+
110
+ struct TORCH_API PackedLinearWeightFp16 : public LinearPackedParamsBase {
111
+ PackedLinearWeightFp16(
112
+ std::unique_ptr<fbgemm::PackedGemmMatrixFP16> w,
113
+ c10::optional<at::Tensor> bias)
114
+ : w(std::move(w)), bias_(std::move(bias)) {}
115
+
116
+ std::unique_ptr<fbgemm::PackedGemmMatrixFP16> w;
117
+ c10::optional<at::Tensor> bias_;
118
+
119
+ at::Tensor apply(
120
+ at::Tensor /*input*/,
121
+ double /*output_scale*/,
122
+ int64_t /*output_zero_point*/) override {
123
+ TORCH_INTERNAL_ASSERT(false);
124
+ }
125
+ at::Tensor apply_relu(
126
+ at::Tensor /*input*/,
127
+ double /*output_scale*/,
128
+ int64_t /*output_zero_point*/) override {
129
+ TORCH_INTERNAL_ASSERT(false);
130
+ }
131
+
132
+ at::Tensor apply_dynamic(at::Tensor input, bool reduce_range = false)
133
+ override;
134
+ at::Tensor apply_dynamic_relu(at::Tensor input, bool reduce_range = false)
135
+ override;
136
+
137
+ at::Tensor& apply_dynamic_out(
138
+ const at::Tensor& input,
139
+ at::Tensor& output,
140
+ bool reduce_range = false) override;
141
+ at::Tensor& apply_dynamic_relu_out(
142
+ const at::Tensor& input,
143
+ at::Tensor& output,
144
+ bool reduce_range = false) override;
145
+
146
+ std::tuple<at::Tensor, c10::optional<at::Tensor>> unpack() override;
147
+
148
+ c10::optional<at::Tensor> bias() override {
149
+ return bias_;
150
+ }
151
+
152
+ static c10::intrusive_ptr<LinearPackedParamsBase> prepack(
153
+ at::Tensor weight,
154
+ c10::optional<at::Tensor> bias);
155
+
156
+ void set_bias(c10::optional<at::Tensor> bias) override;
157
+
158
+ private:
159
+ template <bool ReluFused>
160
+ at::Tensor& apply_dynamic_impl(const at::Tensor& input, at::Tensor& output);
161
+ };
162
+
163
+ template <int kSpatialDim = 2>
164
+ struct TORCH_API PackedConvWeight : public ConvPackedParamsBase<kSpatialDim> {
165
+ PackedConvWeight(
166
+ std::unique_ptr<fbgemm::PackWeightsForConv<kSpatialDim>> w,
167
+ c10::optional<at::Tensor> bias,
168
+ torch::List<int64_t> stride,
169
+ torch::List<int64_t> padding,
170
+ torch::List<int64_t> output_padding,
171
+ torch::List<int64_t> dilation,
172
+ int64_t groups,
173
+ uint8_t transpose,
174
+ std::vector<int32_t> col_offsets,
175
+ std::vector<int64_t> kernel,
176
+ std::vector<float> w_scale,
177
+ std::vector<int32_t> w_zp,
178
+ c10::QScheme q_scheme)
179
+ : w(std::move(w)),
180
+ bias(std::move(bias)),
181
+ stride_(std::move(stride)),
182
+ padding_(std::move(padding)),
183
+ output_padding_(std::move(output_padding)),
184
+ dilation_(std::move(dilation)),
185
+ groups_(groups),
186
+ transpose_(transpose),
187
+ col_offsets(std::move(col_offsets)),
188
+ kernel(std::move(kernel)),
189
+ w_scale(std::move(w_scale)),
190
+ w_zp(std::move(w_zp)),
191
+ q_scheme(q_scheme) {}
192
+
193
+ std::unique_ptr<fbgemm::PackWeightsForConv<kSpatialDim>> w;
194
+ c10::optional<at::Tensor> bias;
195
+ torch::List<int64_t> stride_;
196
+ torch::List<int64_t> padding_;
197
+ torch::List<int64_t> output_padding_;
198
+ torch::List<int64_t> dilation_;
199
+ int64_t groups_;
200
+ uint8_t transpose_;
201
+ std::vector<int32_t> col_offsets;
202
+ std::vector<int64_t> kernel;
203
+ std::vector<float> w_scale;
204
+ std::vector<int32_t> w_zp;
205
+ c10::QScheme q_scheme;
206
+
207
+ at::Tensor apply(
208
+ const at::Tensor& input,
209
+ double output_scale,
210
+ int64_t output_zero_point) override;
211
+
212
+ at::Tensor apply_relu(
213
+ const at::Tensor& input,
214
+ double output_scale,
215
+ int64_t output_zero_point) override;
216
+
217
+ at::Tensor apply_dynamic(
218
+ const at::Tensor& input,
219
+ bool reduce_range) override;
220
+
221
+ std::tuple<at::Tensor, c10::optional<at::Tensor>> unpack() override;
222
+
223
+ static c10::intrusive_ptr<ConvPackedParamsBase<kSpatialDim>> prepack(
224
+ at::Tensor weight,
225
+ c10::optional<at::Tensor> bias,
226
+ torch::List<int64_t> stride,
227
+ torch::List<int64_t> padding,
228
+ torch::List<int64_t> output_padding,
229
+ torch::List<int64_t> dilation,
230
+ int64_t groups,
231
+ bool transpose);
232
+
233
+ const float* GetBiasData(at::Tensor* bias);
234
+
235
+ void GetQuantizationParams(
236
+ float act_scale,
237
+ float out_scale,
238
+ std::vector<float>* output_multiplier_float,
239
+ std::vector<float>* act_times_w_scale);
240
+
241
+ torch::List<int64_t> stride() const override {
242
+ return stride_;
243
+ }
244
+
245
+ torch::List<int64_t> padding() const override {
246
+ return padding_;
247
+ }
248
+
249
+ torch::List<int64_t> output_padding() const override {
250
+ return output_padding_;
251
+ }
252
+
253
+ torch::List<int64_t> dilation() const override {
254
+ return dilation_;
255
+ }
256
+
257
+ int64_t groups() const override {
258
+ return groups_;
259
+ }
260
+
261
+ bool transpose() const override {
262
+ return (bool)transpose_;
263
+ }
264
+
265
+ private:
266
+ template <bool ReluFused>
267
+ at::Tensor apply_impl(
268
+ const at::Tensor& input,
269
+ double output_scale,
270
+ int64_t output_zero_point);
271
+ };
272
+
273
+ // PackWeight: Convert the weight from uint8 to int8.
274
+ inline void convert_uint8_int8(
275
+ int len,
276
+ const uint8_t* src_uint8,
277
+ int8_t* dst_int8) {
278
+ for (const auto i : c10::irange(len)) {
279
+ dst_int8[i] = static_cast<int8_t>(static_cast<int32_t>(src_uint8[i]) - 128);
280
+ }
281
+ }
282
+
283
+ // UnpackWeight: Convert the weight from int8 to uint8.
284
+ inline void convert_int8_uint8(
285
+ int len,
286
+ const int8_t* src_int8,
287
+ uint8_t* dst_uint8) {
288
+ for (const auto i : c10::irange(len)) {
289
+ dst_uint8[i] =
290
+ static_cast<uint8_t>(static_cast<int32_t>(src_int8[i]) + 128);
291
+ }
292
+ }
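A small usage sketch of the two helpers above; the example values simply demonstrate that the +/-128 shift is a lossless round trip:

    #include <vector>
    inline void convert_roundtrip_example() {
      const int len = 4;
      std::vector<uint8_t> quint8 = {0, 127, 128, 255};
      std::vector<int8_t> qint8(len);
      convert_uint8_int8(len, quint8.data(), qint8.data());  // -> {-128, -1, 0, 127}
      std::vector<uint8_t> back(len);
      convert_int8_uint8(len, qint8.data(), back.data());    // restores {0, 127, 128, 255}
    }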
293
+
294
+ namespace at {
295
+ namespace native {
296
+ namespace fbgemm_utils {
297
+
298
+ template <int kSpatialDim = 2>
299
+ fbgemm::conv_param_t<kSpatialDim> MakeFbgemmConvParam(
300
+ int N,
301
+ int C,
302
+ int M,
303
+ const std::vector<int>& image_shape,
304
+ int groups,
305
+ const std::vector<int>& kernels,
306
+ const std::vector<int>& strides,
307
+ const std::vector<int>& pads,
308
+ const std::vector<int>& dilations,
309
+ const std::vector<int>& output_padding = std::vector<int>(kSpatialDim, 0),
310
+ bool transposed = false);
311
+
312
+ // TODO: Remove functions below when ChannelsLast3d is ready.
313
+ Tensor MakeStridedQTensorCPU(
314
+ const IntArrayRef& sizes,
315
+ const IntArrayRef& strides,
316
+ const TensorOptions& options,
317
+ QuantizerPtr quantizer);
318
+
319
+ Tensor MakeEmptyAffineQuantizedChannelsLast3dTensor(
320
+ int64_t N,
321
+ int64_t C,
322
+ int64_t D,
323
+ int64_t H,
324
+ int64_t W,
325
+ const TensorOptions& options,
326
+ double scale,
327
+ int64_t zero_point);
328
+
329
+ Tensor MakeEmptyPerChannelAffineQuantizedChannelsLast3dTensor(
330
+ int64_t N,
331
+ int64_t C,
332
+ int64_t D,
333
+ int64_t H,
334
+ int64_t W,
335
+ const TensorOptions& options,
336
+ const Tensor& scales,
337
+ const Tensor& zero_points);
338
+
339
+ Tensor ConvertToChannelsLast3dTensor(const Tensor& src);
340
+
341
+ template <int kSpatialDim = 2>
342
+ Tensor TransposeConvTensorUnpackConversion(const Tensor& src, int groups);
343
+
344
+ template <int kSpatialDim>
345
+ Tensor ConvertConvWeightsToChannelLastTensor(
346
+ const at::Tensor& src,
347
+ int groups,
348
+ bool transpose);
349
+ } // namespace fbgemm_utils
350
+ } // namespace native
351
+ } // namespace at
352
+
353
+ #endif // USE_FBGEMM
354
+
355
+ struct TORCH_API PackedEmbeddingBagWeight : public EmbeddingPackedParamsBase {
356
+ PackedEmbeddingBagWeight(
357
+ at::Tensor packed_w,
358
+ std::vector<float> w_scale,
359
+ std::vector<float> w_zp,
360
+ int64_t bit_rate,
361
+ c10::QScheme q_scheme,
362
+ int64_t version)
363
+ : packed_w(std::move(packed_w)),
364
+ w_scale(std::move(w_scale)),
365
+ w_zp(std::move(w_zp)),
366
+ bit_rate_(bit_rate),
367
+ q_scheme(q_scheme),
368
+ version_(version) {
369
+ // NOLINTNEXTLINE(clang-analyzer-cplusplus.Move)
370
+ if (!packed_w.is_contiguous()) {
371
+ packed_w = packed_w.contiguous();
372
+ }
373
+ }
374
+
375
+ at::Tensor packed_w;
376
+ std::vector<float> w_scale;
377
+ std::vector<float> w_zp;
378
+ int64_t bit_rate_;
379
+ c10::QScheme q_scheme;
380
+ int64_t version_;
381
+
382
+ at::Tensor unpack() override;
383
+ static c10::intrusive_ptr<EmbeddingPackedParamsBase> prepack(
384
+ at::Tensor weight);
385
+
386
+ int64_t bit_rate() const override {
387
+ return bit_rate_;
388
+ }
389
+
390
+ int64_t version() const override {
391
+ return version_;
392
+ }
393
+
394
+ at::Tensor embeddingbag_byte(
395
+ const at::Tensor& indices,
396
+ const c10::optional<at::Tensor>& offsets,
397
+ bool pruned_weights,
398
+ const c10::optional<at::Tensor>& per_sample_weights_,
399
+ const c10::optional<at::Tensor>& compressed_indices_mapping,
400
+ bool include_last_offset,
401
+ bool is_embedding_op) override;
402
+
403
+ at::Tensor embeddingbag_4bit(
404
+ const at::Tensor& indices,
405
+ const c10::optional<at::Tensor>& offsets,
406
+ bool pruned_weights,
407
+ const c10::optional<at::Tensor>& per_sample_weights_,
408
+ const c10::optional<at::Tensor>& compressed_indices_mapping,
409
+ bool include_last_offset,
410
+ bool is_embedding_op) override;
411
+ };
venv/lib/python3.10/site-packages/torch/include/ATen/native/quantized/cpu/init_qnnpack.h ADDED
@@ -0,0 +1,13 @@
1
+ #pragma once
2
+
3
+ #ifdef USE_PYTORCH_QNNPACK
4
+
5
+ namespace at {
6
+ namespace native {
7
+
8
+ void initQNNPACK();
9
+
10
+ } // namespace native
11
+ } // namespace at
12
+
13
+ #endif
venv/lib/python3.10/site-packages/torch/include/ATen/native/quantized/cpu/qembeddingbag.h ADDED
@@ -0,0 +1,34 @@
1
+ #pragma once
2
+ #include <ATen/core/Tensor.h>
3
+ #include <cstdint>
4
+
5
+ namespace at {
6
+ namespace native {
7
+ Tensor& embedding_bag_byte_rowwise_offsets_out(
8
+ Tensor& output,
9
+ const Tensor& weight,
10
+ const Tensor& indices,
11
+ const c10::optional<Tensor>& offsets_in,
12
+ const bool /* scale_grad_by_freq */,
13
+ const int64_t /* mode */,
14
+ bool pruned_weights,
15
+ const c10::optional<Tensor>& per_sample_weights_,
16
+ const c10::optional<Tensor>& compressed_indices_mapping,
17
+ bool include_last_offset);
18
+
19
+ Tensor& embedding_bag_4bit_rowwise_offsets_out(
20
+ Tensor& output,
21
+ const Tensor& weight,
22
+ const Tensor& indices,
23
+ const c10::optional<Tensor>& offsets_in,
24
+ const bool /* scale_grad_by_freq */,
25
+ const int64_t /* mode */,
26
+ bool pruned_weights,
27
+ const c10::optional<Tensor>& per_sample_weights_,
28
+ const c10::optional<Tensor>& compressed_indices_mapping,
29
+ bool include_last_offset);
30
+
31
+ Tensor& qembeddingbag_byte_unpack_out(Tensor& output, const Tensor& packed_weight);
32
+
33
+ } // native
34
+ } // at
venv/lib/python3.10/site-packages/torch/include/ATen/native/quantized/cpu/qembeddingbag_prepack.h ADDED
@@ -0,0 +1,13 @@
1
+ #pragma once
2
+ #include <ATen/core/Tensor.h>
3
+
4
+ namespace at { namespace native {
5
+
6
+ Tensor& qembeddingbag_byte_prepack_out(Tensor& output, const Tensor& weight);
7
+
8
+ Tensor qembeddingbag_byte_prepack(const Tensor& weight);
9
+
10
+ Tensor qembeddingbag_byte_prepack_meta(const Tensor& weight);
11
+
12
+ } // namespace native
13
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/c10/cuda/CUDAAlgorithm.h ADDED
@@ -0,0 +1,31 @@
1
+ #ifdef THRUST_DEVICE_LOWER_BOUND_WORKS
2
+ #include <thrust/binary_search.h>
3
+ #include <thrust/device_vector.h>
4
+ #include <thrust/execution_policy.h>
5
+ #include <thrust/functional.h>
6
+ #endif
7
+ namespace c10::cuda {
8
+ #ifdef THRUST_DEVICE_LOWER_BOUND_WORKS
9
+ template <typename Iter, typename Scalar>
10
+ __forceinline__ __device__ Iter
11
+ lower_bound(Iter start, Iter end, Scalar value) {
12
+ return thrust::lower_bound(thrust::device, start, end, value);
13
+ }
14
+ #else
15
+ // thrust::lower_bound is broken on device, see
16
+ // https://github.com/NVIDIA/thrust/issues/1734 Implementation inspired by
17
+ // https://github.com/pytorch/pytorch/blob/805120ab572efef66425c9f595d9c6c464383336/aten/src/ATen/native/cuda/Bucketization.cu#L28
18
+ template <typename Iter, typename Scalar>
19
+ __device__ Iter lower_bound(Iter start, Iter end, Scalar value) {
20
+ while (start < end) {
21
+ auto mid = start + ((end - start) >> 1);
22
+ if (*mid < value) {
23
+ start = mid + 1;
24
+ } else {
25
+ end = mid;
26
+ }
27
+ }
28
+ return end;
29
+ }
30
+ #endif // THRUST_DEVICE_LOWER_BOUND_WORKS
31
+ } // namespace c10::cuda
venv/lib/python3.10/site-packages/torch/include/c10/cuda/CUDAAllocatorConfig.h ADDED
@@ -0,0 +1,124 @@
1
+ #pragma once
2
+
3
+ #include <c10/cuda/CUDAMacros.h>
4
+ #include <c10/util/Exception.h>
5
+ #include <cuda_runtime_api.h>
6
+
7
+ #include <atomic>
8
+ #include <cstddef>
9
+ #include <cstdlib>
10
+ #include <mutex>
11
+ #include <string>
12
+
13
+ namespace c10::cuda::CUDACachingAllocator {
14
+
15
+ // Environment config parser
16
+ class C10_CUDA_API CUDAAllocatorConfig {
17
+ public:
18
+ static size_t max_split_size() {
19
+ return instance().m_max_split_size;
20
+ }
21
+ static double garbage_collection_threshold() {
22
+ return instance().m_garbage_collection_threshold;
23
+ }
24
+
25
+ static bool expandable_segments() {
26
+ #ifndef PYTORCH_C10_DRIVER_API_SUPPORTED
27
+ if (instance().m_expandable_segments) {
28
+ TORCH_WARN_ONCE("expandable_segments not supported on this platform")
29
+ }
30
+ return false;
31
+ #else
32
+ return instance().m_expandable_segments;
33
+ #endif
34
+ }
35
+
36
+ static bool release_lock_on_cudamalloc() {
37
+ return instance().m_release_lock_on_cudamalloc;
38
+ }
39
+
40
+ /** Pinned memory allocator settings */
41
+ static bool pinned_use_cuda_host_register() {
42
+ return instance().m_pinned_use_cuda_host_register;
43
+ }
44
+
45
+ static size_t pinned_num_register_threads() {
46
+ return instance().m_pinned_num_register_threads;
47
+ }
48
+
49
+ static size_t pinned_max_register_threads() {
50
+ // Based on the benchmark results, we see better allocation performance
51
+ // with 8 threads. However, future systems may need more threads,
52
+ // so we cap this at 128 threads.
53
+ return 128;
54
+ }
55
+
56
+ // This is used to round up the allocation size to the nearest power-of-2 division.
57
+ // More description below in function roundup_power2_next_division
58
+ // As an example, if we want 4 divisions between consecutive powers of 2, this can be done
59
+ // using env variable: PYTORCH_CUDA_ALLOC_CONF=roundup_power2_divisions:4
60
+ static size_t roundup_power2_divisions(size_t size);
61
+
62
+ static std::vector<size_t> roundup_power2_divisions() {
63
+ return instance().m_roundup_power2_divisions;
64
+ }
65
+
66
+ static std::string last_allocator_settings() {
67
+ std::lock_guard<std::mutex> lock(
68
+ instance().m_last_allocator_settings_mutex);
69
+ return instance().m_last_allocator_settings;
70
+ }
71
+
72
+ static CUDAAllocatorConfig& instance() {
73
+ static CUDAAllocatorConfig* s_instance = ([]() {
74
+ auto inst = new CUDAAllocatorConfig();
75
+ const char* env = getenv("PYTORCH_CUDA_ALLOC_CONF");
76
+ inst->parseArgs(env);
77
+ return inst;
78
+ })();
79
+ return *s_instance;
80
+ }
81
+
82
+ void parseArgs(const char* env);
83
+
84
+ private:
85
+ CUDAAllocatorConfig();
86
+
87
+ static void lexArgs(const char* env, std::vector<std::string>& config);
88
+ static void consumeToken(
89
+ const std::vector<std::string>& config,
90
+ size_t i,
91
+ const char c);
92
+ size_t parseMaxSplitSize(const std::vector<std::string>& config, size_t i);
93
+ size_t parseGarbageCollectionThreshold(
94
+ const std::vector<std::string>& config,
95
+ size_t i);
96
+ size_t parseRoundUpPower2Divisions(
97
+ const std::vector<std::string>& config,
98
+ size_t i);
99
+ size_t parseAllocatorConfig(
100
+ const std::vector<std::string>& config,
101
+ size_t i,
102
+ bool& used_cudaMallocAsync);
103
+ size_t parsePinnedUseCudaHostRegister(
104
+ const std::vector<std::string>& config,
105
+ size_t i);
106
+ size_t parsePinnedNumRegisterThreads(
107
+ const std::vector<std::string>& config,
108
+ size_t i);
109
+
110
+ std::atomic<size_t> m_max_split_size;
111
+ std::vector<size_t> m_roundup_power2_divisions;
112
+ std::atomic<double> m_garbage_collection_threshold;
113
+ std::atomic<size_t> m_pinned_num_register_threads;
114
+ std::atomic<bool> m_expandable_segments;
115
+ std::atomic<bool> m_release_lock_on_cudamalloc;
116
+ std::atomic<bool> m_pinned_use_cuda_host_register;
117
+ std::string m_last_allocator_settings;
118
+ std::mutex m_last_allocator_settings_mutex;
119
+ };
120
+
121
+ // General caching allocator utilities
122
+ C10_CUDA_API void setAllocatorSettings(const std::string& env);
123
+
124
+ } // namespace c10::cuda::CUDACachingAllocator
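As a usage note (hedged: the authoritative option keys are whatever parseArgs above accepts), the same option string can come from the PYTORCH_CUDA_ALLOC_CONF environment variable or be applied programmatically; the wrapper function below is hypothetical:

    #include <c10/cuda/CUDAAllocatorConfig.h>
    void configure_cuda_allocator() {
      // Equivalent to setting, before the first allocation:
      //   PYTORCH_CUDA_ALLOC_CONF=max_split_size_mb:128,roundup_power2_divisions:4
      c10::cuda::CUDACachingAllocator::setAllocatorSettings(
          "max_split_size_mb:128,roundup_power2_divisions:4");
    }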
venv/lib/python3.10/site-packages/torch/include/c10/cuda/CUDACachingAllocator.h ADDED
@@ -0,0 +1,481 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/Allocator.h>
4
+ #include <c10/cuda/CUDAGraphsC10Utils.h>
5
+ #include <c10/cuda/CUDAMacros.h>
6
+ #include <c10/cuda/CUDAStream.h>
7
+ #include <c10/util/ApproximateClock.h>
8
+ #include <c10/util/Exception.h>
9
+ #include <c10/util/Registry.h>
10
+
11
+ #include <array>
12
+ #include <atomic>
13
+ #include <cstddef>
14
+ #include <cstdint>
15
+ #include <functional>
16
+ #include <memory>
17
+ #include <string>
18
+ #include <unordered_set>
19
+ #include <utility>
20
+
21
+ namespace c10 {
22
+
23
+ // Caching allocator will execute every registered callback if it is unable to find a
24
+ // block inside the already allocated area.
25
+ class C10_CUDA_API FreeMemoryCallback {
26
+ public:
27
+ virtual ~FreeMemoryCallback() = default;
28
+ virtual bool Execute() = 0;
29
+ };
30
+
31
+ C10_DECLARE_REGISTRY(FreeCudaMemoryCallbacksRegistry, FreeMemoryCallback);
32
+ #define REGISTER_FREE_MEMORY_CALLBACK(name, ...) \
33
+ C10_REGISTER_CLASS(FreeCudaMemoryCallbacksRegistry, name, __VA_ARGS__);
34
+ } // namespace c10
35
+ //
36
+ // TODO: Turn this into an honest to goodness class. I briefly attempted to do
37
+ // this, but it was a bit irritating to figure out how to also correctly
38
+ // apply pimpl pattern so I didn't have to leak any internal implementation
39
+ // details in the header (CUDACachingAllocator could be made a pimpl, but
40
+ // you also need to appropriately define a class which is a subclass
41
+ // of Allocator. Not impossible, but required a bit more surgery than
42
+ // I wanted to do at the time.)
43
+ //
44
+ // Why is this using a namespace rather than old-style THCCachingAllocator_
45
+ // prefix? Mostly because it made the HIPify rules easier to write; _ is
46
+ // not counted as a word boundary, so you would otherwise have to list each
47
+ // of these functions.
48
+
49
+ namespace c10::cuda::CUDACachingAllocator {
50
+
51
+ extern const size_t kLargeBuffer;
52
+
53
+ struct Stat {
54
+ int64_t current = 0;
55
+ int64_t peak = 0;
56
+ int64_t allocated = 0;
57
+ int64_t freed = 0;
58
+ };
59
+
60
+ enum struct StatType : uint64_t {
61
+ AGGREGATE = 0,
62
+ SMALL_POOL = 1,
63
+ LARGE_POOL = 2,
64
+ NUM_TYPES = 3 // remember to update this whenever a new stat type is added
65
+ };
66
+
67
+ typedef std::array<Stat, static_cast<size_t>(StatType::NUM_TYPES)> StatArray;
68
+
69
+ // Struct containing memory allocator summary statistics for a device.
70
+ struct DeviceStats {
71
+ // COUNT: allocations requested by client code
72
+ StatArray allocation;
73
+ // COUNT: number of allocated segments from cudaMalloc().
74
+ StatArray segment;
75
+ // COUNT: number of active memory blocks (allocated or used by stream)
76
+ StatArray active;
77
+ // COUNT: number of inactive, split memory blocks (unallocated but can't be
78
+ // released via cudaFree)
79
+ StatArray inactive_split;
80
+
81
+ // SUM: bytes allocated by this memory allocator
82
+ StatArray allocated_bytes;
83
+ // SUM: bytes reserved by this memory allocator (both free and used)
84
+ StatArray reserved_bytes;
85
+ // SUM: bytes within active memory blocks
86
+ StatArray active_bytes;
87
+ // SUM: bytes within inactive, split memory blocks
88
+ StatArray inactive_split_bytes;
89
+ // SUM: bytes requested by client code
90
+ StatArray requested_bytes;
91
+
92
+ // COUNT: total number of failed calls to CUDA malloc necessitating cache
93
+ // flushes.
94
+ int64_t num_alloc_retries = 0;
95
+
96
+ // COUNT: total number of OOMs (i.e. failed calls to CUDA after cache flush)
97
+ int64_t num_ooms = 0;
98
+
99
+ // COUNT: total number of oversize blocks allocated from pool
100
+ Stat oversize_allocations;
101
+
102
+ // COUNT: total number of oversize blocks requiring malloc
103
+ Stat oversize_segments;
104
+
105
+ // COUNT: total number of synchronize_and_free_events() calls
106
+ int64_t num_sync_all_streams = 0;
107
+
108
+ // COUNT: total number of CUDA allocation calls. This includes both cuMemMap
109
+ // and cudaMalloc.
110
+ int64_t num_device_alloc = 0;
111
+
112
+ // COUNT: total number of CUDA free calls. This includes both cuMemUnmap
113
+ // and cudaFree.
114
+ int64_t num_device_free = 0;
115
+
116
+ // SIZE: maximum block size that is allowed to be split.
117
+ int64_t max_split_size = 0;
118
+ };
119
+
120
+ typedef std::shared_ptr<GatheredContext> (*CreateContextFn)();
121
+
122
+ // Struct containing info of an allocation block (i.e. a fractional part of a
123
+ // cudaMalloc)..
124
+ struct BlockInfo {
125
+ int64_t size = 0;
126
+ int64_t requested_size = 0;
127
+ int32_t gc_counter = 0;
128
+ bool allocated = false;
129
+ bool active = false;
130
+ std::shared_ptr<GatheredContext>
131
+ context_when_allocated; // per-watcher context
132
+ };
133
+
134
+ // Struct containing info of a memory segment (i.e. one contiguous cudaMalloc).
135
+ struct SegmentInfo {
136
+ c10::DeviceIndex device = 0;
137
+ int64_t address = 0;
138
+ int64_t total_size = 0;
139
+ int64_t requested_size = 0; // unrounded, actually requested size
140
+ int64_t allocated_size = 0;
141
+ int64_t active_size = 0;
142
+ cudaStream_t stream = nullptr;
143
+ bool is_large = false;
144
+ bool is_expandable = false;
145
+ MempoolId_t owner_private_pool_id = {0, 0};
146
+ std::vector<BlockInfo> blocks;
147
+ std::shared_ptr<GatheredContext> context_when_allocated;
148
+ };
149
+
150
+ struct AllocatorState {
151
+ virtual ~AllocatorState() = default;
152
+ };
153
+
154
+ union trace_time_ {
155
+ time_t t_;
156
+ approx_time_t approx_t_;
157
+ };
158
+
159
+ struct TraceEntry {
160
+ enum Action {
161
+ ALLOC, // API made to the caching allocator for new memory
162
+ FREE_REQUESTED, // API call made to the caching allocator to free memory
163
+ FREE_COMPLETED, // The allocator might have to delay a free because
164
+ // it is still in use on another stream via record_stream
165
+ // This event is generated when a free actually completes.
166
+ SEGMENT_ALLOC, // a call to cudaMalloc to get more memory from the OS
167
+ SEGMENT_FREE, // a call to cudaFree to return memory to the OS (e.g. to
168
+ // defragment or empty_caches)
169
+ SEGMENT_MAP, // a call to cuMemMap (used with expandable_segments)
170
+ SEGMENT_UNMAP, // unmap part of a segment (used with expandable segments)
171
+ SNAPSHOT, // a call to snapshot, used to correlate memory snapshots to trace
172
+ // events
173
+ OOM // the allocator threw an OutOfMemoryError (addr_ is the amount of free
174
+ // bytes reported by cuda)
175
+ };
176
+ TraceEntry(
177
+ Action action,
178
+ c10::DeviceIndex device,
179
+ int64_t addr,
180
+ size_t size,
181
+ cudaStream_t stream,
182
+ approx_time_t time,
183
+ std::shared_ptr<GatheredContext> context = nullptr)
184
+ : action_(action),
185
+ device_(device),
186
+ addr_(addr),
187
+ context_(std::move(context)),
188
+ stream_(stream),
189
+ size_(static_cast<int64_t>(size)) {
190
+ time_.approx_t_ = time;
191
+ }
192
+ Action action_;
193
+ c10::DeviceIndex device_;
194
+ int64_t addr_; // for OOM, this is the amount of free bytes reported by cuda
195
+ std::shared_ptr<GatheredContext> context_;
196
+ cudaStream_t stream_{};
197
+ int64_t size_;
198
+ trace_time_ time_{};
199
+ };
200
+
201
+ struct AllocatorConfigInfo {
202
+ double garbage_collection_threshold;
203
+ size_t max_split_size;
204
+ size_t pinned_num_register_threads;
205
+ bool expandable_segments;
206
+ bool release_lock_on_malloc;
207
+ bool pinned_use_host_register;
208
+ std::string last_allocator_settings;
209
+ std::vector<size_t> roundup_power2_divisions;
210
+ };
211
+
212
+ struct SnapshotInfo {
213
+ std::vector<SegmentInfo> segments;
214
+ std::vector<std::vector<TraceEntry>> device_traces;
215
+ AllocatorConfigInfo config_metadata;
216
+ };
217
+
218
+ // returns the pointers freed in the pool
219
+ // and the pointers allocated. Note: a pointer
220
+ // may appear in both freed and allocated
221
+ struct CheckpointDelta {
222
+ std::vector<void*> ptrs_freed;
223
+ std::vector<at::DataPtr> dataptrs_allocd;
224
+ };
225
+
226
+ enum struct RecordContext {
227
+ NEVER = 0,
228
+ STATE = 1, // only keep stacks for active allocations
229
+ ALLOC = 2, // additionally keep stacks for allocations in the trace history
230
+ ALL = 3, // additionally record stacks for when something is freed
231
+ };
232
+
233
+ // Size pretty-printer
234
+ std::string format_size(uint64_t size);
235
+
236
+ using OutOfMemoryObserver = std::function<void(
237
+ int64_t device,
238
+ int64_t allocated,
239
+ int64_t device_total,
240
+ int64_t device_free)>;
241
+
242
+ using AllocatorTraceTracker = std::function<void(const TraceEntry&)>;
243
+
244
+ class CUDAAllocator : public Allocator {
245
+ public:
246
+ virtual void* raw_alloc(size_t nbytes) = 0;
247
+ virtual void* raw_alloc_with_stream(size_t nbytes, cudaStream_t stream) = 0;
248
+ virtual void raw_delete(void* ptr) = 0;
249
+ virtual void init(int device_count) = 0;
250
+ virtual bool initialized() = 0;
251
+ virtual void setMemoryFraction(double fraction, c10::DeviceIndex device) = 0;
252
+ virtual void emptyCache() = 0;
253
+ virtual void cacheInfo(c10::DeviceIndex device, size_t* largestBlock) = 0;
254
+ virtual void* getBaseAllocation(void* ptr, size_t* size) = 0;
255
+ virtual void recordStream(const DataPtr&, CUDAStream stream) = 0;
256
+ virtual DeviceStats getDeviceStats(c10::DeviceIndex device) = 0;
257
+ virtual void resetAccumulatedStats(c10::DeviceIndex device) = 0;
258
+ virtual void resetPeakStats(c10::DeviceIndex device) = 0;
259
+ virtual SnapshotInfo snapshot() = 0;
260
+ virtual void beginAllocateToPool(
261
+ c10::DeviceIndex device,
262
+ MempoolId_t mempool_id,
263
+ std::function<bool(cudaStream_t)> filter) = 0;
264
+ virtual void endAllocateToPool(
265
+ c10::DeviceIndex device,
266
+ MempoolId_t mempool_id) = 0;
267
+ virtual void releasePool(c10::DeviceIndex device, MempoolId_t mempool_id) = 0;
268
+ // returns true if the allocated blocks are equal to expected live allocations
269
+ virtual bool checkPoolLiveAllocations(
270
+ c10::DeviceIndex device,
271
+ MempoolId_t mempool_id,
272
+ const std::unordered_set<void*>& expected_live_allocations) {
273
+ TORCH_CHECK(
274
+ false,
275
+ name(),
276
+ " does not yet support checkPoolLiveAllocations. "
277
+ "If you need it, please file an issue describing your use case.");
278
+ }
279
+ virtual std::shared_ptr<void> getIpcDevPtr(std::string handle) = 0;
280
+ virtual bool isHistoryEnabled() {
281
+ TORCH_CHECK(
282
+ false,
283
+ name(),
284
+ " does not yet support recordHistory. "
285
+ "If you need it, please file an issue describing your use case.");
286
+ }
287
+ virtual void recordHistory(
288
+ bool enabled,
289
+ CreateContextFn context_recorder,
290
+ size_t alloc_trace_max_entries,
291
+ RecordContext when) = 0;
292
+ virtual void attachOutOfMemoryObserver(OutOfMemoryObserver observer) = 0;
293
+
294
+ // Attached AllocatorTraceTracker callbacks will be called while the
295
+ // per-device allocator lock is held. Any additional locks taken from within
296
+ // the callback must be proven to always have the lock order that never
297
+ // triggers a deadlock. In particular, Python's GIL may be held when
298
+ // calling the allocator so it is unsafe to try to acquire the GIL in this
299
+ // callback.
300
+ virtual void attachAllocatorTraceTracker(AllocatorTraceTracker tracker) = 0;
301
+
302
+ virtual void enablePeerAccess(
303
+ c10::DeviceIndex dev,
304
+ c10::DeviceIndex dev_to_access) = 0;
305
+
306
+ // Memory not allocated from cudaMalloc cannot be copied
307
+ // across devices using cudaMemcpyAsync if peer to peer access is disabled;
308
+ // instead it requires cudaMemcpyAsyncPeer.
309
+ // with P2P Enabled, all combinations work
310
+ // with P2P Disabled:
311
+ // cudaMalloc cudaMallocAsync/cuMemMap
312
+ // cudaMemcpyAsyncPeer works works
313
+ // cudaMemcpyAsync works error
314
+
315
+ // This function chooses the Peer version of memcpy when required,
316
+ // based on the devices on which dst and src were allocated.
317
+ virtual cudaError_t memcpyAsync(
318
+ void* dst,
319
+ int dstDevice,
320
+ const void* src,
321
+ int srcDevice,
322
+ size_t count,
323
+ cudaStream_t stream,
324
+ bool p2p_enabled) = 0;
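+
+ // Usage sketch (pointers, byte count and device indices are illustrative):
+ //
+ //   C10_CUDA_CHECK(c10::cuda::CUDACachingAllocator::memcpyAsync(
+ //       dst, /*dstDevice=*/1,
+ //       src, /*srcDevice=*/0,
+ //       nbytes, stream, /*p2p_enabled=*/false));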
325
+ virtual std::shared_ptr<AllocatorState> getCheckpointState(
326
+ c10::DeviceIndex device,
327
+ MempoolId_t id) = 0;
328
+ virtual CheckpointDelta setCheckpointPoolState(
329
+ c10::DeviceIndex device,
330
+ std::shared_ptr<AllocatorState> pps) = 0;
331
+ virtual std::string name() = 0;
332
+ };
333
+
334
+ // Allocator object, statically initialized
335
+ // See BackendInitializer in CUDACachingAllocator.cpp.
336
+ // Atomic loads on x86 are just normal loads,
337
+ // (atomic stores are different), so reading this value
338
+ // is no different than loading a pointer.
339
+ C10_CUDA_API extern std::atomic<CUDAAllocator*> allocator;
340
+
341
+ inline CUDAAllocator* get() {
342
+ return allocator.load();
343
+ }
344
+
345
+ // Called directly by clients.
346
+ inline void* raw_alloc(size_t nbytes) {
347
+ return get()->raw_alloc(nbytes);
348
+ }
349
+
350
+ inline void* raw_alloc_with_stream(size_t nbytes, cudaStream_t stream) {
351
+ return get()->raw_alloc_with_stream(nbytes, stream);
352
+ }
353
+
354
+ inline void raw_delete(void* ptr) {
355
+ return get()->raw_delete(ptr);
356
+ }
357
+
358
+ inline void init(int device_count) {
359
+ return get()->init(device_count);
360
+ }
361
+
362
+ inline void setMemoryFraction(double fraction, c10::DeviceIndex device) {
363
+ return get()->setMemoryFraction(fraction, device);
364
+ }
365
+
366
+ inline void emptyCache() {
367
+ return get()->emptyCache();
368
+ }
369
+
370
+ inline void cacheInfo(c10::DeviceIndex device, size_t* largestBlock) {
371
+ return get()->cacheInfo(device, largestBlock);
372
+ }
373
+
374
+ inline void* getBaseAllocation(void* ptr, size_t* size) {
375
+ return get()->getBaseAllocation(ptr, size);
376
+ }
377
+
378
+ inline void recordStream(const DataPtr& dataPtr, CUDAStream stream) {
379
+ return get()->recordStream(dataPtr, stream);
380
+ }
381
+
382
+ inline DeviceStats getDeviceStats(c10::DeviceIndex device) {
383
+ return get()->getDeviceStats(device);
384
+ }
385
+
386
+ inline void resetAccumulatedStats(c10::DeviceIndex device) {
387
+ return get()->resetAccumulatedStats(device);
388
+ }
389
+
390
+ inline void resetPeakStats(c10::DeviceIndex device) {
391
+ return get()->resetPeakStats(device);
392
+ }
393
+
394
+ inline SnapshotInfo snapshot() {
395
+ return get()->snapshot();
396
+ }
397
+
398
+ inline std::shared_ptr<AllocatorState> getCheckpointState(
399
+ c10::DeviceIndex device,
400
+ MempoolId_t id) {
401
+ return get()->getCheckpointState(device, id);
402
+ }
403
+
404
+ inline CheckpointDelta setCheckpointPoolState(
405
+ c10::DeviceIndex device,
406
+ std::shared_ptr<AllocatorState> pps) {
407
+ return get()->setCheckpointPoolState(device, std::move(pps));
408
+ }
409
+
410
+ // CUDAGraph interactions
411
+ inline void beginAllocateToPool(
412
+ c10::DeviceIndex device,
413
+ MempoolId_t mempool_id,
414
+ std::function<bool(cudaStream_t)> filter) {
415
+ get()->beginAllocateToPool(device, mempool_id, std::move(filter));
416
+ }
417
+
418
+ inline void endAllocateToPool(c10::DeviceIndex device, MempoolId_t mempool_id) {
419
+ get()->endAllocateToPool(device, mempool_id);
420
+ }
421
+
422
+ inline void recordHistory(
423
+ bool enabled,
424
+ CreateContextFn context_recorder,
425
+ size_t alloc_trace_max_entries,
426
+ RecordContext when) {
427
+ return get()->recordHistory(
428
+ enabled, context_recorder, alloc_trace_max_entries, when);
429
+ }
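+
+ // Usage sketch (the entry count is arbitrary and the null context recorder is
+ // an assumption): enable history with room for 10000 trace entries, keeping
+ // context for allocations in the trace history when a recorder is provided:
+ //
+ //   c10::cuda::CUDACachingAllocator::recordHistory(
+ //       /*enabled=*/true,
+ //       /*context_recorder=*/nullptr,
+ //       /*alloc_trace_max_entries=*/10000,
+ //       c10::cuda::CUDACachingAllocator::RecordContext::ALLOC);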
430
+
431
+ inline bool isHistoryEnabled() {
432
+ return get()->isHistoryEnabled();
433
+ }
434
+
435
+ inline bool checkPoolLiveAllocations(
436
+ c10::DeviceIndex device,
437
+ MempoolId_t mempool_id,
438
+ const std::unordered_set<void*>& expected_live_allocations) {
439
+ return get()->checkPoolLiveAllocations(
440
+ device, mempool_id, expected_live_allocations);
441
+ }
442
+
443
+ inline void attachOutOfMemoryObserver(OutOfMemoryObserver observer) {
444
+ return get()->attachOutOfMemoryObserver(std::move(observer));
445
+ }
446
+
447
+ inline void attachAllocatorTraceTracker(AllocatorTraceTracker tracker) {
448
+ return get()->attachAllocatorTraceTracker(std::move(tracker));
449
+ }
450
+
451
+ inline void releasePool(c10::DeviceIndex device, MempoolId_t mempool_id) {
452
+ return get()->releasePool(device, mempool_id);
453
+ }
454
+ // Not part of CUDA_ALLOCATOR_BACKEND_INTERFACE
455
+ inline std::shared_ptr<void> getIpcDevPtr(std::string handle) {
456
+ return get()->getIpcDevPtr(std::move(handle));
457
+ }
458
+
459
+ inline std::string name() {
460
+ return get()->name();
461
+ }
462
+
463
+ inline cudaError_t memcpyAsync(
464
+ void* dst,
465
+ int dstDevice,
466
+ const void* src,
467
+ int srcDevice,
468
+ size_t count,
469
+ cudaStream_t stream,
470
+ bool p2p_enabled) {
471
+ return get()->memcpyAsync(
472
+ dst, dstDevice, src, srcDevice, count, stream, p2p_enabled);
473
+ }
474
+
475
+ inline void enablePeerAccess(
476
+ c10::DeviceIndex dev,
477
+ c10::DeviceIndex dev_to_access) {
478
+ return get()->enablePeerAccess(dev, dev_to_access);
479
+ }
480
+
481
+ } // namespace c10::cuda::CUDACachingAllocator
venv/lib/python3.10/site-packages/torch/include/c10/cuda/CUDADeviceAssertion.h ADDED
@@ -0,0 +1,96 @@
1
+ #pragma once
2
+
3
+ #include <c10/cuda/CUDAException.h>
4
+ #include <c10/macros/Macros.h>
5
+
6
+ namespace c10::cuda {
7
+
8
+ #ifdef TORCH_USE_CUDA_DSA
9
+ // Copy string from `src` to `dst`
10
+ static __device__ void dstrcpy(char* dst, const char* src) {
11
+ int i = 0;
12
+ // Copy string from source to destination, ensuring that it
13
+ // isn't longer than `C10_CUDA_DSA_MAX_STR_LEN-1`
14
+ while (*src != '\0' && i++ < C10_CUDA_DSA_MAX_STR_LEN - 1) {
15
+ *dst++ = *src++;
16
+ }
17
+ *dst = '\0';
18
+ }
19
+
20
+ static __device__ void dsa_add_new_assertion_failure(
21
+ DeviceAssertionsData* assertions_data,
22
+ const char* assertion_msg,
23
+ const char* filename,
24
+ const char* function_name,
25
+ const int line_number,
26
+ const uint32_t caller,
27
+ const dim3 block_id,
28
+ const dim3 thread_id) {
29
+ // `assertions_data` may be nullptr if device-side assertion checking
30
+ // is disabled at run-time. If it is disabled at compile time this
31
+ // function will never be called
32
+ if (!assertions_data) {
33
+ return;
34
+ }
35
+
36
+ // Atomically increment so other threads can fail at the same time
37
+ // Note that incrementing this means that the CPU can observe that
38
+ // a failure has happened and can begin to respond before we've
39
+ // written information about that failure out to the buffer.
40
+ const auto nid = atomicAdd(&(assertions_data->assertion_count), 1);
41
+
42
+ if (nid >= C10_CUDA_DSA_ASSERTION_COUNT) {
43
+ // At this point we're ran out of assertion buffer space.
44
+ // We could print a message about this, but that'd get
45
+ // spammy if a lot of threads did it, so we just silently
46
+ // ignore any other assertion failures. In most cases the
47
+ // failures will all probably be analogous anyway.
48
+ return;
49
+ }
50
+
51
+ // Write information about the assertion failure to memory.
52
+ // Note that this occurs only after the `assertion_count`
53
+ // increment broadcasts that there's been a problem.
54
+ auto& self = assertions_data->assertions[nid];
55
+ dstrcpy(self.assertion_msg, assertion_msg);
56
+ dstrcpy(self.filename, filename);
57
+ dstrcpy(self.function_name, function_name);
58
+ self.line_number = line_number;
59
+ self.caller = caller;
60
+ self.block_id[0] = block_id.x;
61
+ self.block_id[1] = block_id.y;
62
+ self.block_id[2] = block_id.z;
63
+ self.thread_id[0] = thread_id.x;
64
+ self.thread_id[1] = thread_id.y;
65
+ self.thread_id[2] = thread_id.z;
66
+ }
67
+
68
+ // Emulates a kernel assertion. The assertion won't stop the kernel's progress,
69
+ // so you should assume everything the kernel produces is garbage if there's an
70
+ // assertion failure.
71
+ // NOTE: This assumes that `assertions_data` and `assertion_caller_id` are
72
+ // arguments of the kernel and therefore accessible.
73
+ #define CUDA_KERNEL_ASSERT2(condition) \
74
+ do { \
75
+ if (C10_UNLIKELY(!(condition))) { \
76
+ /* Has an atomic element so threads can fail at the same time */ \
77
+ c10::cuda::dsa_add_new_assertion_failure( \
78
+ assertions_data, \
79
+ C10_STRINGIZE(condition), \
80
+ __FILE__, \
81
+ __FUNCTION__, \
82
+ __LINE__, \
83
+ assertion_caller_id, \
84
+ blockIdx, \
85
+ threadIdx); \
86
+ /* Now that the kernel has failed we early exit the kernel, but */ \
87
+ /* otherwise keep going and rely on the host to check UVM and */ \
88
+ /* determine we've had a problem */ \
89
+ return; \
90
+ } \
91
+ } while (false)
92
+ #else
93
+ #define CUDA_KERNEL_ASSERT2(condition) assert(condition)
94
+ #endif
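+
+ // Usage sketch (the kernel below is illustrative and not part of this
+ // header). The kernel must take TORCH_DSA_KERNEL_ARGS (see
+ // CUDADeviceAssertionHost.h) so that `assertions_data` and
+ // `assertion_caller_id` are in scope for CUDA_KERNEL_ASSERT2:
+ //
+ //   __global__ void divide_kernel(
+ //       float* out, const float* a, const float* b, int n,
+ //       TORCH_DSA_KERNEL_ARGS) {
+ //     const int i = blockIdx.x * blockDim.x + threadIdx.x;
+ //     if (i >= n) {
+ //       return;
+ //     }
+ //     CUDA_KERNEL_ASSERT2(b[i] != 0.0f);
+ //     out[i] = a[i] / b[i];
+ //   }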
95
+
96
+ } // namespace c10::cuda
venv/lib/python3.10/site-packages/torch/include/c10/cuda/CUDADeviceAssertionHost.h ADDED
@@ -0,0 +1,164 @@
1
+ #pragma once
2
+
3
+ #include <c10/cuda/CUDAMacros.h>
4
+
5
+ #include <cstdint>
6
+ #include <memory>
7
+ #include <mutex>
8
+ #include <string>
9
+ #include <utility>
10
+ #include <vector>
11
+
12
+ #ifdef USE_CUDA
13
+ #define TORCH_USE_CUDA_DSA
14
+ #endif
15
+
16
+ /// Number of assertion failure messages we can store. If this is too small
17
+ /// threads will fail silently.
18
+ constexpr int C10_CUDA_DSA_ASSERTION_COUNT = 10;
19
+ constexpr int C10_CUDA_DSA_MAX_STR_LEN = 512;
20
+
21
+ namespace c10::cuda {
22
+
23
+ /// Holds information about any device-side assertions that fail.
24
+ /// Held in managed memory and accessed by both the CPU and the GPU.
25
+ struct DeviceAssertionData {
26
+ /// Stringification of the assertion
27
+ // NOLINTNEXTLINE(*-c-arrays)
28
+ char assertion_msg[C10_CUDA_DSA_MAX_STR_LEN]{};
29
+ /// File the assertion was in
30
+ // NOLINTNEXTLINE(*-c-arrays)
31
+ char filename[C10_CUDA_DSA_MAX_STR_LEN]{};
32
+ /// Name of the function the assertion was in
33
+ // NOLINTNEXTLINE(*-c-arrays)
34
+ char function_name[C10_CUDA_DSA_MAX_STR_LEN]{};
35
+ /// Line number the assertion was at
36
+ int line_number{};
37
+ /// Number uniquely identifying the kernel launch that triggered the assertion
38
+ uint32_t caller{};
39
+ /// block_id of the thread that failed the assertion
40
+ // NOLINTNEXTLINE(*-c-arrays)
41
+ int32_t block_id[3]{};
42
+ /// thread_id of the thread that failed the assertion
43
+ // NOLINTNEXTLINE(*-c-arrays)
44
+ int32_t thread_id[3]{};
45
+ };
46
+
47
+ /// Used to hold assertions generated by the device
48
+ /// Held in managed memory and accessed by both the CPU and the GPU.
49
+ struct DeviceAssertionsData {
50
+ /// Total number of assertions found; a subset of these will be recorded
51
+ /// in `assertions`
52
+ int32_t assertion_count{};
53
+ /// An array of assertions that will be written to in a race-free manner
54
+ // NOLINTNEXTLINE(*-c-arrays)
55
+ DeviceAssertionData assertions[C10_CUDA_DSA_ASSERTION_COUNT]{};
56
+ };
57
+
58
+ /// Used to hold info about kernel launches so that we can run kernels
59
+ /// asynchronously and still associate launches with device-side
60
+ /// assertion failures
61
+ struct CUDAKernelLaunchInfo {
62
+ /// Filename of the code where the kernel was launched from
63
+ const char* launch_filename;
64
+ /// Function from which the kernel was launched
65
+ const char* launch_function;
66
+ /// Line number of where the code was launched from
67
+ uint32_t launch_linenum;
68
+ /// Backtrace of where the kernel was launched from, only populated if
69
+ /// CUDAKernelLaunchRegistry::gather_launch_stacktrace is True
70
+ std::string launch_stacktrace;
71
+ /// Kernel that was launched
72
+ const char* kernel_name;
73
+ /// Device the kernel was launched on
74
+ int device;
75
+ /// Stream the kernel was launched on
76
+ int32_t stream;
77
+ /// A number that uniquely identifies the kernel launch
78
+ uint64_t generation_number;
79
+ };
80
+
81
+ /// Circular buffer used to hold information about kernel launches. This is
83
+ /// later used to reconstruct how a device-side kernel assertion failure
84
+ /// occurred. CUDAKernelLaunchRegistry is used as a singleton.
84
+ class C10_CUDA_API CUDAKernelLaunchRegistry {
85
+ private:
86
+ /// Assume that this is the max number of kernel launches that might ever be
87
+ /// enqueued across all streams on a single device
88
+ static constexpr int max_kernel_launches = 1024;
89
+ /// How many kernel launch infos we've inserted. Monotonically increasing so
90
+ /// that the circular queue cannot report stale information, and also used to
91
+ /// mark where we are inserting into the queue
92
+ #ifdef TORCH_USE_CUDA_DSA
93
+ uint64_t generation_number = 0;
94
+ #endif
95
+ /// Shared mutex between writer and accessor to ensure multi-threaded safety.
96
+ mutable std::mutex read_write_mutex;
97
+ /// Used to prevent race conditions in GPU memory allocation
98
+ mutable std::mutex gpu_alloc_mutex;
99
+ /// Pointer to managed memory keeping track of device-side assertions. There
100
+ /// is one entry for each possible device the process might work with. Unused
101
+ /// entries are nullptrs. We could also use an unordered_set here, but this
102
+ /// vector design will be faster and the wasted memory is small since we
103
+ /// expect the number of GPUs per node will always be small
104
+ std::vector<
105
+ std::unique_ptr<DeviceAssertionsData, void (*)(DeviceAssertionsData*)>>
106
+ uvm_assertions;
107
+ /// A single circular buffer holds information about every kernel launch the
108
+ /// process makes across all devices.
109
+ std::vector<CUDAKernelLaunchInfo> kernel_launches;
110
+ bool check_env_for_enable_launch_stacktracing() const;
111
+ bool check_env_for_dsa_enabled() const;
112
+
113
+ public:
114
+ CUDAKernelLaunchRegistry();
115
+ /// Register a new kernel launch and obtain a generation number back to be
116
+ /// passed to the kernel
117
+ uint32_t insert(
118
+ const char* launch_filename,
119
+ const char* launch_function,
120
+ const uint32_t launch_linenum,
121
+ const char* kernel_name,
122
+ const int32_t stream_id);
123
+ /// Get copies of the kernel launch registry and each device's assertion
124
+ /// failure buffer so they can be inspected without raising race conditions
125
+ std::
126
+ pair<std::vector<DeviceAssertionsData>, std::vector<CUDAKernelLaunchInfo>>
127
+ snapshot() const;
128
+ /// Get a pointer to the current device's assertion failure buffer. If no such
129
+ /// buffer exists then one is created. This means that the first kernel launch
130
+ /// made on each device will be slightly slower because memory allocations are
131
+ /// required
132
+ DeviceAssertionsData* get_uvm_assertions_ptr_for_current_device();
133
+ /// Gets the global singleton of the registry
134
+ static CUDAKernelLaunchRegistry& get_singleton_ref();
135
+ /// If not all devices support DSA, we disable it
136
+ const bool do_all_devices_support_managed_memory = false;
137
+ /// Whether or not to gather stack traces when launching kernels
138
+ bool gather_launch_stacktrace = false;
139
+ /// Whether or not host-side DSA is enabled or disabled at run-time
140
+ /// Note: Device-side code cannot be enabled/disabled at run-time
141
+ bool enabled_at_runtime = false;
142
+ /// Whether or not a device has indicated a failure
143
+ bool has_failed() const;
144
+ #ifdef TORCH_USE_CUDA_DSA
145
+ const bool enabled_at_compile_time = true;
146
+ #else
147
+ const bool enabled_at_compile_time = false;
148
+ #endif
149
+ };
150
+
151
+ std::string c10_retrieve_device_side_assertion_info();
152
+
153
+ } // namespace c10::cuda
154
+
155
+ // Each kernel launched with TORCH_DSA_KERNEL_LAUNCH
156
+ // requires the same input arguments. We introduce the following macro to
157
+ // standardize these.
158
+ #define TORCH_DSA_KERNEL_ARGS \
159
+ [[maybe_unused]] c10::cuda::DeviceAssertionsData *const assertions_data, \
160
+ [[maybe_unused]] uint32_t assertion_caller_id
161
+
162
+ // This macro can be used to pass the DSA arguments onward to another
163
+ // function
164
+ #define TORCH_DSA_KERNEL_ARGS_PASS assertions_data, assertion_caller_id
venv/lib/python3.10/site-packages/torch/include/c10/cuda/CUDAException.h ADDED
@@ -0,0 +1,100 @@
1
+ #pragma once
2
+
3
+ #include <c10/cuda/CUDADeviceAssertionHost.h>
4
+ #include <c10/cuda/CUDAMacros.h>
5
+ #include <c10/cuda/CUDAMiscFunctions.h>
6
+ #include <c10/macros/Macros.h>
7
+ #include <c10/util/Exception.h>
8
+ #include <c10/util/irange.h>
9
+ #include <cuda.h>
10
+
11
+ // Note [CHECK macro]
12
+ // ~~~~~~~~~~~~~~~~~~
13
+ // This is a macro so that AT_ERROR can get accurate __LINE__
14
+ // and __FILE__ information. We could split this into a short
15
+ // macro and a function implementation if we pass along __LINE__
16
+ // and __FILE__, but no one has found this worth doing.
17
+
18
+ // Used to denote errors from CUDA framework.
19
+ // This needs to be declared here instead util/Exception.h for proper conversion
20
+ // during hipify.
21
+ namespace c10 {
22
+ class C10_CUDA_API CUDAError : public c10::Error {
23
+ using Error::Error;
24
+ };
25
+ } // namespace c10
26
+
27
+ #define C10_CUDA_CHECK(EXPR) \
28
+ do { \
29
+ const cudaError_t __err = EXPR; \
30
+ c10::cuda::c10_cuda_check_implementation( \
31
+ static_cast<int32_t>(__err), \
32
+ __FILE__, \
33
+ __func__, /* Line number data type not well-defined between \
34
+ compilers, so we perform an explicit cast */ \
35
+ static_cast<uint32_t>(__LINE__), \
36
+ true); \
37
+ } while (0)
38
+
39
+ #define C10_CUDA_CHECK_WARN(EXPR) \
40
+ do { \
41
+ const cudaError_t __err = EXPR; \
42
+ if (C10_UNLIKELY(__err != cudaSuccess)) { \
43
+ auto error_unused C10_UNUSED = cudaGetLastError(); \
44
+ (void)error_unused; \
45
+ TORCH_WARN("CUDA warning: ", cudaGetErrorString(__err)); \
46
+ } \
47
+ } while (0)
48
+
49
+ // Indicates that a CUDA error is handled in a non-standard way
50
+ #define C10_CUDA_ERROR_HANDLED(EXPR) EXPR
51
+
52
+ // Intentionally ignore a CUDA error
53
+ #define C10_CUDA_IGNORE_ERROR(EXPR) \
54
+ do { \
55
+ const cudaError_t __err = EXPR; \
56
+ if (C10_UNLIKELY(__err != cudaSuccess)) { \
57
+ cudaError_t error_unused C10_UNUSED = cudaGetLastError(); \
58
+ (void)error_unused; \
59
+ } \
60
+ } while (0)
61
+
62
+ // Clear the last CUDA error
63
+ #define C10_CUDA_CLEAR_ERROR() \
64
+ do { \
65
+ cudaError_t error_unused C10_UNUSED = cudaGetLastError(); \
66
+ (void)error_unused; \
67
+ } while (0)
68
+
69
+ // This should be used directly after every kernel launch to ensure
70
+ // the launch happened correctly and provide an early, close-to-source
71
+ // diagnostic if it didn't.
72
+ #define C10_CUDA_KERNEL_LAUNCH_CHECK() C10_CUDA_CHECK(cudaGetLastError())
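+
+ // Usage sketch (kernel name and launch configuration are illustrative):
+ //
+ //   my_kernel<<<grid, block, 0, stream>>>(arg0, arg1);
+ //   C10_CUDA_KERNEL_LAUNCH_CHECK();
+ //
+ //   C10_CUDA_CHECK(cudaMemsetAsync(ptr, 0, nbytes, stream));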
73
+
74
+ /// Launches a CUDA kernel appending to it all the information need to handle
75
+ /// device-side assertion failures. Checks that the launch was successful.
76
+ #define TORCH_DSA_KERNEL_LAUNCH( \
77
+ kernel, blocks, threads, shared_mem, stream, ...) \
78
+ do { \
79
+ auto& launch_registry = \
80
+ c10::cuda::CUDAKernelLaunchRegistry::get_singleton_ref(); \
81
+ kernel<<<blocks, threads, shared_mem, stream>>>( \
82
+ __VA_ARGS__, \
83
+ launch_registry.get_uvm_assertions_ptr_for_current_device(), \
84
+ launch_registry.insert( \
85
+ __FILE__, __FUNCTION__, __LINE__, #kernel, stream.id())); \
86
+ C10_CUDA_KERNEL_LAUNCH_CHECK(); \
87
+ } while (0)
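+
+ // Usage sketch (`divide_kernel`, the geometry and the trailing arguments are
+ // illustrative; the kernel must be declared with TORCH_DSA_KERNEL_ARGS). The
+ // macro appends the DSA arguments to the ones listed here:
+ //
+ //   auto stream = c10::cuda::getCurrentCUDAStream();
+ //   TORCH_DSA_KERNEL_LAUNCH(
+ //       divide_kernel, grid, block, /*shared_mem=*/0, stream,
+ //       out, a, b, n);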
88
+
89
+ namespace c10::cuda {
90
+
91
+ /// In the event of a CUDA failure, formats a nice error message about that
92
+ /// failure and also checks for device-side assertion failures
93
+ C10_CUDA_API void c10_cuda_check_implementation(
94
+ const int32_t err,
95
+ const char* filename,
96
+ const char* function_name,
97
+ const int line_number,
98
+ const bool include_device_assertions);
99
+
100
+ } // namespace c10::cuda
venv/lib/python3.10/site-packages/torch/include/c10/cuda/CUDAFunctions.h ADDED
@@ -0,0 +1,116 @@
1
+ #pragma once
2
+
3
+ // This header provides C++ wrappers around commonly used CUDA API functions.
4
+ // The benefit of using C++ here is that we can raise an exception in the
5
+ // event of an error, rather than explicitly pass around error codes. This
6
+ // leads to more natural APIs.
7
+ //
8
+ // The naming convention used here matches the naming convention of torch.cuda
9
+
10
+ #include <c10/core/Device.h>
11
+ #include <c10/core/impl/GPUTrace.h>
12
+ #include <c10/cuda/CUDAException.h>
13
+ #include <c10/cuda/CUDAMacros.h>
14
+ #include <cuda_runtime_api.h>
15
+ namespace c10::cuda {
16
+
17
+ // NB: In the past, we were inconsistent about whether or not this reported
18
+ // an error if there were driver problems or not. Based on experience
19
+ // interacting with users, it seems that people basically ~never want this
20
+ // function to fail; it should just return zero if things are not working.
21
+ // Oblige them.
22
+ // It still might log a warning the first time it is invoked.
23
+ C10_CUDA_API DeviceIndex device_count() noexcept;
24
+
25
+ // Version of device_count that throws if no devices are detected
26
+ C10_CUDA_API DeviceIndex device_count_ensure_non_zero();
27
+
28
+ C10_CUDA_API DeviceIndex current_device();
29
+
30
+ C10_CUDA_API void set_device(DeviceIndex device);
31
+
32
+ C10_CUDA_API void device_synchronize();
33
+
34
+ C10_CUDA_API void warn_or_error_on_sync();
35
+
36
+ // Raw CUDA device management functions
37
+ C10_CUDA_API cudaError_t GetDeviceCount(int* dev_count);
38
+
39
+ C10_CUDA_API cudaError_t GetDevice(DeviceIndex* device);
40
+
41
+ C10_CUDA_API cudaError_t SetDevice(DeviceIndex device);
42
+
43
+ C10_CUDA_API cudaError_t MaybeSetDevice(DeviceIndex device);
44
+
45
+ C10_CUDA_API DeviceIndex ExchangeDevice(DeviceIndex device);
46
+
47
+ C10_CUDA_API DeviceIndex MaybeExchangeDevice(DeviceIndex device);
48
+
49
+ C10_CUDA_API void SetTargetDevice();
50
+
51
+ enum class SyncDebugMode { L_DISABLED = 0, L_WARN, L_ERROR };
52
+
53
+ // this is a holder for c10 global state (similar to at GlobalContext)
54
+ // currently it's used to store cuda synchronization warning state,
55
+ // but can be expanded to hold other related global state, e.g. to
56
+ // record stream usage
57
+ class WarningState {
58
+ public:
59
+ void set_sync_debug_mode(SyncDebugMode l) {
60
+ sync_debug_mode = l;
61
+ }
62
+
63
+ SyncDebugMode get_sync_debug_mode() {
64
+ return sync_debug_mode;
65
+ }
66
+
67
+ private:
68
+ SyncDebugMode sync_debug_mode = SyncDebugMode::L_DISABLED;
69
+ };
70
+
71
+ C10_CUDA_API __inline__ WarningState& warning_state() {
72
+ static WarningState warning_state_;
73
+ return warning_state_;
74
+ }
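+
+ // Usage sketch (illustrative): make synchronizing calls such as
+ // memcpy_and_sync/stream_synchronize below emit a warning instead of passing
+ // silently (this is the state torch.cuda.set_sync_debug_mode is expected to
+ // drive):
+ //
+ //   c10::cuda::warning_state().set_sync_debug_mode(
+ //       c10::cuda::SyncDebugMode::L_WARN);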
75
+ // the subsequent functions are defined in the header because for performance
76
+ // reasons we want them to be inline
77
+ C10_CUDA_API void __inline__ memcpy_and_sync(
78
+ void* dst,
79
+ const void* src,
80
+ int64_t nbytes,
81
+ cudaMemcpyKind kind,
82
+ cudaStream_t stream) {
83
+ if (C10_UNLIKELY(
84
+ warning_state().get_sync_debug_mode() != SyncDebugMode::L_DISABLED)) {
85
+ warn_or_error_on_sync();
86
+ }
87
+ const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
88
+ if (C10_UNLIKELY(interp)) {
89
+ (*interp)->trace_gpu_stream_synchronization(
90
+ reinterpret_cast<uintptr_t>(stream));
91
+ }
92
+ #if defined(TORCH_HIP_VERSION) && (TORCH_HIP_VERSION >= 301)
93
+ C10_CUDA_CHECK(hipMemcpyWithStream(dst, src, nbytes, kind, stream));
94
+ #else
95
+ C10_CUDA_CHECK(cudaMemcpyAsync(dst, src, nbytes, kind, stream));
96
+ C10_CUDA_CHECK(cudaStreamSynchronize(stream));
97
+ #endif
98
+ }
99
+
100
+ C10_CUDA_API void __inline__ stream_synchronize(cudaStream_t stream) {
101
+ if (C10_UNLIKELY(
102
+ warning_state().get_sync_debug_mode() != SyncDebugMode::L_DISABLED)) {
103
+ warn_or_error_on_sync();
104
+ }
105
+ const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
106
+ if (C10_UNLIKELY(interp)) {
107
+ (*interp)->trace_gpu_stream_synchronization(
108
+ reinterpret_cast<uintptr_t>(stream));
109
+ }
110
+ C10_CUDA_CHECK(cudaStreamSynchronize(stream));
111
+ }
112
+
113
+ C10_CUDA_API bool hasPrimaryContext(DeviceIndex device_index);
114
+ C10_CUDA_API c10::optional<DeviceIndex> getDeviceIndexWithPrimaryContext();
115
+
116
+ } // namespace c10::cuda
venv/lib/python3.10/site-packages/torch/include/c10/cuda/CUDAGraphsC10Utils.h ADDED
@@ -0,0 +1,91 @@
1
+ #pragma once
2
+
3
+ #include <c10/cuda/CUDAStream.h>
4
+ #include <iostream>
5
+ #include <utility>
6
+
7
+ // CUDA Graphs utils used by c10 and aten.
8
+ // aten/cuda/CUDAGraphsUtils.cuh adds utils used by aten only.
9
+
10
+ namespace c10::cuda {
11
+
12
+ using CaptureId_t = unsigned long long;
13
+
14
+ // first is set if the instance is created by CUDAGraph::capture_begin.
15
+ // second is set if the instance is created by at::cuda::graph_pool_handle.
16
+ using MempoolId_t = std::pair<CaptureId_t, CaptureId_t>;
17
+
18
+ // RAII guard for "cudaStreamCaptureMode", a thread-local value
19
+ // that controls the error-checking strictness of a capture.
20
+ #if !defined(USE_ROCM) || ROCM_VERSION >= 50300
21
+ struct C10_CUDA_API CUDAStreamCaptureModeGuard {
22
+ CUDAStreamCaptureModeGuard(cudaStreamCaptureMode desired)
23
+ : strictness_(desired) {
24
+ C10_CUDA_CHECK(cudaThreadExchangeStreamCaptureMode(&strictness_));
25
+ }
26
+ ~CUDAStreamCaptureModeGuard() {
27
+ C10_CUDA_CHECK_WARN(cudaThreadExchangeStreamCaptureMode(&strictness_));
28
+ }
29
+
30
+ private:
31
+ cudaStreamCaptureMode strictness_;
32
+ };
33
+ #endif
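+
+ // Usage sketch (illustrative): switch this thread to relaxed capture-mode
+ // checking for a scope; the previous mode is restored on destruction:
+ //
+ //   {
+ //     c10::cuda::CUDAStreamCaptureModeGuard guard(
+ //         cudaStreamCaptureModeRelaxed);
+ //     // work that should be validated under the relaxed rules
+ //   }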
34
+
35
+ #if !defined(USE_ROCM) || ROCM_VERSION >= 50300
36
+ // Protects against enum cudaStreamCaptureStatus implementation changes.
37
+ // Some compilers seem not to like static_assert without the messages.
38
+ static_assert(
39
+ int(cudaStreamCaptureStatus::cudaStreamCaptureStatusNone) == 0,
40
+ "unexpected int(cudaStreamCaptureStatusNone) value");
41
+ static_assert(
42
+ int(cudaStreamCaptureStatus::cudaStreamCaptureStatusActive) == 1,
43
+ "unexpected int(cudaStreamCaptureStatusActive) value");
44
+ static_assert(
45
+ int(cudaStreamCaptureStatus::cudaStreamCaptureStatusInvalidated) == 2,
46
+ "unexpected int(cudaStreamCaptureStatusInvalidated) value");
47
+ #endif
48
+
49
+ enum class CaptureStatus : int {
50
+ #if !defined(USE_ROCM) || ROCM_VERSION >= 50300
51
+ None = int(cudaStreamCaptureStatus::cudaStreamCaptureStatusNone),
52
+ Active = int(cudaStreamCaptureStatus::cudaStreamCaptureStatusActive),
53
+ Invalidated = int(cudaStreamCaptureStatus::cudaStreamCaptureStatusInvalidated)
54
+ #else
55
+ None = 0
56
+ #endif
57
+ };
58
+
59
+ inline std::ostream& operator<<(std::ostream& os, CaptureStatus status) {
60
+ switch (status) {
61
+ case CaptureStatus::None:
62
+ os << "cudaStreamCaptureStatusNone";
63
+ break;
64
+ #if !defined(USE_ROCM) || ROCM_VERSION >= 50300
65
+ case CaptureStatus::Active:
66
+ os << "cudaStreamCaptureStatusActive";
67
+ break;
68
+ case CaptureStatus::Invalidated:
69
+ os << "cudaStreamCaptureStatusInvalidated";
70
+ break;
71
+ #endif
72
+ default:
73
+ TORCH_INTERNAL_ASSERT(
74
+ false, "Unknown CUDA graph CaptureStatus", int(status));
75
+ }
76
+ return os;
77
+ }
78
+
79
+ // Use this version where you're sure a CUDA context exists already.
80
+ inline CaptureStatus currentStreamCaptureStatusMayInitCtx() {
81
+ #if !defined(USE_ROCM) || ROCM_VERSION >= 50300
82
+ cudaStreamCaptureStatus is_capturing{cudaStreamCaptureStatusNone};
83
+ C10_CUDA_CHECK(
84
+ cudaStreamIsCapturing(c10::cuda::getCurrentCUDAStream(), &is_capturing));
85
+ return CaptureStatus(is_capturing);
86
+ #else
87
+ return CaptureStatus::None;
88
+ #endif
89
+ }
90
+
91
+ } // namespace c10::cuda
venv/lib/python3.10/site-packages/torch/include/c10/cuda/CUDAGuard.h ADDED
@@ -0,0 +1,303 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/DeviceType.h>
4
+ #include <c10/core/impl/InlineDeviceGuard.h>
5
+ #include <c10/core/impl/InlineStreamGuard.h>
6
+ #include <c10/cuda/CUDAMacros.h>
7
+ #include <c10/cuda/impl/CUDAGuardImpl.h>
8
+
9
+ #include <cstddef>
10
+
11
+ namespace c10::cuda {
12
+
13
+ // This code is kind of boilerplatey. See Note [Whither the DeviceGuard
14
+ // boilerplate]
15
+
16
+ /// A variant of DeviceGuard that is specialized for CUDA. It accepts
17
+ /// integer indices (interpreting them as CUDA devices) and is a little
18
+ /// more efficient than DeviceGuard (it compiles to straight line
19
+ /// cudaSetDevice/cudaGetDevice calls); however, it can only be used
20
+ /// from code that links against CUDA directly.
21
+ struct CUDAGuard {
22
+ /// No default constructor; see Note [Omitted default constructor from RAII]
23
+ explicit CUDAGuard() = delete;
24
+
25
+ /// Set the current CUDA device to the passed device index.
26
+ explicit CUDAGuard(DeviceIndex device_index) : guard_(device_index) {}
27
+
28
+ /// Sets the current CUDA device to the passed device. Errors if the passed
29
+ /// device is not a CUDA device.
30
+ explicit CUDAGuard(Device device) : guard_(device) {}
31
+
32
+ // Copy is not allowed
33
+ CUDAGuard(const CUDAGuard&) = delete;
34
+ CUDAGuard& operator=(const CUDAGuard&) = delete;
35
+
36
+ // Move is not allowed (there is no uninitialized state)
37
+ CUDAGuard(CUDAGuard&& other) = delete;
38
+ CUDAGuard& operator=(CUDAGuard&& other) = delete;
39
+
40
+ /// Sets the CUDA device to the given device. Errors if the given device
41
+ /// is not a CUDA device.
42
+ void set_device(Device device) {
43
+ guard_.set_device(device);
44
+ }
45
+
46
+ /// Sets the CUDA device to the given device. Errors if the given device
47
+ /// is not a CUDA device. (This method is provided for uniformity with
48
+ /// DeviceGuard).
49
+ void reset_device(Device device) {
50
+ guard_.reset_device(device);
51
+ }
52
+
53
+ /// Sets the CUDA device to the given device index.
54
+ void set_index(DeviceIndex device_index) {
55
+ guard_.set_index(device_index);
56
+ }
57
+
58
+ /// Returns the device that was set upon construction of the guard
59
+ Device original_device() const {
60
+ return guard_.original_device();
61
+ }
62
+
63
+ /// Returns the last device that was set via `set_device`, if any, otherwise
64
+ /// the device passed during construction.
65
+ Device current_device() const {
66
+ return guard_.current_device();
67
+ }
68
+
69
+ private:
70
+ /// The guard for the current device.
71
+ c10::impl::InlineDeviceGuard<impl::CUDAGuardImpl> guard_;
72
+ };
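+
+ // Usage sketch (device index 1 is arbitrary): make a device current for a
+ // scope; the previously current device is restored when the guard dies:
+ //
+ //   {
+ //     c10::cuda::CUDAGuard guard(1);
+ //     // CUDA calls here target device 1
+ //   }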
73
+
74
+ /// A variant of OptionalDeviceGuard that is specialized for CUDA. See
75
+ /// CUDAGuard for when you can use this.
76
+ struct OptionalCUDAGuard {
77
+ /// Create an uninitialized OptionalCUDAGuard.
78
+ explicit OptionalCUDAGuard() : guard_() {}
79
+
80
+ /// Set the current CUDA device to the passed Device, if it is not nullopt.
81
+ explicit OptionalCUDAGuard(optional<Device> device_opt)
82
+ : guard_(device_opt) {}
83
+
84
+ /// Set the current CUDA device to the passed device index, if it is not
85
+ /// nullopt
86
+ explicit OptionalCUDAGuard(optional<DeviceIndex> device_index_opt)
87
+ : guard_(device_index_opt) {}
88
+
89
+ // Copy is not allowed
90
+ OptionalCUDAGuard(const OptionalCUDAGuard&) = delete;
91
+ OptionalCUDAGuard& operator=(const OptionalCUDAGuard&) = delete;
92
+
93
+ // See Note [Move construction for RAII guards is tricky]
94
+ OptionalCUDAGuard(OptionalCUDAGuard&& other) = delete;
95
+
96
+ // See Note [Move assignment for RAII guards is tricky]
97
+ OptionalCUDAGuard& operator=(OptionalCUDAGuard&& other) = delete;
98
+
99
+ /// Sets the CUDA device to the given device, initializing the guard if it
100
+ /// is not already initialized. Errors if the given device is not a CUDA
101
+ /// device.
102
+ void set_device(Device device) {
103
+ guard_.set_device(device);
104
+ }
105
+
106
+ /// Sets the CUDA device to the given device, initializing the guard if it is
107
+ /// not already initialized. Errors if the given device is not a CUDA device.
108
+ /// (This method is provided for uniformity with OptionalDeviceGuard).
109
+ void reset_device(Device device) {
110
+ guard_.reset_device(device);
111
+ }
112
+
113
+ /// Sets the CUDA device to the given device index, initializing the guard if
114
+ /// it is not already initialized.
115
+ void set_index(DeviceIndex device_index) {
116
+ guard_.set_index(device_index);
117
+ }
118
+
119
+ /// Returns the device that was set immediately prior to initialization of the
120
+ /// guard, or nullopt if the guard is uninitialized.
121
+ optional<Device> original_device() const {
122
+ return guard_.original_device();
123
+ }
124
+
125
+ /// Returns the most recent device that was set using this device guard,
126
+ /// either from construction, or via set_device, if the guard is initialized,
127
+ /// or nullopt if the guard is uninitialized.
128
+ optional<Device> current_device() const {
129
+ return guard_.current_device();
130
+ }
131
+
132
+ /// Restore the original CUDA device, resetting this guard to uninitialized
133
+ /// state.
134
+ void reset() {
135
+ guard_.reset();
136
+ }
137
+
138
+ private:
139
+ c10::impl::InlineOptionalDeviceGuard<impl::CUDAGuardImpl> guard_;
140
+ };
141
+
142
+ /// A variant of StreamGuard that is specialized for CUDA. See CUDAGuard
143
+ /// for when you can use this.
144
+ struct CUDAStreamGuard {
145
+ /// No default constructor, see Note [Omitted default constructor from RAII]
146
+ explicit CUDAStreamGuard() = delete;
147
+
148
+ /// Set the current CUDA device to the device associated with the passed
149
+ /// stream, and set the current CUDA stream on that device to the passed
150
+ /// stream. Errors if the Stream is not a CUDA stream.
151
+ explicit CUDAStreamGuard(Stream stream) : guard_(stream) {}
152
+
153
+ /// Copy is disallowed
154
+ CUDAStreamGuard(const CUDAStreamGuard&) = delete;
155
+ CUDAStreamGuard& operator=(const CUDAStreamGuard&) = delete;
156
+
157
+ /// Move is disallowed, as CUDAStreamGuard does not have an uninitialized
158
+ /// state, which is required for moves on types with nontrivial destructors.
159
+ CUDAStreamGuard(CUDAStreamGuard&& other) = delete;
160
+ CUDAStreamGuard& operator=(CUDAStreamGuard&& other) = delete;
161
+
162
+ /// Resets the currently set stream to the original stream and
163
+ /// the currently set device to the original device. Then,
164
+ /// set the current device to the device associated with the passed stream,
165
+ /// and set the current stream on that device to the passed stream.
166
+ /// Errors if the stream passed is not a CUDA stream.
167
+ ///
168
+ /// NOTE: this implementation may skip some stream/device setting if
169
+ /// it can prove that it is unnecessary.
170
+ ///
171
+ /// WARNING: reset_stream does NOT preserve previously set streams on
172
+ /// different devices. If you need to set streams on multiple devices
173
+ /// on CUDA, use CUDAMultiStreamGuard instead.
174
+ void reset_stream(Stream stream) {
175
+ guard_.reset_stream(stream);
176
+ }
177
+
178
+ /// Returns the CUDA stream that was set at the time the guard was
179
+ /// constructed.
180
+ CUDAStream original_stream() const {
181
+ return CUDAStream(CUDAStream::UNCHECKED, guard_.original_stream());
182
+ }
183
+
184
+ /// Returns the most recent CUDA stream that was set using this device guard,
185
+ /// either from construction, or via set_stream.
186
+ CUDAStream current_stream() const {
187
+ return CUDAStream(CUDAStream::UNCHECKED, guard_.current_stream());
188
+ }
189
+
190
+ /// Returns the most recent CUDA device that was set using this device guard,
191
+ /// either from construction, or via set_device/reset_device/set_index.
192
+ Device current_device() const {
193
+ return guard_.current_device();
194
+ }
195
+
196
+ /// Returns the CUDA device that was set at the most recent reset_stream(),
197
+ /// or otherwise the device at construction time.
198
+ Device original_device() const {
199
+ return guard_.original_device();
200
+ }
201
+
202
+ private:
203
+ c10::impl::InlineStreamGuard<impl::CUDAGuardImpl> guard_;
204
+ };
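+
+ // Usage sketch (illustrative): make a pool stream current (and its device
+ // current) for a scope, restoring both afterwards:
+ //
+ //   c10::cuda::CUDAStream s = c10::cuda::getStreamFromPool();
+ //   {
+ //     c10::cuda::CUDAStreamGuard guard(s);
+ //     // kernels launched here are enqueued on `s`
+ //   }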
205
+
206
+ /// A variant of OptionalStreamGuard that is specialized for CUDA. See
207
+ /// CUDAGuard for when you can use this.
208
+ struct OptionalCUDAStreamGuard {
209
+ /// Create an uninitialized guard.
210
+ explicit OptionalCUDAStreamGuard() : guard_() {}
211
+
212
+ /// Set the current CUDA device to the device associated with the passed
213
+ /// stream, and set the current CUDA stream on that device to the passed
214
+ /// stream. Errors if the Stream is not a CUDA stream.
215
+ explicit OptionalCUDAStreamGuard(Stream stream) : guard_(stream) {}
216
+
217
+ /// Set the current device to the device associated with the passed stream,
218
+ /// and set the current stream on that device to the passed stream,
219
+ /// if the passed stream is not nullopt.
220
+ explicit OptionalCUDAStreamGuard(optional<Stream> stream_opt)
221
+ : guard_(stream_opt) {}
222
+
223
+ /// Copy is disallowed
224
+ OptionalCUDAStreamGuard(const OptionalCUDAStreamGuard&) = delete;
225
+ OptionalCUDAStreamGuard& operator=(const OptionalCUDAStreamGuard&) = delete;
226
+
227
+ // See Note [Move construction for RAII guards is tricky]
228
+ OptionalCUDAStreamGuard(OptionalCUDAStreamGuard&& other) = delete;
229
+
230
+ // See Note [Move assignment for RAII guards is tricky]
231
+ OptionalCUDAStreamGuard& operator=(OptionalCUDAStreamGuard&& other) = delete;
232
+
233
+ /// Resets the currently set CUDA stream to the original stream and
234
+ /// the currently set device to the original device. Then,
235
+ /// set the current device to the device associated with the passed stream,
236
+ /// and set the current stream on that device to the passed stream.
237
+ /// Initializes the guard if it was not previously initialized.
238
+ void reset_stream(Stream stream) {
239
+ guard_.reset_stream(stream);
240
+ }
241
+
242
+ /// Returns the CUDA stream that was set at the time the guard was most
243
+ /// recently initialized, or nullopt if the guard is uninitialized.
244
+ optional<CUDAStream> original_stream() const {
245
+ auto r = guard_.original_stream();
246
+ if (r.has_value()) {
247
+ return make_optional(CUDAStream(CUDAStream::UNCHECKED, r.value()));
248
+ } else {
249
+ return nullopt;
250
+ }
251
+ }
252
+
253
+ /// Returns the most recent CUDA stream that was set using this stream guard,
254
+ /// either from construction, or via reset_stream, if the guard is
255
+ /// initialized, or nullopt if the guard is uninitialized.
256
+ optional<CUDAStream> current_stream() const {
257
+ auto r = guard_.current_stream();
258
+ if (r.has_value()) {
259
+ return make_optional(CUDAStream(CUDAStream::UNCHECKED, r.value()));
260
+ } else {
261
+ return nullopt;
262
+ }
263
+ }
264
+
265
+ /// Restore the original CUDA device and stream, resetting this guard to
266
+ /// uninitialized state.
267
+ void reset() {
268
+ guard_.reset();
269
+ }
270
+
271
+ private:
272
+ c10::impl::InlineOptionalStreamGuard<impl::CUDAGuardImpl> guard_;
273
+ };
274
+
275
+ /// A variant of MultiStreamGuard that is specialized for CUDA.
276
+ struct CUDAMultiStreamGuard {
277
+ explicit CUDAMultiStreamGuard(ArrayRef<CUDAStream> streams)
278
+ : guard_(unwrapStreams(streams)) {}
279
+
280
+ /// Copy is disallowed
281
+ CUDAMultiStreamGuard(const CUDAMultiStreamGuard&) = delete;
282
+ CUDAMultiStreamGuard& operator=(const CUDAMultiStreamGuard&) = delete;
283
+
284
+ // See Note [Move construction for RAII guards is tricky]
285
+ CUDAMultiStreamGuard(CUDAMultiStreamGuard&& other) = delete;
286
+
287
+ // See Note [Move assignment for RAII guards is tricky]
288
+ CUDAMultiStreamGuard& operator=(CUDAMultiStreamGuard&& other) = delete;
289
+
290
+ private:
291
+ c10::impl::InlineMultiStreamGuard<impl::CUDAGuardImpl> guard_;
292
+
293
+ static std::vector<Stream> unwrapStreams(ArrayRef<CUDAStream> cudaStreams) {
294
+ std::vector<Stream> streams;
295
+ streams.reserve(cudaStreams.size());
296
+ for (const CUDAStream& cudaStream : cudaStreams) {
297
+ streams.push_back(cudaStream);
298
+ }
299
+ return streams;
300
+ }
301
+ };
302
+
303
+ } // namespace c10::cuda
venv/lib/python3.10/site-packages/torch/include/c10/cuda/CUDAMacros.h ADDED
@@ -0,0 +1,51 @@
1
+ #pragma once
2
+
3
+ #ifndef C10_USING_CUSTOM_GENERATED_MACROS
4
+
5
+ // We have not yet modified the AMD HIP build to generate this file so
6
+ // we add an extra option to specifically ignore it.
7
+ #ifndef C10_CUDA_NO_CMAKE_CONFIGURE_FILE
8
+ #include <c10/cuda/impl/cuda_cmake_macros.h>
9
+ #endif // C10_CUDA_NO_CMAKE_CONFIGURE_FILE
10
+
11
+ #endif
12
+
13
+ // See c10/macros/Export.h for a detailed explanation of what the function
14
+ // of these macros is. We need one set of macros for every separate library
15
+ // we build.
16
+
17
+ #ifdef _WIN32
18
+ #if defined(C10_CUDA_BUILD_SHARED_LIBS)
19
+ #define C10_CUDA_EXPORT __declspec(dllexport)
20
+ #define C10_CUDA_IMPORT __declspec(dllimport)
21
+ #else
22
+ #define C10_CUDA_EXPORT
23
+ #define C10_CUDA_IMPORT
24
+ #endif
25
+ #else // _WIN32
26
+ #if defined(__GNUC__)
27
+ #define C10_CUDA_EXPORT __attribute__((__visibility__("default")))
28
+ #else // defined(__GNUC__)
29
+ #define C10_CUDA_EXPORT
30
+ #endif // defined(__GNUC__)
31
+ #define C10_CUDA_IMPORT C10_CUDA_EXPORT
32
+ #endif // _WIN32
33
+
34
+ // This one is being used by libc10_cuda.so
35
+ #ifdef C10_CUDA_BUILD_MAIN_LIB
36
+ #define C10_CUDA_API C10_CUDA_EXPORT
37
+ #else
38
+ #define C10_CUDA_API C10_CUDA_IMPORT
39
+ #endif
40
+
41
+ /**
42
+ * The maximum number of GPUs that we recognize. Increasing this beyond the
43
+ * initial limit of 16 broke Caffe2 testing, hence the ifdef guards.
44
+ * This value cannot be more than 128 because our DeviceIndex is a uint8_t.
45
+ */
46
+ #ifdef FBCODE_CAFFE2
47
+ // fbcode depends on this value being 16
48
+ #define C10_COMPILE_TIME_MAX_GPUS 16
49
+ #else
50
+ #define C10_COMPILE_TIME_MAX_GPUS 120
51
+ #endif
venv/lib/python3.10/site-packages/torch/include/c10/cuda/CUDAMathCompat.h ADDED
@@ -0,0 +1,152 @@
1
+ #pragma once
2
+
3
+ /* This file defines math functions compatible across different gpu
4
+ * platforms (currently CUDA and HIP).
5
+ */
6
+ #if defined(__CUDACC__) || defined(__HIPCC__)
7
+
8
+ #include <c10/macros/Macros.h>
9
+ #include <c10/util/Exception.h>
10
+
11
+ #ifdef __HIPCC__
12
+ #define __MATH_FUNCTIONS_DECL__ inline C10_DEVICE
13
+ #else /* __HIPCC__ */
14
+ #ifdef __CUDACC_RTC__
15
+ #define __MATH_FUNCTIONS_DECL__ C10_HOST_DEVICE
16
+ #else /* __CUDACC_RTC__ */
17
+ #define __MATH_FUNCTIONS_DECL__ static inline C10_HOST_DEVICE
18
+ #endif /* __CUDACC_RTC__ */
19
+ #endif /* __HIPCC__ */
20
+
21
+ namespace c10::cuda::compat {
22
+
23
+ __MATH_FUNCTIONS_DECL__ float abs(float x) {
24
+ return ::fabsf(x);
25
+ }
26
+ __MATH_FUNCTIONS_DECL__ double abs(double x) {
27
+ return ::fabs(x);
28
+ }
29
+
30
+ __MATH_FUNCTIONS_DECL__ float exp(float x) {
31
+ return ::expf(x);
32
+ }
33
+ __MATH_FUNCTIONS_DECL__ double exp(double x) {
34
+ return ::exp(x);
35
+ }
36
+
37
+ __MATH_FUNCTIONS_DECL__ float ceil(float x) {
38
+ return ::ceilf(x);
39
+ }
40
+ __MATH_FUNCTIONS_DECL__ double ceil(double x) {
41
+ return ::ceil(x);
42
+ }
43
+
44
+ __MATH_FUNCTIONS_DECL__ float copysign(float x, float y) {
45
+ #if defined(__CUDA_ARCH__) || defined(__HIPCC__)
46
+ return ::copysignf(x, y);
47
+ #else
48
+ // std::copysign gets ICE/Segfaults with gcc 7.5/8 on arm64
49
+ // (e.g. Jetson), see PyTorch PR #51834
50
+ // This host function needs to be here for the compiler but is never used
51
+ TORCH_INTERNAL_ASSERT(
52
+ false, "CUDAMathCompat copysign should not run on the CPU");
53
+ #endif
54
+ }
55
+ __MATH_FUNCTIONS_DECL__ double copysign(double x, double y) {
56
+ #if defined(__CUDA_ARCH__) || defined(__HIPCC__)
57
+ return ::copysign(x, y);
58
+ #else
59
+ // see above
60
+ TORCH_INTERNAL_ASSERT(
61
+ false, "CUDAMathCompat copysign should not run on the CPU");
62
+ #endif
63
+ }
64
+
65
+ __MATH_FUNCTIONS_DECL__ float floor(float x) {
66
+ return ::floorf(x);
67
+ }
68
+ __MATH_FUNCTIONS_DECL__ double floor(double x) {
69
+ return ::floor(x);
70
+ }
71
+
72
+ __MATH_FUNCTIONS_DECL__ float log(float x) {
73
+ return ::logf(x);
74
+ }
75
+ __MATH_FUNCTIONS_DECL__ double log(double x) {
76
+ return ::log(x);
77
+ }
78
+
79
+ __MATH_FUNCTIONS_DECL__ float log1p(float x) {
80
+ return ::log1pf(x);
81
+ }
82
+
83
+ __MATH_FUNCTIONS_DECL__ double log1p(double x) {
84
+ return ::log1p(x);
85
+ }
86
+
87
+ __MATH_FUNCTIONS_DECL__ float max(float x, float y) {
88
+ return ::fmaxf(x, y);
89
+ }
90
+ __MATH_FUNCTIONS_DECL__ double max(double x, double y) {
91
+ return ::fmax(x, y);
92
+ }
93
+
94
+ __MATH_FUNCTIONS_DECL__ float min(float x, float y) {
95
+ return ::fminf(x, y);
96
+ }
97
+ __MATH_FUNCTIONS_DECL__ double min(double x, double y) {
98
+ return ::fmin(x, y);
99
+ }
100
+
101
+ __MATH_FUNCTIONS_DECL__ float pow(float x, float y) {
102
+ return ::powf(x, y);
103
+ }
104
+ __MATH_FUNCTIONS_DECL__ double pow(double x, double y) {
105
+ return ::pow(x, y);
106
+ }
107
+
108
+ __MATH_FUNCTIONS_DECL__ void sincos(float x, float* sptr, float* cptr) {
109
+ return ::sincosf(x, sptr, cptr);
110
+ }
111
+ __MATH_FUNCTIONS_DECL__ void sincos(double x, double* sptr, double* cptr) {
112
+ return ::sincos(x, sptr, cptr);
113
+ }
114
+
115
+ __MATH_FUNCTIONS_DECL__ float sqrt(float x) {
116
+ return ::sqrtf(x);
117
+ }
118
+ __MATH_FUNCTIONS_DECL__ double sqrt(double x) {
119
+ return ::sqrt(x);
120
+ }
121
+
122
+ __MATH_FUNCTIONS_DECL__ float rsqrt(float x) {
123
+ return ::rsqrtf(x);
124
+ }
125
+ __MATH_FUNCTIONS_DECL__ double rsqrt(double x) {
126
+ return ::rsqrt(x);
127
+ }
128
+
129
+ __MATH_FUNCTIONS_DECL__ float tan(float x) {
130
+ return ::tanf(x);
131
+ }
132
+ __MATH_FUNCTIONS_DECL__ double tan(double x) {
133
+ return ::tan(x);
134
+ }
135
+
136
+ __MATH_FUNCTIONS_DECL__ float tanh(float x) {
137
+ return ::tanhf(x);
138
+ }
139
+ __MATH_FUNCTIONS_DECL__ double tanh(double x) {
140
+ return ::tanh(x);
141
+ }
142
+
143
+ __MATH_FUNCTIONS_DECL__ float normcdf(float x) {
144
+ return ::normcdff(x);
145
+ }
146
+ __MATH_FUNCTIONS_DECL__ double normcdf(double x) {
147
+ return ::normcdf(x);
148
+ }
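+
+ // Usage sketch (illustrative device helper): the overloads above let
+ // templated device code use one spelling for float and double:
+ //
+ //   template <typename T>
+ //   __device__ T normalized(T x, T eps) {
+ //     return x * c10::cuda::compat::rsqrt(x * x + eps);
+ //   }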
149
+
150
+ } // namespace c10::cuda::compat
151
+
152
+ #endif
venv/lib/python3.10/site-packages/torch/include/c10/cuda/CUDAMiscFunctions.h ADDED
@@ -0,0 +1,12 @@
1
+ #pragma once
2
+ // this file is to avoid circular dependency between CUDAFunctions.h and
3
+ // CUDAExceptions.h
4
+
5
+ #include <c10/cuda/CUDAMacros.h>
6
+
7
+ #include <mutex>
8
+
9
+ namespace c10::cuda {
10
+ C10_CUDA_API const char* get_cuda_check_suffix() noexcept;
11
+ C10_CUDA_API std::mutex* getFreeMutex();
12
+ } // namespace c10::cuda
venv/lib/python3.10/site-packages/torch/include/c10/cuda/CUDAStream.h ADDED
@@ -0,0 +1,271 @@
1
+ #pragma once
2
+
3
+ #include <cstdint>
4
+ #include <utility>
5
+
6
+ #include <cuda_runtime_api.h>
7
+
8
+ #include <c10/core/DeviceGuard.h>
9
+ #include <c10/core/Stream.h>
10
+ #include <c10/cuda/CUDAFunctions.h>
11
+ #include <c10/util/Exception.h>
12
+
13
+ /*
14
+ * Stream pool note.
15
+ *
16
+ * A CUDAStream is an abstraction of an actual cuStream on the GPU. CUDAStreams
17
+ * are backed by cuStreams, but they use several pools to minimize the costs
18
+ * associated with creating, retaining, and destroying cuStreams.
19
+ *
20
+ * There are three pools per device, and a device's pools are lazily created.
21
+ *
22
+ * The first pool contains only the default stream. When the default stream
23
+ * is requested it's returned.
24
+ *
25
+ * The second pool is the "low priority" or "default priority" streams. In
26
+ * HIP builds there is no distinction between streams in this pool and streams
27
+ * in the third pool (below). There are 32 of these streams per device, and
28
+ * when a stream is requested one of these streams is returned round-robin.
29
+ * That is, the first stream requested is at index 0, the second at index 1...
30
+ * to index 31, then index 0 again.
31
+ *
32
+ * This means that if 33 low priority streams are requested, the first and
33
+ * last streams requested are actually the same stream (under the covers)
34
+ * and kernels enqueued on them cannot run concurrently.
35
+ *
36
+ * The third pool is the "high priority" streams. The third pool acts like
37
+ * the second pool except the streams are created with a higher priority.
38
+ *
39
+ * These pools suggest that stream users should prefer many short-lived streams,
40
+ * as the cost of acquiring and releasing streams is effectively zero. If
41
+ * many longer-lived streams are required in performance critical scenarios
42
+ * then the functionality here may need to be extended to allow, for example,
43
+ * "reserving" a subset of the pool so that other streams do not accidentally
44
+ * overlap the performance critical streams.
45
+ *
46
+ * Note: although the notion of "current stream for device" is thread local
47
+ * (every OS thread has a separate current stream, as one might expect),
48
+ * the stream pool is global across all threads; stream 0 is always stream 0
49
+ * no matter which thread you use it on. Multiple threads can synchronize
50
+ * on the same stream. Although the CUDA documentation is not very clear
51
+ * on the matter, streams are thread safe; e.g., it is safe to enqueue
52
+ * a kernel on the same stream from two different threads.
53
+ */
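+
+ // Usage sketch (illustrative): take a high-priority stream from the pool,
+ // make it current for a scope (CUDAStreamGuard lives in c10/cuda/CUDAGuard.h),
+ // then wait for its work to finish:
+ //
+ //   c10::cuda::CUDAStream s =
+ //       c10::cuda::getStreamFromPool(/*isHighPriority=*/true);
+ //   {
+ //     c10::cuda::CUDAStreamGuard guard(s);
+ //     // enqueue kernels on `s`
+ //   }
+ //   s.synchronize();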
54
+
55
+ namespace c10::cuda {
56
+
57
+ static constexpr int max_compile_time_stream_priorities = 4;
58
+
59
+ // Value object representing a CUDA stream. This is just a wrapper
60
+ // around c10::Stream, but it comes with a little extra CUDA-specific
61
+ // functionality (conversion to cudaStream_t), and a guarantee that
62
+ // the wrapped c10::Stream really is a CUDA stream.
63
+ class C10_CUDA_API CUDAStream {
64
+ public:
65
+ enum Unchecked { UNCHECKED };
66
+
67
+ /// Construct a CUDAStream from a Stream. This construction is checked,
68
+ /// and will raise an error if the Stream is not, in fact, a CUDA stream.
69
+ explicit CUDAStream(Stream stream) : stream_(stream) {
70
+ TORCH_CHECK(stream_.device_type() == DeviceType::CUDA);
71
+ }
72
+
73
+ /// Construct a CUDAStream from a Stream with no error checking.
74
+ /// This constructor uses the "named" constructor idiom, and can
75
+ /// be invoked as: CUDAStream(CUDAStream::UNCHECKED, stream)
76
+ explicit CUDAStream(Unchecked, Stream stream) : stream_(stream) {}
77
+
78
+ bool operator==(const CUDAStream& other) const noexcept {
79
+ return unwrap() == other.unwrap();
80
+ }
81
+
82
+ bool operator!=(const CUDAStream& other) const noexcept {
83
+ return unwrap() != other.unwrap();
84
+ }
85
+
86
+ /// Implicit conversion to cudaStream_t.
87
+ operator cudaStream_t() const {
88
+ return stream();
89
+ }
90
+
91
+ /// Implicit conversion to Stream (a.k.a., forget that the stream is a
92
+ /// CUDA stream).
93
+ operator Stream() const {
94
+ return unwrap();
95
+ }
96
+
97
+ /// Used to avoid baking in device type explicitly to Python-side API.
98
+ DeviceType device_type() const {
99
+ return DeviceType::CUDA;
100
+ }
101
+
102
+ /// Get the CUDA device index that this stream is associated with.
103
+ DeviceIndex device_index() const {
104
+ return stream_.device_index();
105
+ }
106
+
107
+ /// Get the full Device that this stream is associated with. The Device
108
+ /// is guaranteed to be a CUDA device.
109
+ Device device() const {
110
+ return Device(DeviceType::CUDA, device_index());
111
+ }
112
+
113
+ /// Return the stream ID corresponding to this particular stream.
114
+ StreamId id() const {
115
+ return stream_.id();
116
+ }
117
+
118
+ bool query() const {
119
+ DeviceGuard guard{stream_.device()};
120
+ cudaError_t err = C10_CUDA_ERROR_HANDLED(cudaStreamQuery(stream()));
121
+
122
+ if (err == cudaSuccess) {
123
+ return true;
124
+ } else if (err != cudaErrorNotReady) {
125
+ C10_CUDA_CHECK(err);
126
+ } else {
127
+ // ignore and clear the error if not ready
128
+ (void)cudaGetLastError();
129
+ }
130
+
131
+ return false;
132
+ }
133
+
134
+ void synchronize() const {
135
+ DeviceGuard guard{stream_.device()};
136
+ c10::cuda::stream_synchronize(stream());
137
+ }
138
+
139
+ int priority() const {
140
+ DeviceGuard guard{stream_.device()};
141
+ int priority = 0;
142
+ C10_CUDA_CHECK(cudaStreamGetPriority(stream(), &priority));
143
+ return priority;
144
+ }
145
+
146
+ /// Explicit conversion to cudaStream_t.
147
+ cudaStream_t stream() const;
148
+
149
+ /// Explicit conversion to Stream.
150
+ Stream unwrap() const {
151
+ return stream_;
152
+ }
153
+
154
+ /// Reversibly pack a CUDAStream into a struct representation.
155
+ /// Previously the stream's data was packed into a single int64_t,
156
+ /// as it was assumed the fields would not require more than
157
+ /// 64 bits of storage in total.
158
+ /// See https://github.com/pytorch/pytorch/issues/75854
159
+ /// for more information regarding newer platforms that may violate
160
+ /// this assumption.
161
+ ///
162
+ /// The CUDAStream can be unpacked using unpack().
163
+ struct c10::StreamData3 pack3() const {
164
+ return stream_.pack3();
165
+ }
166
+
167
+ // Unpack a CUDAStream from the 3 fields generated by pack().
168
+ static CUDAStream unpack3(
169
+ StreamId stream_id,
170
+ DeviceIndex device_index,
171
+ DeviceType device_type) {
172
+ return CUDAStream(Stream::unpack3(stream_id, device_index, device_type));
173
+ }
174
+
175
+ static std::tuple<int, int> priority_range() {
176
+ // Note: this returns the range of priority **supported by PyTorch**, not
177
+ // the range of priority **supported by CUDA**. The former is a subset of
178
+ // the latter.
179
+ int least_priority = 0, greatest_priority = 0;
180
+ C10_CUDA_CHECK(
181
+ cudaDeviceGetStreamPriorityRange(&least_priority, &greatest_priority));
182
+ #ifdef USE_ROCM
183
+ // See Note [HIP stream priorities]
184
+ TORCH_INTERNAL_ASSERT(
185
+ least_priority == 1, "Unexpected HIP stream priority range");
186
+ least_priority = 0;
187
+ #else
188
+ TORCH_INTERNAL_ASSERT(
189
+ least_priority == 0, "Unexpected CUDA stream priority range");
190
+ #endif
191
+ TORCH_INTERNAL_ASSERT(
192
+ greatest_priority <= -1, "Unexpected CUDA stream priority range");
193
+ greatest_priority = std::max(
194
+ -c10::cuda::max_compile_time_stream_priorities + 1, greatest_priority);
195
+ return std::make_tuple(least_priority, greatest_priority);
196
+ }
197
+
198
+ // Deleted for now; use CUDAEvent::block instead
199
+ // void synchronize_with(const CUDAEvent& event) const;
200
+
201
+ private:
202
+ Stream stream_;
203
+ };
204
+
205
+ /**
+ * Get a new stream from the CUDA stream pool. You can think of this
+ * as "creating" a new stream, but no such creation actually happens;
+ * instead, streams are preallocated from the pool and returned in a
+ * round-robin fashion.
+ *
+ * You can request a stream from the high priority pool by setting
+ * isHighPriority to true, or a stream for a specific device by setting device
+ * (defaulting to the current device).
+ */
+ C10_API CUDAStream
+ getStreamFromPool(const bool isHighPriority = false, DeviceIndex device = -1);
+ // no default priority to disambiguate overloads
+ C10_API CUDAStream
+ getStreamFromPool(const int priority, DeviceIndex device = -1);
+
+ /**
+ * Get a CUDAStream from an externally allocated one.
+ *
+ * This is mainly for interoperability with different libraries where we
+ * want to operate on a non-torch allocated stream for data exchange or similar
+ * purposes.
+ */
+ C10_API CUDAStream
+ getStreamFromExternal(cudaStream_t ext_stream, DeviceIndex device_index);
+
+ /**
+ * Get the default CUDA stream, for the passed CUDA device, or for the
+ * current device if no device index is passed. The default stream is
+ * where most computation occurs when you aren't explicitly using
+ * streams.
+ */
+ C10_API CUDAStream getDefaultCUDAStream(DeviceIndex device_index = -1);
+
+ /**
+ * Get the current CUDA stream, for the passed CUDA device, or for the
+ * current device if no device index is passed. The current CUDA stream
+ * will usually be the default CUDA stream for the device, but it may
+ * be different if someone called 'setCurrentCUDAStream' or used 'StreamGuard'
+ * or 'CUDAStreamGuard'.
+ */
+ C10_API CUDAStream getCurrentCUDAStream(DeviceIndex device_index = -1);
+
+ /**
+ * Set the current stream on the device of the passed in stream to be
+ * the passed in stream. Yes, you read that right: this function
+ * has *nothing* to do with the current device: it toggles the current
+ * stream of the device of the passed stream.
+ *
+ * Confused? Avoid using this function; prefer using 'CUDAStreamGuard' instead
+ * (which will switch both your current device and current stream in the way you
+ * expect, and reset it back to its original state afterwards).
+ */
+ C10_API void setCurrentCUDAStream(CUDAStream stream);
+
+ C10_API std::ostream& operator<<(std::ostream& stream, const CUDAStream& s);
+
+ } // namespace c10::cuda
+
+ namespace std {
+ template <>
+ struct hash<c10::cuda::CUDAStream> {
+ size_t operator()(c10::cuda::CUDAStream s) const noexcept {
+ return std::hash<c10::Stream>{}(s.unwrap());
+ }
+ };
+ } // namespace std
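
For readers skimming this header, the sketch below shows one way the stream API above fits together. It is illustrative only: it assumes a CUDA-enabled build linked against c10_cuda and at least one visible device; nothing in it is part of the header itself.

#include <c10/cuda/CUDAStream.h>
#include <iostream>

int main() {
  // Borrow a high-priority stream from the preallocated pool (no stream is
  // actually created here; the pool hands them out round-robin).
  c10::cuda::CUDAStream pool_stream =
      c10::cuda::getStreamFromPool(/*isHighPriority=*/true);

  // Make it the current stream for its device, then read it back.
  c10::cuda::setCurrentCUDAStream(pool_stream);
  c10::cuda::CUDAStream current =
      c10::cuda::getCurrentCUDAStream(pool_stream.device_index());
  std::cout << "current stream: " << current << "\n";

  // Non-blocking completion check, then a blocking wait.
  if (!current.query()) {
    current.synchronize();
  }

  // Restore the default stream for that device.
  c10::cuda::setCurrentCUDAStream(
      c10::cuda::getDefaultCUDAStream(pool_stream.device_index()));
  return 0;
}

In real code, prefer scope-based guards (StreamGuard / CUDAStreamGuard, as the comments above recommend) over calling setCurrentCUDAStream directly, since the guards restore the previous device and stream automatically.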
venv/lib/python3.10/site-packages/torch/include/c10/cuda/driver_api.h ADDED
@@ -0,0 +1,49 @@
+ #pragma once
+ #include <cuda.h>
+ #define NVML_NO_UNVERSIONED_FUNC_DEFS
+ #include <nvml.h>
+
+ #define C10_CUDA_DRIVER_CHECK(EXPR) \
+ do { \
+ CUresult __err = EXPR; \
+ if (__err != CUDA_SUCCESS) { \
+ const char* err_str; \
+ CUresult get_error_str_err C10_UNUSED = \
+ c10::cuda::DriverAPI::get()->cuGetErrorString_(__err, &err_str); \
+ if (get_error_str_err != CUDA_SUCCESS) { \
+ AT_ERROR("CUDA driver error: unknown error"); \
+ } else { \
+ AT_ERROR("CUDA driver error: ", err_str); \
+ } \
+ } \
+ } while (0)
+
+ #define C10_LIBCUDA_DRIVER_API(_) \
+ _(cuMemAddressReserve) \
+ _(cuMemRelease) \
+ _(cuMemMap) \
+ _(cuMemAddressFree) \
+ _(cuMemSetAccess) \
+ _(cuMemUnmap) \
+ _(cuMemCreate) \
+ _(cuGetErrorString)
+
+ #define C10_NVML_DRIVER_API(_) \
+ _(nvmlInit_v2) \
+ _(nvmlDeviceGetHandleByPciBusId_v2) \
+ _(nvmlDeviceGetNvLinkRemoteDeviceType) \
+ _(nvmlDeviceGetNvLinkRemotePciInfo_v2) \
+ _(nvmlDeviceGetComputeRunningProcesses)
+
+ namespace c10::cuda {
+
+ struct DriverAPI {
+ #define CREATE_MEMBER(name) decltype(&name) name##_;
+ C10_LIBCUDA_DRIVER_API(CREATE_MEMBER)
+ C10_NVML_DRIVER_API(CREATE_MEMBER)
+ #undef CREATE_MEMBER
+ static DriverAPI* get();
+ static void* get_nvml_handle();
+ };
+
+ } // namespace c10::cuda
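
The CREATE_MEMBER X-macro above expands each listed driver/NVML symbol into a function-pointer member whose name gains a trailing underscore (for example, decltype(&cuMemCreate) cuMemCreate_;), and DriverAPI::get() is expected to return a singleton whose members have been resolved from the driver libraries at runtime. A hypothetical call site, wrapped in C10_CUDA_DRIVER_CHECK, might look like the following sketch; the helper name and the 2 MiB size are illustrative, not part of this header.

#include <c10/cuda/driver_api.h>

void reserve_and_free_va_range() {
  // Reserve a 2 MiB virtual address range through the lazily-resolved
  // cuMemAddressReserve entry point, then release it again. Any CUresult
  // other than CUDA_SUCCESS is turned into a fatal error by the macro.
  CUdeviceptr ptr = 0;
  constexpr size_t kSize = 2 << 20; // 2 MiB, a typical VMM granularity
  C10_CUDA_DRIVER_CHECK(c10::cuda::DriverAPI::get()->cuMemAddressReserve_(
      &ptr, kSize, /*alignment=*/0, /*addr=*/0, /*flags=*/0));
  C10_CUDA_DRIVER_CHECK(
      c10::cuda::DriverAPI::get()->cuMemAddressFree_(ptr, kSize));
}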
venv/lib/python3.10/site-packages/torch/include/c10/cuda/impl/CUDAGuardImpl.h ADDED
@@ -0,0 +1,212 @@
+ #pragma once
+
+ #include <c10/core/impl/DeviceGuardImplInterface.h>
+ #include <c10/core/impl/GPUTrace.h>
+ #include <c10/macros/Macros.h>
+ #include <c10/util/Exception.h>
+
+ #include <c10/cuda/CUDACachingAllocator.h>
+ #include <c10/cuda/CUDAException.h>
+ #include <c10/cuda/CUDAFunctions.h>
+ #include <c10/cuda/CUDAStream.h>
+
+ #include <c10/core/Device.h>
+ #include <c10/core/DeviceType.h>
+ #include <c10/core/Stream.h>
+ #include <c10/core/impl/PyInterpreter.h>
+ #include <c10/util/Optional.h>
+ #include <cuda_runtime_api.h>
+ #include <cstdint>
+
+ namespace c10::cuda::impl {
+
+ struct CUDAGuardImpl final : public c10::impl::DeviceGuardImplInterface {
+ static constexpr DeviceType static_type = DeviceType::CUDA;
+
+ CUDAGuardImpl() = default;
+ explicit CUDAGuardImpl(DeviceType t) {
+ TORCH_INTERNAL_ASSERT(t == DeviceType::CUDA);
+ }
+ DeviceType type() const override {
+ return DeviceType::CUDA;
+ }
+ Device exchangeDevice(Device d) const override {
+ TORCH_INTERNAL_ASSERT(d.is_cuda());
+ auto old_device_index = c10::cuda::ExchangeDevice(d.index());
+ return Device(DeviceType::CUDA, old_device_index);
+ }
+ Device getDevice() const override {
+ DeviceIndex device = 0;
+ C10_CUDA_CHECK(c10::cuda::GetDevice(&device));
+ return Device(DeviceType::CUDA, device);
+ }
+ c10::optional<Device> uncheckedGetDevice() const noexcept {
+ DeviceIndex device{-1};
+ const auto err = C10_CUDA_ERROR_HANDLED(c10::cuda::GetDevice(&device));
+ C10_CUDA_CHECK_WARN(err);
+ if (err != cudaSuccess) {
+ return c10::nullopt;
+ }
+ return Device(DeviceType::CUDA, device);
+ }
+ void setDevice(Device d) const override {
+ TORCH_INTERNAL_ASSERT(d.is_cuda());
+ C10_CUDA_CHECK(c10::cuda::SetDevice(d.index()));
+ }
+ void uncheckedSetDevice(Device d) const noexcept override {
+ C10_CUDA_CHECK_WARN(c10::cuda::MaybeSetDevice(d.index()));
+ }
+ Stream getStream(Device d) const noexcept override {
+ return getCurrentCUDAStream(d.index()).unwrap();
+ }
+ Stream getDefaultStream(Device d) const override {
+ return getDefaultCUDAStream(d.index());
+ }
+ Stream getStreamFromGlobalPool(Device d, bool isHighPriority = false)
+ const override {
+ return getStreamFromPool(isHighPriority, d.index());
+ }
+ // NB: These do NOT set the current device
+ Stream exchangeStream(Stream s) const noexcept override {
+ CUDAStream cs(s);
+ auto old_stream = getCurrentCUDAStream(s.device().index());
+ setCurrentCUDAStream(cs);
+ return old_stream.unwrap();
+ }
+ DeviceIndex deviceCount() const noexcept override {
+ return device_count();
+ }
+
+ // Event-related functions
+ void createEvent(cudaEvent_t* cuda_event, const EventFlag flag) const {
+ // Maps PyTorch's Event::Flag to CUDA flag
+ auto cuda_flag = cudaEventDefault;
+ switch (flag) {
+ case EventFlag::PYTORCH_DEFAULT:
+ case EventFlag::CUDA_EVENT_DISABLE_TIMING:
+ cuda_flag = cudaEventDisableTiming;
+ break;
+ case EventFlag::BACKEND_DEFAULT:
+ case EventFlag::CUDA_EVENT_DEFAULT:
+ cuda_flag = cudaEventDefault;
+ break;
+ default:
+ TORCH_CHECK(false, "CUDA event received unknown flag");
+ }
+
+ C10_CUDA_CHECK(cudaEventCreateWithFlags(cuda_event, cuda_flag));
+ const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
+ if (C10_UNLIKELY(interp)) {
+ (*interp)->trace_gpu_event_creation(
+ reinterpret_cast<uintptr_t>(cuda_event));
+ }
+ }
+
+ void destroyEvent(void* event, const DeviceIndex device_index)
+ const noexcept override {
+ if (!event)
+ return;
+ auto cuda_event = static_cast<cudaEvent_t>(event);
+ DeviceIndex orig_device{-1};
+ C10_CUDA_CHECK_WARN(c10::cuda::GetDevice(&orig_device));
+ C10_CUDA_CHECK_WARN(c10::cuda::SetDevice(device_index));
+ const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
+ if (C10_UNLIKELY(interp)) {
+ (*interp)->trace_gpu_event_deletion(
+ reinterpret_cast<uintptr_t>(cuda_event));
+ }
+ C10_CUDA_CHECK_WARN(cudaEventDestroy(cuda_event));
+ C10_CUDA_CHECK_WARN(c10::cuda::SetDevice(orig_device));
+ }
+
+ void record(
+ void** event,
+ const Stream& stream,
+ const DeviceIndex device_index,
+ const EventFlag flag) const override {
+ TORCH_CHECK(
+ device_index == -1 || device_index == stream.device_index(),
+ "Event device index ",
+ device_index,
+ " does not match recording stream's device index ",
+ stream.device_index(),
+ ".");
+
+ cudaEvent_t cuda_event = static_cast<cudaEvent_t>(*event);
+ CUDAStream cuda_stream{stream};
+
+ // Moves to stream's device to record
+ const auto orig_device = getDevice();
+ setDevice(stream.device());
+
+ // Creates the event (lazily)
+ if (!cuda_event)
+ createEvent(&cuda_event, flag);
+ C10_CUDA_CHECK(cudaEventRecord(cuda_event, cuda_stream));
+ // Makes the void* point to the (possibly just allocated) CUDA event
+ *event = cuda_event;
+ const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
+ if (C10_UNLIKELY(interp)) {
+ (*interp)->trace_gpu_event_record(
+ reinterpret_cast<uintptr_t>(cuda_event),
+ reinterpret_cast<uintptr_t>(cuda_stream.stream()));
+ }
+
+ // Resets device
+ setDevice(orig_device);
+ }
+
+ void block(void* event, const Stream& stream) const override {
+ if (!event)
+ return;
+ cudaEvent_t cuda_event = static_cast<cudaEvent_t>(event);
+ CUDAStream cuda_stream{stream};
+ const auto orig_device = getDevice();
+ setDevice(stream.device());
+ C10_CUDA_CHECK(cudaStreamWaitEvent(
+ cuda_stream,
+ cuda_event,
+ /*flags (must be zero)=*/0));
+ const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
+ if (C10_UNLIKELY(interp)) {
+ (*interp)->trace_gpu_event_wait(
+ reinterpret_cast<uintptr_t>(cuda_event),
+ reinterpret_cast<uintptr_t>(cuda_stream.stream()));
+ }
+ setDevice(orig_device);
+ }
+
+ // May be called from any device
+ bool queryEvent(void* event) const override {
+ if (!event)
+ return true;
+ cudaEvent_t cuda_event = static_cast<cudaEvent_t>(event);
+ const cudaError_t err = C10_CUDA_ERROR_HANDLED(cudaEventQuery(cuda_event));
+ if (err != cudaErrorNotReady) {
+ C10_CUDA_CHECK(err);
+ } else {
+ // ignore and clear the error if not ready
+ (void)cudaGetLastError();
+ }
+ return (err == cudaSuccess);
+ }
+
+ // Stream-related functions
+ bool queryStream(const Stream& stream) const override {
+ CUDAStream cuda_stream{stream};
+ return cuda_stream.query();
+ }
+
+ void synchronizeStream(const Stream& stream) const override {
+ CUDAStream cuda_stream{stream};
+ cuda_stream.synchronize();
+ }
+
+ void recordDataPtrOnStream(const c10::DataPtr& data_ptr, const Stream& stream)
+ const override {
+ CUDAStream cuda_stream{stream};
+ CUDACachingAllocator::recordStream(data_ptr, cuda_stream);
+ }
+ };
+
+ } // namespace c10::cuda::impl
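
CUDAGuardImpl is the CUDA backend's implementation of the generic c10::impl::DeviceGuardImplInterface; the matching .cpp is expected to register it (in PyTorch this is done with C10_REGISTER_GUARD_IMPL(CUDA, CUDAGuardImpl)) so that device-agnostic guards route through it. The sketch below shows that indirection from the caller's side; run_scoped_work is an illustrative helper, not part of c10.

#include <c10/core/Device.h>
#include <c10/core/DeviceGuard.h>
#include <c10/core/StreamGuard.h>
#include <c10/cuda/CUDAStream.h>

// Device-agnostic scope: DeviceGuard/StreamGuard look up the guard impl
// registered for the device type (CUDAGuardImpl for CUDA) and call
// exchangeDevice()/setDevice() and getStream()/exchangeStream() through it.
void run_scoped_work(c10::Device d) {
  c10::DeviceGuard device_guard(d);

  // A pool stream for this device, installed as current for the scope.
  c10::Stream s =
      c10::cuda::getStreamFromPool(/*isHighPriority=*/false, d.index());
  c10::StreamGuard stream_guard(s);

  // ... enqueue work here; queryStream()/synchronizeStream() above back the
  // generic stream query/synchronize entry points ...
}  // guards restore the previous stream and device on scope exit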
venv/lib/python3.10/site-packages/torch/include/c10/cuda/impl/CUDATest.h ADDED
@@ -0,0 +1,9 @@
+ #pragma once
+
+ #include <c10/cuda/CUDAMacros.h>
+
+ namespace c10::cuda::impl {
+
+ C10_CUDA_API int c10_cuda_test();
+
+ }