applied-ai-018 committed
Commit adb2b67 · verified · 1 parent: 8836ff2

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. ckpts/universal/global_step120/zero/10.mlp.dense_4h_to_h.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step120/zero/18.attention.dense.weight/exp_avg.pt +3 -0
  3. ckpts/universal/global_step120/zero/25.attention.dense.weight/exp_avg.pt +3 -0
  4. ckpts/universal/global_step120/zero/25.attention.dense.weight/fp32.pt +3 -0
  5. ckpts/universal/global_step120/zero/3.attention.query_key_value.weight/exp_avg_sq.pt +3 -0
  6. venv/lib/python3.10/site-packages/torch/include/ATen/native/cuda/Activation.h +20 -0
  7. venv/lib/python3.10/site-packages/torch/include/ATen/native/cuda/CUDAJitLoops.cuh +296 -0
  8. venv/lib/python3.10/site-packages/torch/include/ATen/native/cuda/CUDALoops.cuh +348 -0
  9. venv/lib/python3.10/site-packages/torch/include/ATen/native/cuda/DeviceSqrt.cuh +25 -0
  10. venv/lib/python3.10/site-packages/torch/include/ATen/native/cuda/DistributionTemplates.h +672 -0
  11. venv/lib/python3.10/site-packages/torch/include/ATen/native/cuda/Distributions.h +25 -0
  12. venv/lib/python3.10/site-packages/torch/include/ATen/native/cuda/EmbeddingBackwardKernel.cuh +22 -0
  13. venv/lib/python3.10/site-packages/torch/include/ATen/native/cuda/ForeachFunctors.cuh +681 -0
  14. venv/lib/python3.10/site-packages/torch/include/ATen/native/cuda/MiscUtils.h +32 -0
  15. venv/lib/python3.10/site-packages/torch/include/ATen/native/cuda/MultiTensorApply.cuh +379 -0
  16. venv/lib/python3.10/site-packages/torch/include/ATen/native/cuda/Randperm.cuh +58 -0
  17. venv/lib/python3.10/site-packages/torch/include/ATen/native/cuda/ScanUtils.cuh +459 -0
  18. venv/lib/python3.10/site-packages/torch/include/ATen/native/cuda/Sort.h +17 -0
  19. venv/lib/python3.10/site-packages/torch/include/ATen/native/cuda/SortStable.h +19 -0
  20. venv/lib/python3.10/site-packages/torch/include/ATen/native/cuda/SortUtils.cuh +344 -0
  21. venv/lib/python3.10/site-packages/torch/include/ATen/native/cuda/Sorting.h +18 -0
  22. venv/lib/python3.10/site-packages/torch/include/ATen/native/cuda/SortingRadixSelect.cuh +429 -0
  23. venv/lib/python3.10/site-packages/torch/include/ATen/native/cuda/UniqueCub.cuh +16 -0
  24. venv/lib/python3.10/site-packages/torch/include/ATen/native/cuda/im2col.cuh +345 -0
  25. venv/lib/python3.10/site-packages/torch/include/ATen/native/cuda/reduction_template.cuh +680 -0
  26. venv/lib/python3.10/site-packages/torch/include/ATen/native/quantized/AffineQuantizer.h +130 -0
  27. venv/lib/python3.10/site-packages/torch/include/ATen/native/quantized/AffineQuantizerBase.h +47 -0
  28. venv/lib/python3.10/site-packages/torch/include/ATen/native/quantized/ConvUtils.h +62 -0
  29. venv/lib/python3.10/site-packages/torch/include/ATen/native/quantized/FakeQuantAffine.h +67 -0
  30. venv/lib/python3.10/site-packages/torch/include/ATen/native/quantized/IndexKernel.h +14 -0
  31. venv/lib/python3.10/site-packages/torch/include/c10/util/AlignOf.h +176 -0
  32. venv/lib/python3.10/site-packages/torch/include/c10/util/BFloat16-inl.h +343 -0
  33. venv/lib/python3.10/site-packages/torch/include/c10/util/BFloat16-math.h +287 -0
  34. venv/lib/python3.10/site-packages/torch/include/c10/util/BFloat16.h +117 -0
  35. venv/lib/python3.10/site-packages/torch/include/c10/util/CallOnce.h +67 -0
  36. venv/lib/python3.10/site-packages/torch/include/c10/util/ConstexprCrc.h +130 -0
  37. venv/lib/python3.10/site-packages/torch/include/c10/util/Deprecated.h +102 -0
  38. venv/lib/python3.10/site-packages/torch/include/c10/util/Exception.h +711 -0
  39. venv/lib/python3.10/site-packages/torch/include/c10/util/FbcodeMaps.h +29 -0
  40. venv/lib/python3.10/site-packages/torch/include/c10/util/Flags.h +226 -0
  41. venv/lib/python3.10/site-packages/torch/include/c10/util/Float8_e4m3fn.h +246 -0
  42. venv/lib/python3.10/site-packages/torch/include/c10/util/Float8_e4m3fnuz.h +136 -0
  43. venv/lib/python3.10/site-packages/torch/include/c10/util/Float8_e5m2-inl.h +283 -0
  44. venv/lib/python3.10/site-packages/torch/include/c10/util/Float8_e5m2.h +143 -0
  45. venv/lib/python3.10/site-packages/torch/include/c10/util/Float8_e5m2fnuz.h +135 -0
  46. venv/lib/python3.10/site-packages/torch/include/c10/util/Half-inl.h +350 -0
  47. venv/lib/python3.10/site-packages/torch/include/c10/util/Half.h +538 -0
  48. venv/lib/python3.10/site-packages/torch/include/c10/util/IdWrapper.h +77 -0
  49. venv/lib/python3.10/site-packages/torch/include/c10/util/Logging.h +340 -0
  50. venv/lib/python3.10/site-packages/torch/include/c10/util/MathConstants.h +142 -0
ckpts/universal/global_step120/zero/10.mlp.dense_4h_to_h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:880c8bf2d8a8098c0bd1d367029f8247d7d190c7412d38a651473393cea7d197
+ size 33555612
ckpts/universal/global_step120/zero/18.attention.dense.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:db403bdd7f906c75537ccae491b271daa62d5acc8b01f529c8014daa90873a87
+ size 16778396
ckpts/universal/global_step120/zero/25.attention.dense.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:61cfeb819dd1b0791f3bd4af7a9e1d2f7b6ee2f58de54b43479aca490dd1dfca
+ size 16778396
ckpts/universal/global_step120/zero/25.attention.dense.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e721d44034495f4f43bfbccf044efd249aa0748e5e63964bc91eda995f0a1585
+ size 16778317
ckpts/universal/global_step120/zero/3.attention.query_key_value.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8b37e74f29089062b04c8139d7c22d106ef7584273383e6efe35b2bbb0ba7537
+ size 50332843
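
Each of the five checkpoint tensors above is stored as a Git LFS pointer rather than raw data: a three-line text file recording the spec version, the sha256 of the real payload, and its size in bytes. As a rough, hypothetical illustration (not part of this commit; parse_lfs_pointer is an invented helper name), such a pointer can be read with a few lines of C++:

// Hypothetical sketch: parse the key/value lines of a Git LFS pointer file.
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>
#include <unordered_map>

std::unordered_map<std::string, std::string> parse_lfs_pointer(const std::string& path) {
  std::unordered_map<std::string, std::string> fields;
  std::ifstream in(path);
  std::string line;
  while (std::getline(in, line)) {
    std::istringstream ls(line);
    std::string key, value;
    if (ls >> key >> value) {
      fields[key] = value;  // e.g. "oid" -> "sha256:880c8bf2..."
    }
  }
  return fields;
}

int main() {
  auto p = parse_lfs_pointer("exp_avg.pt");  // path is illustrative
  std::cout << p["oid"] << " (" << p["size"] << " bytes)\n";
}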
venv/lib/python3.10/site-packages/torch/include/ATen/native/cuda/Activation.h ADDED
@@ -0,0 +1,20 @@
+ #pragma once
+ #include <ATen/native/Activation.h>
+ #include <cstdint>
+
+ namespace at {
+ struct TensorIteratorBase;
+ class TensorBase;
+ }
+
+ namespace at { namespace native {
+
+ void launch_glu_backward_kernel(const TensorIteratorBase& iter,
+ int64_t gI_stride, int64_t I_stride);
+
+ void launch_log_sigmoid_forward_kernel(TensorIteratorBase& iter);
+
+ void GeluCUDAKernelImpl(TensorIteratorBase& it, GeluType approximate);
+ void GeluBackwardCUDAKernelImpl(TensorIteratorBase& it, GeluType approximate);
+
+ }} // namespace at::native
venv/lib/python3.10/site-packages/torch/include/ATen/native/cuda/CUDAJitLoops.cuh ADDED
@@ -0,0 +1,296 @@
1
+ #pragma once
2
+ #include <ATen/jit_macros.h>
3
+
4
+ // Jiterator functions are guarded behind this macro
5
+ #if AT_USE_JITERATOR()
6
+
7
+ #include <ATen/OpMathType.h>
8
+ #include <ATen/TensorIterator.h>
9
+ #include <ATen/core/Array.h>
10
+ #include <ATen/cuda/CUDAContext.h>
11
+ #include <ATen/cuda/detail/OffsetCalculator.cuh>
12
+ #include <ATen/native/cuda/jit_utils.h>
13
+ #include <ATen/native/cuda/MemoryAccess.cuh>
14
+ #include <ATen/native/cuda/thread_constants.h>
15
+
16
+ #include <ATen/native/cuda/Loops.cuh>
17
+
18
+ #include <c10/macros/Macros.h>
19
+ #include <c10/core/ScalarType.h>
20
+ #include <c10/util/SmallBuffer.h>
21
+
22
+ #include <initializer_list>
23
+ #include <type_traits>
24
+ #include <tuple>
25
+ #include <mutex>
26
+
27
+ namespace at {
28
+ namespace native {
29
+
30
+ template <typename Tuple, std::size_t... I>
31
+ constexpr auto tuple_to_array_helper(Tuple& t, std::index_sequence<I...> seq) {
32
+ constexpr auto size = seq.size();
33
+ (void)t; // warning : unused parameter when tuple is empty.
34
+ return std::array<void*, size>{static_cast<void*>(&std::get<I>(t))...};
35
+ }
36
+
37
+ // Helper function convert tuple to std::array<void*, N>
38
+ // for passing the arguments to CUDA Kernel
39
+ // NOTE: We capture tuple by reference,
40
+ // so the pointers in returned array are only valid
41
+ // till tuple is alive.
42
+ template <typename ...Args>
43
+ constexpr auto tuple_to_array(std::tuple<Args...>& extra_args) {
44
+ constexpr auto tuple_size = sizeof...(Args);
45
+ return tuple_to_array_helper(extra_args, std::make_index_sequence<tuple_size>{});
46
+ }
47
+
48
+ struct JittedVecKernelCache {
49
+ // Different kernels are compiled depending on what we're vectorizing up to (1, 2 or 4 elements)
50
+ at::cuda::jit::NvrtcFunction vec1;
51
+ at::cuda::jit::NvrtcFunction vec2;
52
+ at::cuda::jit::NvrtcFunction vec4;
53
+ };
54
+
55
+ struct JittedKernelVariantCache {
56
+ JittedVecKernelCache vec;
57
+ at::cuda::jit::NvrtcFunction noncontiguous;
58
+ at::cuda::jit::NvrtcFunction dynamic_contiguous;
59
+ at::cuda::jit::NvrtcFunction dynamic_noncontiguous;
60
+ };
61
+
62
+ inline c10::SmallBuffer<void*, 64> pack_kernel_args(
63
+ std::initializer_list<void*> args,
64
+ c10::ArrayRef<void*> extra_args) {
65
+ c10::SmallBuffer<void*, 64> ret(args.size() + extra_args.size());
66
+ std::copy(args.begin(), args.end(), ret.data());
67
+ std::copy(extra_args.begin(), extra_args.end(), ret.data() + args.size());
68
+ return ret;
69
+ }
70
+
71
+ template<typename array_t,
72
+ typename inp_calc_t,
73
+ typename out_calc_t,
74
+ typename loader_t,
75
+ typename storer_t>
76
+ void launch_jitted_unrolled_kernel(
77
+ std::mutex &jiterator_mutex,
78
+ at::cuda::jit::NvrtcFunction &fn_cache,
79
+ const at::cuda::jit::KernelDescriptor &desc,
80
+ int64_t N,
81
+ array_t data,
82
+ inp_calc_t ic,
83
+ out_calc_t oc,
84
+ loader_t l,
85
+ storer_t s,
86
+ bool contiguous,
87
+ at::cuda::jit::BinaryFuncVariant scalar_pos,
88
+ void* scalar_val,
89
+ c10::ArrayRef<void*> extra_args) {
90
+
91
+ TORCH_INTERNAL_ASSERT(N > 0 && N <= std::numeric_limits<int32_t>::max());
92
+ //casting result to int is always safe, intermediate is int64 and won't overflow
93
+ const uint32_t grid = (N + block_work_size() - 1) / block_work_size();
94
+
95
+ if (!fn_cache.function) {
96
+ const std::lock_guard<std::mutex> lock{jiterator_mutex};
97
+ if (!fn_cache.function) {
98
+ constexpr bool dynamic_casting = !std::is_same<decltype(l), memory::LoadWithoutCast>() ||
99
+ !std::is_same<decltype(s), memory::StoreWithoutCast>();
100
+ auto code = at::cuda::jit::generate_code(
101
+ desc, contiguous, dynamic_casting, scalar_pos);
102
+ fn_cache = at::cuda::jit::jit_pwise_function(code, desc.name);
103
+ }
104
+ }
105
+
106
+ auto args = pack_kernel_args({&N, &data, &ic, &oc, &l, &s, scalar_val}, extra_args);
107
+ at::cuda::jit::launch_jitted_pwise_function(fn_cache, args.data(), {grid, 1u, 1u},
108
+ {num_threads(), 1u, 1u});
109
+ }
110
+
111
+ template<int arity, typename array_t>
112
+ void launch_jitted_vectorized_kernel(
113
+ std::mutex &jiterator_mutex, JittedVecKernelCache &fn_cache,
114
+ const at::cuda::jit::KernelDescriptor &desc, int64_t N, array_t data,
115
+ at::cuda::jit::BinaryFuncVariant scalar_pos,
116
+ void *scalar_val, c10::ArrayRef<void*> extra_args) {
117
+ TORCH_INTERNAL_ASSERT(N > 0 && N <= std::numeric_limits<int32_t>::max());
118
+ // N is still int64_t for the computation, but it's always safe to cast result to int
119
+ const uint32_t grid = (N + block_work_size() - 1) / block_work_size();
120
+ const int vec_size = at::cuda::jit::can_vectorize_up_to(
121
+ desc, c10::ArrayRef<char*>(data.data, data.size()));
122
+
123
+ // Different kernels are compiled depending on what we're vectorizing up to (1, 2 or 4 elements)
124
+ // fn_ptr is set to the appropriate function based on the vec size and GPU used
125
+ at::cuda::jit::NvrtcFunction* fn_ptr;
126
+ if (vec_size == 4) {
127
+ fn_ptr = &fn_cache.vec4;
128
+ } else if (vec_size == 2) {
129
+ fn_ptr = &fn_cache.vec2;
130
+ } else if (vec_size ==1) {
131
+ fn_ptr = &fn_cache.vec1;
132
+ } else {
133
+ TORCH_INTERNAL_ASSERT(false, "unexpected vec_size for jitter vectorized kernel");
134
+ }
135
+
136
+ bool vectorized = vec_size > 1;
137
+
138
+ if (!fn_ptr->function) {
139
+ const std::lock_guard<std::mutex> lock{jiterator_mutex};
140
+ if (!fn_ptr->function) { // cache miss!
141
+
142
+ // Generates program
143
+ auto code = at::cuda::jit::generate_code(
144
+ desc, /*contiguous=*/true, /*dynamic_casting=*/false,
145
+ scalar_pos, vectorized, vec_size);
146
+ std::string kernel_name = vectorized ? desc.name + "_vectorized" + std::to_string(vec_size) : desc.name;
147
+
148
+ // Acquires the program
149
+ *fn_ptr = at::cuda::jit::jit_pwise_function(code, kernel_name);
150
+ }
151
+ }
152
+
153
+ if (vectorized) {
154
+ auto args = pack_kernel_args({&N, &data, scalar_val}, extra_args);
155
+ at::cuda::jit::launch_jitted_pwise_function(
156
+ *fn_ptr, args.data(), {grid, 1u, 1u}, {num_threads(), 1u, 1u});
157
+ } else {
158
+ // NVCC complains about unused variables l and s.
159
+ // It should be false positive in most cases, so we suppress the warnings.
160
+ #pragma nv_diagnostic push
161
+ #pragma nv_diag_suppress 177
162
+ auto ic = TrivialOffsetCalculator<arity>();
163
+ auto oc = TrivialOffsetCalculator<1>();
164
+ auto l = memory::LoadWithoutCast();
165
+ auto s = memory::StoreWithoutCast();
166
+
167
+ auto args = pack_kernel_args(
168
+ {&N, &data, &ic, &oc, &l, &s, scalar_val}, extra_args);
169
+ at::cuda::jit::launch_jitted_pwise_function(
170
+ *fn_ptr, args.data(), {grid, 1u, 1u}, {num_threads(), 1u, 1u});
171
+ #pragma nv_diagnostic pop
172
+ }
173
+ }
174
+
175
+ template <int arity>
176
+ void jitted_gpu_kernel_generic(
177
+ std::mutex &jiterator_mutex,
178
+ JittedKernelVariantCache &cache,
179
+ const at::cuda::jit::KernelDescriptor &desc,
180
+ at::cuda::jit::BinaryFuncVariant scalar_pos,
181
+ c10::ArrayRef<void*> extra_args,
182
+ TensorIteratorBase& iter,
183
+ const bool dynamic_casting,
184
+ void *scalar_val) {
185
+ TORCH_INTERNAL_ASSERT(iter.can_use_32bit_indexing());
186
+ TORCH_INTERNAL_ASSERT(iter.ninputs() == arity);
187
+ TORCH_INTERNAL_ASSERT(iter.noutputs() == 1);
188
+
189
+ constexpr int ntensors = arity + 1;
190
+ at::detail::Array<char*, ntensors> data;
191
+ for (auto i : c10::irange(ntensors)) {
192
+ data[i] = (char*)iter.data_ptr(i);
193
+ }
194
+
195
+ int64_t numel = iter.numel();
196
+ bool contiguous = iter.is_contiguous();
197
+
198
+ // Decides which of 4 kernel types to launch
199
+ // Variations are:
200
+ // - Case 1: no dynamic casting and contiguous
201
+ // - Case 2: no dynamic casting and noncontiguous
202
+ // - Case 3: dynamic casting and contiguous
203
+ // - Case 4: dynamic casting and noncontiguous
204
+ // These cases align with the non-jitted CUDALoops.cuh cases in gpu_kernel_impl
205
+
206
+ if (!dynamic_casting) {
207
+ if (contiguous) {
208
+ // Case 1: no dynamic casting and contiguous
209
+ launch_jitted_vectorized_kernel<arity>(
210
+ jiterator_mutex, cache.vec, desc,
211
+ numel, data, scalar_pos, scalar_val, extra_args);
212
+ return;
213
+ }
214
+
215
+ // Case 2: no dynamic casting and noncontiguous
216
+ auto input_offset_calculator = make_input_offset_calculator<arity>(iter);
217
+ auto output_offset_calculator = make_output_offset_calculator(iter);
218
+ auto loader = memory::LoadWithoutCast();
219
+ auto storer = memory::StoreWithoutCast();
220
+ launch_jitted_unrolled_kernel(
221
+ jiterator_mutex, cache.noncontiguous, desc, numel, data,
222
+ input_offset_calculator, output_offset_calculator, loader,
223
+ storer, contiguous, scalar_pos, scalar_val, extra_args);
224
+ return;
225
+ }
226
+
227
+ // Cases 3 and 4 are handled below
228
+ // Both require construction of a storer (this asserts 1 output) and one or more loaders
229
+
230
+ // Creates store cast to output (the zeroth tensor in TensorIterator)
231
+ auto storer = memory::StoreWithCast<1>(iter);
232
+
233
+ // Creates load casts from inputs (note offset indexing into the iterators 1...n tensors)
234
+ auto loader = memory::LoadWithCast<arity>(iter);
235
+
236
+ if (contiguous) {
237
+ // Case 3: dynamic casting and contiguous
238
+ auto input_offset_calculator = TrivialOffsetCalculator<arity>();
239
+ auto output_offset_calculator = TrivialOffsetCalculator<1>();
240
+ launch_jitted_unrolled_kernel(
241
+ jiterator_mutex, cache.dynamic_contiguous, desc, numel, data, input_offset_calculator,
242
+ output_offset_calculator, loader, storer, contiguous, scalar_pos, scalar_val, extra_args);
243
+ return;
244
+ }
245
+
246
+ // Case 4: dynamic casting and noncontiguous
247
+ auto input_offset_calculator = make_input_offset_calculator<arity>(iter);
248
+ auto output_offset_calculator = make_output_offset_calculator(iter);
249
+ launch_jitted_unrolled_kernel(
250
+ jiterator_mutex, cache.dynamic_noncontiguous, desc, numel, data, input_offset_calculator,
251
+ output_offset_calculator, loader, storer, contiguous, scalar_pos, scalar_val, extra_args);
252
+ }
253
+
254
+ // NOTE: static to reduce chances of name collision.
255
+ template <
256
+ char const* name,
257
+ typename result_type,
258
+ typename f_inputs_type,
259
+ int arity,
260
+ at::cuda::jit::BinaryFuncVariant scalar_pos =
261
+ at::cuda::jit::BinaryFuncVariant::NoScalar,
262
+ typename... ExtraArgs>
263
+ static void jitted_gpu_kernel_impl(
264
+ TensorIteratorBase& iter,
265
+ const std::string &f,
266
+ const bool dynamic_casting,
267
+ at::opmath_type<f_inputs_type> scalar_val,
268
+ std::tuple<ExtraArgs...> extra_args) {
269
+
270
+ // TODO: Memory use can probably be optimized by re-using kernels across GPUs with
271
+ // the same compute capability
272
+ static std::mutex jiterator_mutex;
273
+ static std::vector<JittedKernelVariantCache> device_caches(c10::cuda::device_count());
274
+
275
+ constexpr int nInputs = arity;
276
+ constexpr int nOutputs = 1; // TODO: Support more than 1 output
277
+ static const auto desc = at::cuda::jit::make_kernel_descriptor<
278
+ result_type, f_inputs_type, ExtraArgs...>(name, f, nInputs, nOutputs);
279
+
280
+ auto &cache = device_caches[iter.device().index()];
281
+ auto extra_args_array = tuple_to_array(extra_args);
282
+ return jitted_gpu_kernel_generic<arity>(
283
+ jiterator_mutex,
284
+ cache,
285
+ desc,
286
+ scalar_pos,
287
+ extra_args_array,
288
+ iter,
289
+ dynamic_casting,
290
+ &scalar_val
291
+ );
292
+ }
293
+
294
+ }} // at::native
295
+
296
+ #endif // AT_USE_JITERATOR()
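
The tuple_to_array helper in CUDAJitLoops.cuh above exists because a driver-style jitted launch takes its kernel arguments as an array of type-erased pointers; as the NOTE in that header stresses, those pointers are only valid while the source tuple is alive. A standalone, host-only sketch of the same idea (illustrative, not the header's exact code):

#include <array>
#include <cstdio>
#include <tuple>
#include <utility>

// Flatten a tuple into an array of type-erased pointers to its elements.
// The pointers stay valid only as long as the referenced tuple does.
template <typename Tuple, std::size_t... I>
constexpr auto tuple_to_array_helper(Tuple& t, std::index_sequence<I...>) {
  return std::array<void*, sizeof...(I)>{static_cast<void*>(&std::get<I>(t))...};
}

template <typename... Args>
constexpr auto tuple_to_array(std::tuple<Args...>& extra_args) {
  return tuple_to_array_helper(extra_args, std::make_index_sequence<sizeof...(Args)>{});
}

int main() {
  std::tuple<int, float> extra{42, 1.5f};
  auto args = tuple_to_array(extra);  // {&std::get<0>(extra), &std::get<1>(extra)}
  // A jitted launch would pass args.data() as the kernel's void** parameter list.
  std::printf("%d %.1f\n", *static_cast<int*>(args[0]), *static_cast<float*>(args[1]));
}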
venv/lib/python3.10/site-packages/torch/include/ATen/native/cuda/CUDALoops.cuh ADDED
@@ -0,0 +1,348 @@
1
+ #pragma once
2
+
3
+ // This file provides two functions to help write GPU elementwise kernels:
4
+ //
5
+ // gpu_kernel(TensorIterator iter, <lambda>)
6
+ // gpu_kernel_with_scalars(TensorIterator iter, <lambda>)
7
+ //
8
+ // The gpu_kernel_with_scalars generates specializations that support a
9
+ // single scalar CPU argument, such as from `cuda_tensor + 5`. The CPU scalar
10
+ // is lifted to a kernel parameter instead of copying to device memory.
11
+ // This should be used in conjunction with TensorIterator::allow_cpu_scalars_,
12
+ // which is the default for TensorIterator::binary_op. Otherwise, all inputs
13
+ // and the output must be on the GPU.
14
+ //
15
+ // For example, to write a reciprocal kernel for GPU float Tensors:
16
+ //
17
+ // gpu_kernel(iter, []GPU_LAMBDA(float a) {
18
+ // return 1.0f / a;
19
+ // });
20
+ //
21
+ // To write a multiplication kernel for GPU float Tensors where one argument
22
+ // may be a CPU scalar:
23
+ //
24
+ // gpu_kernel_with_scalars(iter, []GPU_LAMBDA(float a, float b) {
25
+ // return a * b;
26
+ // });
27
+ //
28
+ // See BinaryOpsKernel.cu for the complete implementation
29
+ //
30
+
31
+ #include <iostream>
32
+ #include <tuple>
33
+ #include <type_traits>
34
+
35
+ #include <ATen/core/Array.h>
36
+ #include <ATen/cuda/CUDAContext.h>
37
+ #include <ATen/detail/FunctionTraits.h>
38
+ #include <ATen/native/TensorIterator.h>
39
+ #include <c10/core/DynamicCast.h>
40
+ #include <c10/core/ScalarType.h>
41
+ #include <c10/macros/Macros.h>
42
+ #include <c10/util/TypeCast.h>
43
+
44
+ #ifdef __NVCC__
45
+ #define ASSERT_HOST_DEVICE_LAMBDA(type) \
46
+ static_assert( \
47
+ __nv_is_extended_host_device_lambda_closure_type(type), \
48
+ #type " must be a __host__ __device__ lambda")
49
+ #else
50
+ #define ASSERT_HOST_DEVICE_LAMBDA(type)
51
+ #endif
52
+
53
+ namespace at {
54
+ namespace native {
55
+
56
+ template <int vec_size, typename func_t, typename array_t>
57
+ C10_LAUNCH_BOUNDS_1(num_threads())
58
+ __global__ void vectorized_elementwise_kernel(int N, func_t f, array_t data) {
59
+ using traits = function_traits<func_t>;
60
+ int remaining = N - block_work_size() * blockIdx.x;
61
+
62
+ if (remaining < block_work_size()) { // if this block handles the reminder,
63
+ // just do a naive unrolled loop
64
+ auto input_calc = TrivialOffsetCalculator<traits::arity>();
65
+ auto output_calc = TrivialOffsetCalculator<1>();
66
+ auto loader = memory::LoadWithoutCast();
67
+ auto storer = memory::StoreWithoutCast();
68
+ auto policy = memory::policies::unroll<
69
+ array_t,
70
+ decltype(input_calc),
71
+ decltype(output_calc),
72
+ memory::LoadWithoutCast,
73
+ memory::StoreWithoutCast>(
74
+ data, remaining, input_calc, output_calc, loader, storer);
75
+ elementwise_kernel_helper(f, policy);
76
+ } else { // if this block has a full `block_work_size` data to handle, use
77
+ // vectorized memory access
78
+ elementwise_kernel_helper(
79
+ f, memory::policies::vectorized<vec_size, array_t>(data));
80
+ }
81
+ }
82
+
83
+ template <
84
+ typename func_t,
85
+ typename array_t,
86
+ typename inp_calc_t,
87
+ typename out_calc_t,
88
+ typename loader_t,
89
+ typename storer_t>
90
+ C10_LAUNCH_BOUNDS_1(num_threads())
91
+ __global__ void unrolled_elementwise_kernel(
92
+ int N,
93
+ func_t f,
94
+ array_t data,
95
+ inp_calc_t ic,
96
+ out_calc_t oc,
97
+ loader_t l,
98
+ storer_t s) {
99
+ int remaining = N - block_work_size() * blockIdx.x;
100
+ auto policy = memory::policies::
101
+ unroll<array_t, inp_calc_t, out_calc_t, loader_t, storer_t>(
102
+ data, remaining, ic, oc, l, s);
103
+ elementwise_kernel_helper(f, policy);
104
+ }
105
+
106
+ // this function assume trivial 1d and no dynamic casting
107
+ template <typename func_t, typename array_t>
108
+ static inline void launch_vectorized_kernel(
109
+ int64_t N,
110
+ const func_t& f,
111
+ array_t data) {
112
+ TORCH_INTERNAL_ASSERT(N > 0 && N <= std::numeric_limits<int32_t>::max());
113
+ using traits = function_traits<func_t>;
114
+ int64_t grid = (N + block_work_size() - 1) / block_work_size();
115
+ auto stream = at::cuda::getCurrentCUDAStream();
116
+ int vec_size = memory::can_vectorize_up_to<func_t>(data);
117
+
118
+ switch (vec_size) {
119
+ case 4:
120
+ vectorized_elementwise_kernel<4, func_t, array_t>
121
+ <<<grid, num_threads(), 0, stream>>>(N, f, data);
122
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
123
+ break;
124
+ case 2:
125
+ vectorized_elementwise_kernel<2, func_t, array_t>
126
+ <<<grid, num_threads(), 0, stream>>>(N, f, data);
127
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
128
+ break;
129
+ case 1: {
130
+ auto input_calc = TrivialOffsetCalculator<traits::arity>();
131
+ auto output_calc = TrivialOffsetCalculator<1>();
132
+ auto loader = memory::LoadWithoutCast();
133
+ auto storer = memory::StoreWithoutCast();
134
+ unrolled_elementwise_kernel<func_t, array_t>
135
+ <<<grid, num_threads(), 0, stream>>>(
136
+ N, f, data, input_calc, output_calc, loader, storer);
137
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
138
+ break;
139
+ }
140
+ default:
141
+ TORCH_INTERNAL_ASSERT(false, "Unexpected vectorization size");
142
+ }
143
+ }
144
+
145
+ template <
146
+ typename func_t,
147
+ typename array_t,
148
+ typename inp_calc_t,
149
+ typename out_calc_t,
150
+ typename loader_t,
151
+ typename storer_t>
152
+ static inline void launch_unrolled_kernel(
153
+ int64_t N,
154
+ const func_t& f,
155
+ array_t data,
156
+ inp_calc_t ic,
157
+ out_calc_t oc,
158
+ loader_t l,
159
+ storer_t s) {
160
+ TORCH_INTERNAL_ASSERT(N > 0 && N <= std::numeric_limits<int32_t>::max());
161
+ int64_t grid = (N + block_work_size() - 1) / block_work_size();
162
+ auto stream = at::cuda::getCurrentCUDAStream();
163
+ unrolled_elementwise_kernel<func_t, array_t>
164
+ <<<grid, num_threads(), 0, stream>>>(N, f, data, ic, oc, l, s);
165
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
166
+ }
167
+
168
+ template <int nt, int vt, typename func_t>
169
+ C10_LAUNCH_BOUNDS_2(nt, 4)
170
+ __global__ void elementwise_kernel(int N, func_t f) {
171
+ int tid = threadIdx.x;
172
+ int nv = nt * vt;
173
+ int idx = nv * blockIdx.x + tid;
174
+ #pragma unroll
175
+ for (int i = 0; i < vt; i++) {
176
+ if (idx < N) {
177
+ f(idx);
178
+ idx += nt;
179
+ }
180
+ }
181
+ }
182
+
183
+ template <int nt, int vt, typename func_t>
184
+ static void launch_legacy_kernel(int64_t N, const func_t& f) {
185
+ TORCH_INTERNAL_ASSERT(N >= 0 && N <= std::numeric_limits<int32_t>::max());
186
+ if (N == 0) {
187
+ return;
188
+ }
189
+ dim3 block(nt);
190
+ dim3 grid((N + block.x * vt - 1) / (block.x * vt));
191
+ auto stream = at::cuda::getCurrentCUDAStream();
192
+ elementwise_kernel<nt, vt, func_t><<<grid, block, 0, stream>>>(N, f);
193
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
194
+ }
195
+
196
+ template <typename traits, typename func_t, typename index_t, size_t... INDEX>
197
+ C10_HOST_DEVICE typename traits::result_type invoke_impl(
198
+ const func_t& f,
199
+ char* const C10_RESTRICT data[],
200
+ const index_t strides[],
201
+ int i,
202
+ std::index_sequence<INDEX...>) {
203
+ (void)strides;
204
+ (void)i;
205
+ return f(c10::load<typename traits::template arg<INDEX>::type>(
206
+ data[INDEX] + i * strides[INDEX])...);
207
+ }
208
+
209
+ template <
210
+ typename func_t,
211
+ typename index_t,
212
+ typename traits = function_traits<func_t>>
213
+ C10_HOST_DEVICE typename traits::result_type invoke(
214
+ const func_t& f,
215
+ char* const C10_RESTRICT data[],
216
+ const index_t strides[],
217
+ int i) {
218
+ using Indices = std::make_index_sequence<traits::arity>;
219
+ return invoke_impl<traits>(f, data, strides, i, Indices{});
220
+ }
221
+
222
+ template <typename traits, typename func_t, typename index_t, size_t... I>
223
+ C10_HOST_DEVICE typename traits::result_type invoke_impl(
224
+ const func_t& f,
225
+ char* const C10_RESTRICT data[],
226
+ const index_t strides[],
227
+ const ScalarType dtypes[],
228
+ int i,
229
+ std::index_sequence<I...>) {
230
+ (void)strides;
231
+ (void)i;
232
+ return f(c10::fetch_and_cast<typename traits::template arg<I>::type>(
233
+ dtypes[I], data[I] + i * strides[I])...);
234
+ }
235
+
236
+ template <
237
+ typename func_t,
238
+ typename index_t,
239
+ typename traits = function_traits<func_t>>
240
+ C10_HOST_DEVICE typename traits::result_type invoke(
241
+ const func_t& f,
242
+ char* const C10_RESTRICT data[],
243
+ const index_t strides[],
244
+ const ScalarType dtypes[],
245
+ int i) {
246
+ using Indices = std::make_index_sequence<traits::arity>;
247
+ return invoke_impl<traits>(f, data, strides, dtypes, i, Indices{});
248
+ }
249
+
250
+ template <typename func_t>
251
+ void gpu_kernel_impl_nocast(TensorIteratorBase& iter, const func_t& f) {
252
+ using traits = function_traits<func_t>;
253
+ using arg0_t = typename traits::result_type;
254
+ constexpr int ntensors = traits::arity + 1;
255
+
256
+ TORCH_INTERNAL_ASSERT(iter.can_use_32bit_indexing());
257
+ TORCH_INTERNAL_ASSERT(iter.ninputs() == traits::arity);
258
+ TORCH_INTERNAL_ASSERT(iter.noutputs() == 1);
259
+ TORCH_INTERNAL_ASSERT(!needs_dynamic_casting<func_t>::check(iter));
260
+
261
+ at::detail::Array<char*, ntensors> data;
262
+ for (int i = 0; i < ntensors; i++) {
263
+ data[i] = (char*)iter.data_ptr(i);
264
+ }
265
+
266
+ int64_t numel = iter.numel();
267
+
268
+ bool contiguous = iter.is_contiguous();
269
+
270
+ if (contiguous) {
271
+ return launch_vectorized_kernel(numel, f, data);
272
+ }
273
+ auto offset_calc = ::make_offset_calculator<traits::arity + 1>(iter);
274
+ constexpr int unroll_factor = sizeof(arg0_t) >= 4 ? 2 : 4;
275
+ launch_legacy_kernel<128, unroll_factor>(numel, [=] GPU_LAMBDA(int idx) {
276
+ auto offsets = offset_calc.get(idx);
277
+ arg0_t* out = (arg0_t*)(data[0] + offsets[0]);
278
+ *out = invoke(f, &data.data[1], &offsets.data[1], 1);
279
+ });
280
+ }
281
+
282
+ template <typename func_t>
283
+ void gpu_kernel_impl(TensorIteratorBase& iter, const func_t& f) {
284
+ if (!needs_dynamic_casting<func_t>::check(iter)) {
285
+ return gpu_kernel_impl_nocast(iter, f);
286
+ }
287
+ using traits = function_traits<func_t>;
288
+ using arg0_t = typename traits::result_type;
289
+ constexpr int ntensors = traits::arity + 1;
290
+
291
+ TORCH_INTERNAL_ASSERT(iter.can_use_32bit_indexing());
292
+ TORCH_INTERNAL_ASSERT(iter.ninputs() == traits::arity);
293
+ TORCH_INTERNAL_ASSERT(iter.noutputs() == 1);
294
+
295
+ at::detail::Array<char*, ntensors> data;
296
+ for (int i = 0; i < ntensors; i++) {
297
+ data[i] = (char*)iter.data_ptr(i);
298
+ }
299
+
300
+ int64_t numel = iter.numel();
301
+
302
+ bool contiguous = iter.is_contiguous();
303
+
304
+ if (contiguous) {
305
+ #ifdef USE_ROCM
306
+ at::detail::Array<ScalarType, ntensors> dtypes;
307
+ auto inner_strides = iter.get_inner_strides();
308
+ at::detail::Array<int, ntensors> strides;
309
+ for (int i = 0; i < ntensors; i++) {
310
+ dtypes[i] = iter.dtype(i);
311
+ strides[i] = inner_strides[i];
312
+ }
313
+ launch_legacy_kernel<512, 1>(numel, [=]GPU_LAMBDA(int idx) {
314
+ void* out = data[0] + strides[0] * idx;
315
+ arg0_t result = invoke(f, &data.data[1], &strides.data[1], &dtypes.data[1], idx);
316
+ c10::cast_and_store<arg0_t>(dtypes[0], out, result);
317
+ });
318
+ #else
319
+ auto loader = memory::LoadWithCast<traits::arity>(iter);
320
+ auto storer = memory::StoreWithCast<1>(iter);
321
+ auto input_offset_calculator = TrivialOffsetCalculator<traits::arity>();
322
+ auto output_offset_calculator = TrivialOffsetCalculator<1>();
323
+ launch_unrolled_kernel(
324
+ numel,
325
+ f,
326
+ data,
327
+ input_offset_calculator,
328
+ output_offset_calculator,
329
+ loader,
330
+ storer);
331
+ #endif
332
+ } else {
333
+ at::detail::Array<ScalarType, ntensors> dtypes;
334
+ for (int i = 0; i < ntensors; i++) {
335
+ dtypes[i] = iter.dtype(i);
336
+ }
337
+ auto offset_calc = ::make_offset_calculator<traits::arity + 1>(iter);
338
+ launch_legacy_kernel<128, 4>(numel, [=] GPU_LAMBDA(int idx) {
339
+ auto offsets = offset_calc.get(idx);
340
+ void* out = data[0] + offsets[0];
341
+ arg0_t result = invoke(f, &data.data[1], &offsets.data[1], &dtypes.data[1], 1);
342
+ c10::cast_and_store<arg0_t>(dtypes[0], out, result);
343
+ });
344
+ }
345
+ }
346
+
347
+ } // namespace native
348
+ } // namespace at
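
The comment at the top of CUDALoops.cuh describes its intended call sites: gpu_kernel(iter, lambda) for plain elementwise ops, and gpu_kernel_with_scalars when one operand may be a CPU scalar. A minimal sketch of the reciprocal example that comment gives (the example::reciprocal_kernel_cuda wrapper is illustrative, not code from this commit, and assumes compilation as a .cu file):

#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Loops.cuh>

namespace example {

// Element-wise 1/x over whatever tensors the TensorIterator was configured with.
void reciprocal_kernel_cuda(at::TensorIteratorBase& iter) {
  at::native::gpu_kernel(iter, [] GPU_LAMBDA(float a) -> float {
    return 1.0f / a;
  });
}

} // namespace example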
venv/lib/python3.10/site-packages/torch/include/ATen/native/cuda/DeviceSqrt.cuh ADDED
@@ -0,0 +1,25 @@
+ #pragma once
+
+ namespace at { namespace native {
+ #if defined(USE_ROCM)
+ // take these out when ROCm implements std:: math functions
+ #include <math.h>
+ template <typename scalar_t>
+ static __forceinline__ __device__ scalar_t device_sqrt(scalar_t val);
+
+ template <>
+ __forceinline__ __device__ float device_sqrt(float val) {
+ return ::sqrtf(val);
+ }
+
+ template <>
+ __forceinline__ __device__ double device_sqrt(double val) {
+ return ::sqrt(val);
+ }
+ #else
+ template<typename scalar_t>
+ __forceinline__ __device__ double device_sqrt(scalar_t val) {
+ return std::sqrt(val);
+ }
+ #endif
+ }}
venv/lib/python3.10/site-packages/torch/include/ATen/native/cuda/DistributionTemplates.h ADDED
@@ -0,0 +1,672 @@
1
+ #pragma once
2
+
3
+ #include <ATen/AccumulateType.h>
4
+ #include <ATen/Dispatch.h>
5
+ #include <ATen/Dispatch_v2.h>
6
+ #include <ATen/ExpandBase.h>
7
+ #include <ATen/OpMathType.h>
8
+ #include <ATen/native/TensorIterator.h>
9
+ #include <ATen/native/cuda/Loops.cuh>
10
+ #include <c10/util/Half.h>
11
+ #include <ATen/cuda/CUDAApplyUtils.cuh>
12
+ #include <ATen/cuda/CUDAContext.h>
13
+ #include <ATen/cuda/detail/OffsetCalculator.cuh>
14
+ #include <ATen/cuda/CUDAGraphsUtils.cuh>
15
+ #include <ATen/detail/FunctionTraits.h>
16
+ #include <ATen/core/DistributionsHelper.h>
17
+
18
+ #include <curand.h>
19
+ #include <curand_kernel.h>
20
+ #include <curand_philox4x32_x.h>
21
+ #include <cstdint>
22
+ #include <limits>
23
+ #include <utility>
24
+ #include <mutex>
25
+ #include <tuple>
26
+ #include <type_traits>
27
+
28
+ namespace at {
29
+ namespace native {
30
+ namespace {
31
+
32
+ // launch bounds used for kernels utilizing TensorIterator
33
+ const uint32_t block_size_bound = 256;
34
+ const uint32_t grid_size_bound = 4;
35
+ // number of randoms given by distributions like curand_uniform4, curand_uniform2_double
36
+ // used in calculating philox offset.
37
+ const uint32_t curand4_engine_calls = 4;
38
+
39
+ // utility function that calculates proper philox_offset
40
+ // for distributions utilizing TensorIterator. For distributions using
41
+ // TensorIterator, we are using a grid-stride loop with each
42
+ // thread yielding one element per thread. For the edge of the grid-stride
43
+ // loop, if the tensor size is large, the unroll loop will kick in and the float4
44
+ // from curand4 will start getting utilized (for common tensor sizes, we end up
45
+ // using rand.x from each thread). Hence, the philox_offset is
46
+ // (number of elements per thread * number of engine calls), which makes
47
+ // sure that philox offset increment is not less than the number of randoms used
48
+ // in each thread.
49
+ std::tuple<uint64_t, dim3, dim3> calc_execution_policy(int64_t total_elements) {
50
+ const uint64_t numel = static_cast<uint64_t>(total_elements);
51
+ const uint32_t block_size = block_size_bound;
52
+ const uint32_t unroll = curand4_engine_calls;
53
+ dim3 dim_block(block_size);
54
+ dim3 grid((numel + block_size - 1) / block_size);
55
+ uint32_t blocks_per_sm = at::cuda::getCurrentDeviceProperties()->maxThreadsPerMultiProcessor / block_size;
56
+ grid.x = std::min(
57
+ static_cast<uint32_t>(at::cuda::getCurrentDeviceProperties()->multiProcessorCount) * blocks_per_sm,
58
+ grid.x);
59
+ //number of times random will be generated per thread, to offset philox counter in thc random state
60
+ uint64_t counter_offset = ((numel - 1) / (block_size * grid.x * unroll) + 1)
61
+ * curand4_engine_calls;
62
+ return std::make_tuple(counter_offset, grid, dim_block);
63
+ }
64
+
65
+ // grid stride loop kernel for distributions
66
+ template<typename accscalar_t, int unroll_factor, typename dist_t, typename transform_t>
67
+ C10_LAUNCH_BOUNDS_2(block_size_bound, grid_size_bound)
68
+ __global__ void distribution_elementwise_grid_stride_kernel(int numel,
69
+ PhiloxCudaState philox_args,
70
+ const dist_t dist_func,
71
+ const transform_t transform_func) {
72
+ auto seeds = at::cuda::philox::unpack(philox_args);
73
+ int idx = blockIdx.x * blockDim.x + threadIdx.x;
74
+ curandStatePhilox4_32_10_t state;
75
+ curand_init(std::get<0>(seeds),
76
+ idx,
77
+ std::get<1>(seeds),
78
+ &state);
79
+
80
+ int rounded_size = ((numel - 1)/(blockDim.x * gridDim.x * unroll_factor)+1) *
81
+ blockDim.x * gridDim.x * unroll_factor;
82
+ for(int linear_index = idx; linear_index < rounded_size; linear_index += blockDim.x * gridDim.x * unroll_factor) {
83
+ auto rand = dist_func(&state);
84
+ #pragma unroll
85
+ for (int ii = 0; ii < unroll_factor; ii++) {
86
+ int li = linear_index + blockDim.x * gridDim.x * ii;
87
+ if (li < numel) {
88
+ transform_func(li, static_cast<accscalar_t>((&rand.x)[ii]));
89
+ }
90
+ }
91
+ __syncthreads();
92
+ }
93
+ }
94
+
95
+ /**
96
+ * distribution_nullary_kernel is analogous to gpu_kernel in
97
+ * ATen/native/cuda/Loops.cuh. Like gpu_kernel, it uses
98
+ * TensorIterator to launch a kernel. However, the differences are
99
+ * - it launches a grid-stride loop based kernel. The kernel is not
100
+ * generic like elementwise_kernel in Loops.cuh and is specialized
101
+ * for the distribution kernels here.
102
+ * - For big size tensors, we can launch multiple kernels recursively
103
+ * (i.e. if (!iter.can_use_32bit_indexing())) and hence, the philox
104
+ * offset calculation is done in this function.
105
+ *
106
+ * FIXME: Can we specialize elementwise_kernel and launch_kernel in Loops.cuh
107
+ * to have grid-stride loop kernel and then use that to launch our distribution
108
+ * kernels? Note that we need a grid-stride loop kernel because, we found by testing
109
+ * that it achieves peak effective bandwidth.
110
+ */
111
+ template<typename scalar_t,
112
+ typename accscalar_t,
113
+ int unroll_factor,
114
+ typename RNG,
115
+ typename dist_t,
116
+ typename transform_t>
117
+ void distribution_nullary_kernel(at::TensorIteratorBase& iter,
118
+ RNG gen,
119
+ const dist_t& dist_func,
120
+ const transform_t transform_func) {
121
+ static_assert(unroll_factor >= 1, "unroll_factor must be >= 1.");
122
+ int64_t numel = iter.numel();
123
+ if (numel == 0) {
124
+ return;
125
+ }
126
+
127
+ auto execution_policy = calc_execution_policy(numel);
128
+ auto counter_offset = std::get<0>(execution_policy);
129
+ auto grid = std::get<1>(execution_policy);
130
+ auto block = std::get<2>(execution_policy);
131
+ PhiloxCudaState rng_engine_inputs;
132
+ {
133
+ // See Note [Acquire lock when using random generators]
134
+ std::lock_guard<std::mutex> lock(gen->mutex_);
135
+ rng_engine_inputs = gen->philox_cuda_state(counter_offset);
136
+ }
137
+
138
+ if (!iter.can_use_32bit_indexing()) {
139
+ for (auto& sub_iter : iter.with_32bit_indexing()) {
140
+ distribution_nullary_kernel<scalar_t, accscalar_t, unroll_factor>(sub_iter,
141
+ gen, dist_func, transform_func);
142
+ }
143
+ return;
144
+ }
145
+
146
+ char* out_data = (char*)iter.data_ptr(0);
147
+
148
+ auto stream = at::cuda::getCurrentCUDAStream();
149
+ if (iter.is_trivial_1d()) {
150
+ auto strides = iter.get_inner_strides();
151
+ int stride0 = strides[0];
152
+ distribution_elementwise_grid_stride_kernel<accscalar_t, unroll_factor><<<grid, block, 0, stream>>>(
153
+ numel,
154
+ rng_engine_inputs,
155
+ dist_func,
156
+ [=]__device__(int idx, accscalar_t rand) {
157
+ scalar_t* out = (scalar_t*)&out_data[stride0 * idx];
158
+ *out = transform_func(rand);
159
+ }
160
+ );
161
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
162
+ } else {
163
+ auto offset_calc = make_offset_calculator<1>(iter);
164
+ distribution_elementwise_grid_stride_kernel<accscalar_t, unroll_factor><<<grid, block, 0, stream>>>(
165
+ numel,
166
+ rng_engine_inputs,
167
+ dist_func,
168
+ [=]__device__(int idx, accscalar_t rand) {
169
+ auto offsets = offset_calc.get(idx);
170
+ scalar_t* out = (scalar_t*)&out_data[offsets[0]];
171
+ *out = transform_func(rand);
172
+ }
173
+ );
174
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
175
+ }
176
+ }
177
+
178
+ // Binary kernel
179
+ template <typename func_t, typename inp_offset_calc_t, typename out_offset_calc_t>
180
+ __global__ void distribution_binary_elementwise_kernel(
181
+ int numel,
182
+ func_t f,
183
+ PhiloxCudaState philox_args,
184
+ typename function_traits<func_t>::result_type *output_data,
185
+ const typename function_traits<func_t>::template arg<1>::type *input_data_1,
186
+ const typename function_traits<func_t>::template arg<2>::type *input_data_2,
187
+ inp_offset_calc_t inp_calc,
188
+ out_offset_calc_t out_calc) {
189
+ auto seeds = at::cuda::philox::unpack(philox_args);
190
+
191
+ using input_t_1 = typename function_traits<func_t>::template arg<1>::type;
192
+ using input_t_2 = typename function_traits<func_t>::template arg<2>::type;
193
+
194
+ input_t_1 inputs_1[thread_work_size()];
195
+ input_t_2 inputs_2[thread_work_size()];
196
+
197
+ int base_index = block_work_size() * blockIdx.x;
198
+ int remaining = std::min<int>(numel - base_index, block_work_size());
199
+
200
+ curandStatePhilox4_32_10_t state;
201
+ curand_init(std::get<0>(seeds),
202
+ blockIdx.x * blockDim.x + threadIdx.x,
203
+ std::get<1>(seeds),
204
+ &state);
205
+
206
+ // load data into registers
207
+ int thread_idx = threadIdx.x;
208
+ #pragma unroll
209
+ for (int i = 0; i < thread_work_size(); i++) {
210
+ if (thread_idx >= remaining) {
211
+ break;
212
+ }
213
+ int input_idx = thread_idx + base_index;
214
+ auto offsets = inp_calc.get(input_idx);
215
+ inputs_1[i] = input_data_1[offsets[0]];
216
+ inputs_2[i] = input_data_2[offsets[1]];
217
+
218
+ thread_idx += num_threads();
219
+ }
220
+
221
+ // compute and store
222
+ thread_idx = threadIdx.x;
223
+ #pragma unroll
224
+ for (int i = 0; i < thread_work_size(); i++) {
225
+ if (thread_idx >= remaining) {
226
+ break;
227
+ }
228
+ int input_idx = thread_idx + base_index;
229
+ auto offsets = out_calc.get(input_idx);
230
+ output_data[offsets[0]] = f(state, inputs_1[i], inputs_2[i]);
231
+ thread_idx += num_threads();
232
+ }
233
+ }
234
+
235
+ template <typename func_t>
236
+ void distribution_binary_kernel(TensorIteratorBase &iter, PhiloxCudaState philox_args, const func_t &f) {
237
+ static_assert(std::is_same<typename function_traits<func_t>::template arg<0>::type, curandStatePhilox4_32_10_t&>::value, "the first argument of functor must be curandStatePhilox4_32_10_t");
238
+ using input_t_1 = typename function_traits<func_t>::template arg<1>::type;
239
+ using input_t_2 = typename function_traits<func_t>::template arg<2>::type;
240
+ using output_t = typename function_traits<func_t>::result_type;
241
+
242
+ if (!iter.can_use_32bit_indexing()) {
243
+ for (auto& sub_iter : iter.with_32bit_indexing()) {
244
+ distribution_binary_kernel(sub_iter, philox_args, f);
245
+ }
246
+ return;
247
+ }
248
+
249
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(iter.can_use_32bit_indexing());
250
+
251
+ int64_t numel = iter.numel();
252
+ if (numel == 0) {
253
+ return;
254
+ }
255
+
256
+ output_t *output_data = static_cast<output_t *>(iter.data_ptr(0));
257
+ const input_t_1 *input_data_1 = static_cast<const input_t_1 *>(iter.data_ptr(1));
258
+ const input_t_2 *input_data_2 = static_cast<const input_t_2 *>(iter.data_ptr(2));
259
+
260
+ int64_t grid = (numel + block_work_size() - 1) / block_work_size();
261
+ auto stream = at::cuda::getCurrentCUDAStream();
262
+
263
+ if (iter.is_contiguous()) {
264
+ distribution_binary_elementwise_kernel<<<grid,num_threads(), 0, stream>>>(
265
+ numel, f, philox_args, output_data, input_data_1, input_data_2,
266
+ TrivialOffsetCalculator<2>(), TrivialOffsetCalculator<1>());
267
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
268
+ } else {
269
+ distribution_binary_elementwise_kernel<<<grid, num_threads(), 0, stream>>>(
270
+ numel, f, philox_args, output_data, input_data_1, input_data_2,
271
+ make_input_offset_calculator<2>(iter), make_output_offset_calculator(iter));
272
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
273
+ }
274
+ }
275
+
276
+ } // namespace
277
+ }} // namespace at::native
278
+
279
+
280
+ namespace at {
281
+ namespace native {
282
+ namespace templates {
283
+ namespace cuda {
284
+
285
+ // ==================================================== Random ========================================================
286
+
287
+ template<typename RNG>
288
+ void random_from_to_kernel(TensorIteratorBase& iter, uint64_t range, int64_t base, RNG gen) {
289
+ AT_DISPATCH_V2(iter.dtype(), "random_from_to_kernel_cuda", AT_WRAP([&] {
290
+ if ((
291
+ std::is_same<scalar_t, int64_t>::value ||
292
+ std::is_same<scalar_t, double>::value ||
293
+ std::is_same<scalar_t, float>::value ||
294
+ std::is_same<scalar_t, at::BFloat16>::value) && range >= 1ULL << 32)
295
+ {
296
+ // define lambda to mod with range and add base
297
+ auto random_func = [range, base] __device__ (uint64_t rand) {
298
+ return transformation::uniform_int_from_to<scalar_t>(rand, range, base);
299
+ };
300
+ distribution_nullary_kernel<scalar_t, uint64_t, curand4_engine_calls/2>(iter,
301
+ gen,
302
+ [] __device__ (curandStatePhilox4_32_10_t* state) -> ulonglong2 {
303
+ ulonglong2 ret;
304
+ uint4 rand_val = curand4(state);
305
+ ret.x = (static_cast<uint64_t>(rand_val.x) << 32) | rand_val.y;
306
+ ret.y = (static_cast<uint64_t>(rand_val.z) << 32) | rand_val.w;
307
+ return ret;
308
+ },
309
+ random_func);
310
+ } else {
311
+ auto random_func = [range, base] __device__ (uint32_t rand) {
312
+ return transformation::uniform_int_from_to<scalar_t>(rand, range, base);
313
+ };
314
+ distribution_nullary_kernel<scalar_t, uint32_t, curand4_engine_calls>(iter,
315
+ gen,
316
+ [] __device__ (curandStatePhilox4_32_10_t* state) {
317
+ return curand4(state);
318
+ },
319
+ random_func);
320
+ }
321
+ }), AT_EXPAND(AT_ALL_TYPES), kBool, kHalf, kBFloat16, AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES));
322
+ }
323
+
324
+ // This is the special kernel to handle single specific case:
325
+ // from(inclusive) = std::numeric_limits<int64_t>::lowest()
326
+ // to(exclusive) = None (= std::numeric_limits<int64_t>::max() + 1)
327
+ template<typename RNG>
328
+ void random_full_64_bits_range_kernel(TensorIteratorBase& iter, RNG gen) {
329
+ AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::BFloat16, iter.dtype(), "random_full_64_bits_range_kernel_cuda", [&] {
330
+ if (std::is_same<scalar_t, int64_t>::value ||
331
+ std::is_same<scalar_t, double>::value ||
332
+ std::is_same<scalar_t, float>::value ||
333
+ std::is_same<scalar_t, at::BFloat16>::value) {
334
+ auto random_func = [] __device__ (uint64_t rand) {
335
+ return transformation::uniform_int_full_range<scalar_t>(rand);
336
+ };
337
+ distribution_nullary_kernel<scalar_t, uint64_t, curand4_engine_calls/2>(iter,
338
+ gen,
339
+ [] __device__ (curandStatePhilox4_32_10_t* state) -> ulonglong2 {
340
+ ulonglong2 ret;
341
+ uint4 rand_val = curand4(state);
342
+ ret.x = (static_cast<uint64_t>(rand_val.x) << 32) | rand_val.y;
343
+ ret.y = (static_cast<uint64_t>(rand_val.z) << 32) | rand_val.w;
344
+ return ret;
345
+ },
346
+ random_func);
347
+ } else {
348
+ TORCH_CHECK(false, "random_full_64_bits_range_kernel_cuda handles only int64, double, float and bfloat16");
349
+ }
350
+ });
351
+ }
352
+
353
+ template<typename RNG>
354
+ struct RandomFromToKernel {
355
+ void operator()(TensorIteratorBase& iter, uint64_t range, int64_t base, c10::optional<Generator> gen) {
356
+ random_from_to_kernel(iter, range, base, check_generator<RNG>(gen));
357
+ }
358
+ void operator()(TensorIteratorBase& iter, c10::optional<Generator> gen) {
359
+ random_full_64_bits_range_kernel(iter, check_generator<RNG>(gen));
360
+ }
361
+ };
362
+
363
+ template<typename RNG>
364
+ void random_kernel(TensorIteratorBase& iter, RNG gen) {
365
+ AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Half, at::ScalarType::BFloat16, at::ScalarType::Bool, iter.dtype(), "random_kernel_cuda", [&] {
366
+ if (std::is_same<scalar_t, double>::value || std::is_same<scalar_t, int64_t>::value) {
367
+ auto random_func = [] __device__ (uint64_t rand) {
368
+ return transformation::uniform_int<scalar_t>(rand);
369
+ };
370
+ distribution_nullary_kernel<scalar_t, uint64_t, curand4_engine_calls/2>(iter, gen,
371
+ [] __device__ (curandStatePhilox4_32_10_t* state) -> ulonglong2 {
372
+ ulonglong2 ret;
373
+ uint4 rand_val = curand4(state);
374
+ ret.x = (static_cast<uint64_t>(rand_val.x) << 32) | rand_val.y;
375
+ ret.y = (static_cast<uint64_t>(rand_val.z) << 32) | rand_val.w;
376
+ return ret;
377
+ },
378
+ random_func);
379
+ } else {
380
+ auto random_func = [] __device__ (uint32_t rand) {
381
+ return transformation::uniform_int<scalar_t>(rand);
382
+ };
383
+ distribution_nullary_kernel<scalar_t, uint32_t, curand4_engine_calls>(iter,
384
+ gen,
385
+ [] __device__ (curandStatePhilox4_32_10_t* state) {
386
+ return curand4(state);
387
+ },
388
+ random_func);
389
+ }
390
+ });
391
+ }
392
+
393
+ template<typename RNG>
394
+ struct RandomKernel {
395
+ void operator()(TensorIteratorBase& iter, RNG gen) {
396
+ random_kernel(iter, gen);
397
+ }
398
+ };
399
+
400
+ // ====================================================================================================================
401
+
402
+ template<typename scalar_t, typename accscalar_t, size_t curand4_engine_calls, typename RNG, typename transform_t>
403
+ void uniform_and_transform(TensorIteratorBase& iter, RNG gen, transform_t transform) {
404
+ if (std::is_same<scalar_t, double>::value) {
405
+ distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls/2>(iter,
406
+ gen,
407
+ [] __device__ (curandStatePhilox4_32_10_t* state) { return curand_uniform2_double(state); },
408
+ transform);
409
+ } else {
410
+ distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls>(iter,
411
+ gen,
412
+ [] __device__ (curandStatePhilox4_32_10_t* state) { return curand_uniform4(state); },
413
+ transform);
414
+ }
415
+ }
416
+
417
+ template<typename scalar_t, typename accscalar_t, size_t curand4_engine_calls, typename RNG, typename transform_t>
418
+ void normal_and_transform(TensorIteratorBase& iter, RNG gen, transform_t transform) {
419
+ if (std::is_same<scalar_t, double>::value) {
420
+ distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls/2>(iter,
421
+ gen,
422
+ [] __device__ (curandStatePhilox4_32_10_t* state) { return curand_normal2_double(state); },
423
+ transform);
424
+ } else {
425
+ distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls>(iter,
426
+ gen,
427
+ [] __device__ (curandStatePhilox4_32_10_t* state) { return curand_normal4(state); },
428
+ transform);
429
+ }
430
+ }
431
+
432
+ // ==================================================== Normal ========================================================
433
+
434
+ template<typename RNG>
435
+ void normal_kernel(const TensorBase &self, double mean_, double std_, RNG gen) {
436
+ auto iter = TensorIterator::borrowing_nullary_op(self);
437
+ AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "normal_kernel_cuda", [&] {
438
+ using accscalar_t = at::acc_type<scalar_t, true>;
439
+ auto mean = static_cast<accscalar_t>(mean_);
440
+ auto std = static_cast<accscalar_t>(std_);
441
+ // define lambda to multiply std and add mean
442
+ auto normal_func = [mean, std] __device__ (accscalar_t rand) {
443
+ return static_cast<scalar_t>(transformation::normal<accscalar_t>(rand, mean, std));
444
+ };
445
+ normal_and_transform<scalar_t, accscalar_t, curand4_engine_calls>(iter, gen, normal_func);
446
+ });
447
+ }
448
+
449
+ template<typename RNG>
450
+ struct NormalKernel {
451
+ void operator()(const TensorBase &self, double mean, double std, c10::optional<Generator> gen) {
452
+ normal_kernel(self, mean, std, check_generator<RNG>(gen));
453
+ }
454
+ };
455
+
456
+ // ==================================================== Uniform ========================================================
457
+
458
+ template<typename RNG>
459
+ void uniform_kernel(TensorIteratorBase& iter, double from_, double to_, RNG gen) {
460
+ AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "uniform_kernel_cuda", [&] {
461
+ auto from = static_cast<scalar_t>(from_);
462
+ auto to = static_cast<scalar_t>(to_);
463
+ using opmath_t = at::opmath_type<scalar_t>;
464
+ auto range = static_cast<opmath_t>(to-from);
465
+ // define lambda to reverse bounds, multiply 'range' and add 'from_'
466
+ auto uniform_func = [range, from, to] __device__ (opmath_t rand) {
467
+ // Compute output value before reversing the bounds
468
+ // BEFORE TOUCHING THIS CODE READ: https://github.com/pytorch/pytorch/issues/96947
469
+ auto value = static_cast<scalar_t>(rand * range + from);
470
+ // reverse the bounds of curand4 from (0, 1] to [0, 1)
471
+ // Note that this method is from legacy THCTensorRandom and is likely to give
472
+ // you more 0-s, since, the probability of gettings 1-s is higher than 0-s and
473
+ // by reversing the bounds, we are flipping the probabilities of 1-s and 0-s.
474
+ // BEFORE TOUCHING THIS CODE READ: https://github.com/pytorch/pytorch/issues/16706
475
+ auto reverse_bound_value = value == to ? from : value;
476
+ return reverse_bound_value;
477
+ };
478
+ uniform_and_transform<scalar_t, opmath_t, curand4_engine_calls>(iter, gen, uniform_func);
479
+ });
480
+ }
481
+
482
+ template<typename RNG>
483
+ struct UniformKernel {
484
+ void operator()(TensorIteratorBase& iter, double from, double to, c10::optional<Generator> gen) {
485
+ uniform_kernel(iter, from, to, check_generator<RNG>(gen));
486
+ }
487
+ };
488
+
489
+ // ================================================== LogNormal =======================================================
490
+
491
+ template<typename RNG>
492
+ void log_normal_kernel(TensorIteratorBase& iter, double mean_, double std_, RNG gen) {
493
+ AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "log_normal_cuda", [&] {
494
+ using accscalar_t = at::acc_type<scalar_t, true>;
495
+ auto mean = static_cast<accscalar_t>(mean_);
496
+ auto std = static_cast<accscalar_t>(std_);
497
+ // define lambda for log_normal transformation
498
+ auto log_normal_func = [mean, std] __device__ (accscalar_t rand) {
499
+ return static_cast<scalar_t>(transformation::log_normal<accscalar_t>(transformation::normal<accscalar_t>(rand, mean, std)));
500
+ };
501
+ normal_and_transform<scalar_t, accscalar_t, curand4_engine_calls>(iter, gen, log_normal_func);
502
+ });
503
+ }
504
+
505
+ template<typename RNG>
506
+ struct LogNormalKernel {
507
+ void operator()(TensorIteratorBase& iter, double mean, double std, c10::optional<Generator> gen) {
508
+ log_normal_kernel(iter, mean, std, check_generator<RNG>(gen));
509
+ }
510
+ };
511
+
512
+ // =================================================== Geometric ======================================================
513
+
514
+ template<typename RNG>
515
+ void geometric_kernel(TensorIteratorBase& iter, double p, RNG gen) {
516
+ AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "geometric_cuda", [&] {
517
+ using accscalar_t = at::DiscreteDistributionType<scalar_t>::type;
518
+ // define lambda for geometric transformation
519
+ auto geometric_func = [p] __device__ (accscalar_t rand) {
520
+ return static_cast<scalar_t>(transformation::geometric<accscalar_t>(rand, p));
521
+ };
522
+ uniform_and_transform<scalar_t, accscalar_t, curand4_engine_calls>(iter, gen, geometric_func);
523
+ });
524
+ }
525
+
526
+ template<typename RNG>
527
+ struct GeometricKernel {
528
+ void operator()(TensorIteratorBase& iter, double p, c10::optional<Generator> gen) {
529
+ geometric_kernel(iter, p, check_generator<RNG>(gen));
530
+ }
531
+ };
532
+
533
+ // ================================================== Exponential =====================================================
534
+
535
+ template<typename RNG>
536
+ void exponential_kernel(TensorIteratorBase& iter, double lambda_, RNG gen) {
537
+ TORCH_CHECK(isFloatingType(iter.dtype()), "Exponential distribution is a continuous probability distribution. dtype must be a floating point but you specified ", iter.dtype());
538
+ AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "exponential_cuda", [&] {
539
+ using accscalar_t = at::acc_type<scalar_t, true>;
540
+ auto lambda = static_cast<accscalar_t>(lambda_);
541
+ // define lambda for exponential transformation
542
+ auto exponential_func = [lambda] __device__ (accscalar_t rand) {
543
+ return static_cast<scalar_t>(transformation::exponential<accscalar_t>(rand, lambda));
544
+ };
545
+ uniform_and_transform<scalar_t, accscalar_t, curand4_engine_calls>(iter, gen, exponential_func);
546
+ });
547
+ }
548
+
549
+ template<typename RNG>
550
+ struct ExponentialKernel {
551
+ void operator()(TensorIteratorBase& iter, double lambda, c10::optional<Generator> gen) {
552
+ exponential_kernel(iter, lambda, check_generator<RNG>(gen));
553
+ }
554
+ };
555
+
556
+ // ==================================================== Cauchy ========================================================
557
+
558
+ template<typename RNG>
559
+ void cauchy_kernel(TensorIteratorBase& iter, double median_, double sigma_, RNG gen) {
560
+ AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "cauchy_cuda", [&] {
561
+ using accscalar_t = at::acc_type<scalar_t, true>;
562
+ auto median = static_cast<accscalar_t>(median_);
563
+ auto sigma = static_cast<accscalar_t>(sigma_);
564
+ // define lambda for cauchy transformation
565
+ auto cauchy_func = [median, sigma] __device__ (accscalar_t rand) {
566
+ return static_cast<scalar_t>(transformation::cauchy<accscalar_t>(rand, median, sigma));
567
+ };
568
+ uniform_and_transform<scalar_t, accscalar_t, curand4_engine_calls>(iter, gen, cauchy_func);
569
+ });
570
+ }
571
+
572
+ template<typename RNG>
573
+ struct CauchyKernel {
574
+ void operator()(TensorIteratorBase& iter, double median, double sigma, c10::optional<Generator> gen) {
575
+ cauchy_kernel(iter, median, sigma, check_generator<RNG>(gen));
576
+ }
577
+ };
578
+
579
+ // ==================================================== Bernoulli =====================================================
580
+
581
+ template<typename scalar_t, typename prob_t>
582
+ void bernoulli_tensor_cuda_kernel(
583
+ const TensorBase &ret, const at::TensorBase &p,
584
+ PhiloxCudaState philox_args) {
585
+ auto functor = [philox_args] __device__(
586
+ int n, scalar_t& v1, scalar_t& v2, scalar_t& v3, scalar_t& v4,
587
+ const prob_t& p1, const prob_t& p2, const prob_t& p3, const prob_t& p4) {
588
+ auto seeds = at::cuda::philox::unpack(philox_args);
589
+ curandStatePhilox4_32_10_t state;
590
+ curand_init(std::get<0>(seeds),
591
+ blockIdx.x * blockDim.x + threadIdx.x,
592
+ std::get<1>(seeds),
593
+ &state);
594
+
595
+ // See Note [Register spilling in curand call for CUDA < 10]
596
+ float4 rand = curand_uniform4(&state);
597
+ switch (n) {
598
+ case 4: {
599
+ CUDA_KERNEL_ASSERT(0 <= p4 && p4 <= 1);
600
+ v4 = static_cast<scalar_t>(rand.w <= p4);
601
+ // fallthrough
602
+ }
603
+ case 3: {
604
+ CUDA_KERNEL_ASSERT(0 <= p3 && p3 <= 1);
605
+ v3 = static_cast<scalar_t>(rand.z <= p3);
606
+ // fallthrough
607
+ }
608
+ case 2: {
609
+ CUDA_KERNEL_ASSERT(0 <= p2 && p2 <= 1);
610
+ v2 = static_cast<scalar_t>(rand.y <= p2);
611
+ // fallthrough
612
+ }
613
+ case 1: {
614
+ CUDA_KERNEL_ASSERT(0 <= p1 && p1 <= 1);
615
+ v1 = static_cast<scalar_t>(rand.x <= p1);
616
+ }
617
+ }
618
+ };
619
+ // The template argument `4` below indicates that we want to operate on four
620
+ // elements at a time. See NOTE [ CUDA_tensor_applyN helpers ] for details.
621
+ at::cuda::CUDA_tensor_apply2<scalar_t, prob_t, 4, decltype(functor),
622
+ /*max_threads_per_block=*/512,
623
+ /*min_blocks_per_sm=*/2>(ret, p, functor);
624
+ }
625
+
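The switch in bernoulli_tensor_cuda_kernel above relies on intentional case fallthrough to handle a tail of 1 to 4 remaining elements from a single curand_uniform4 draw. A minimal host-side sketch of that pattern (write_tail and all other names here are illustrative, not part of the header):

// Host-side sketch of the switch-with-fallthrough tail handling used above:
// `n` is how many of the four generated values are actually needed, and each
// case intentionally falls through to the ones below it.
#include <cstdio>

static void write_tail(int n, float* out, const float rand[4], const float p[4]) {
  switch (n) {
    case 4: out[3] = rand[3] <= p[3];  // fallthrough
    case 3: out[2] = rand[2] <= p[2];  // fallthrough
    case 2: out[1] = rand[1] <= p[1];  // fallthrough
    case 1: out[0] = rand[0] <= p[0];
  }
}

int main() {
  const float rand[4] = {0.1f, 0.7f, 0.4f, 0.9f};
  const float p[4]    = {0.5f, 0.5f, 0.5f, 0.5f};
  float out[4] = {0, 0, 0, 0};
  write_tail(3, out, rand, p);  // only the first three slots are written
  std::printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);  // 1 0 1 0
  return 0;
}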
626
+ template<typename RNG>
627
+ void bernoulli_kernel(const TensorBase &self, const TensorBase &p_, RNG gen) {
628
+ PhiloxCudaState rng_engine_inputs;
629
+ {
630
+ // See Note [Acquire lock when using random generators]
631
+ std::lock_guard<std::mutex> lock(gen->mutex_);
632
+ rng_engine_inputs = gen->philox_cuda_state(10);
633
+ }
634
+ TORCH_CHECK(at::isFloatingType(p_.scalar_type()), "expected probabilities tensor to have floating type, got ", p_.scalar_type());
635
+ // cast probabilities tensor to double for double `self` tensor, and to `float` for everything else
636
+ const auto p_type = self.dtype() == at::kDouble ? at::kDouble : at::kFloat;
637
+ auto p_cuda = p_.to(TensorOptions().device(self.device()).dtype(p_type));
638
+ auto p = expand_inplace(self, p_cuda);
639
+ AT_DISPATCH_ALL_TYPES_AND3(
640
+ at::ScalarType::Half, at::ScalarType::BFloat16, at::ScalarType::Bool, self.scalar_type(), "bernoulli_tensor_cuda_self_", [&] {
641
+ if (std::is_same<scalar_t, double>::value) {
642
+ return bernoulli_tensor_cuda_kernel<double, double>(self, *p, rng_engine_inputs);
643
+ } else {
644
+ return bernoulli_tensor_cuda_kernel<scalar_t, float>(self, *p, rng_engine_inputs);
645
+ }
646
+ });
647
+ }
648
+
649
+ template<typename RNG>
650
+ void bernoulli_kernel(TensorIteratorBase& iter, double p, RNG gen) {
651
+ AT_DISPATCH_ALL_TYPES_AND3(
652
+ at::ScalarType::Half, at::ScalarType::BFloat16, at::ScalarType::Bool, iter.dtype(), "bernoulli_scalar_cuda_", [&] {
653
+ using accscalar_t = at::DiscreteDistributionType<scalar_t>::type;
654
+ // define lambda for bernoulli transformation
655
+ auto bernoulli_func = [p] __device__ (accscalar_t rand) {
656
+ return static_cast<scalar_t>(transformation::bernoulli<accscalar_t>(rand, p));
657
+ };
658
+ uniform_and_transform<scalar_t, accscalar_t, curand4_engine_calls>(iter, gen, bernoulli_func);
659
+ });
660
+ }
661
+
662
+ template<typename RNG>
663
+ struct BernoulliKernel {
664
+ void operator()(TensorIteratorBase& iter, double p, c10::optional<Generator> gen) {
665
+ bernoulli_kernel(iter, p, check_generator<RNG>(gen));
666
+ }
667
+ void operator()(const TensorBase &self, const TensorBase &p_, c10::optional<Generator> gen) {
668
+ bernoulli_kernel(self, p_, check_generator<RNG>(gen));
669
+ }
670
+ };
671
+
672
+ }}}}
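The bound reversal in uniform_func above is subtle: curand_uniform4 produces values in (0, 1], so the raw transform can land exactly on `to`, and that single point is remapped to `from` to obtain [from, to). A minimal host-side sketch of the same arithmetic, with illustrative names (uniform_transform is not part of the header):

// Host-side sketch (not the actual device code) of the bound reversal used by
// uniform_func above: rand == 1.0 is the only input that hits the upper bound,
// and it is mapped back to `from`, turning (from, to] into [from, to).
#include <cassert>
#include <cstdio>

static float uniform_transform(float rand, float from, float to) {
  const float range = to - from;
  const float value = rand * range + from;   // in (from, to]
  return value == to ? from : value;         // reverse the bounds to [from, to)
}

int main() {
  assert(uniform_transform(1.0f, 2.0f, 5.0f) == 2.0f);            // upper bound remapped
  std::printf("%f\n", uniform_transform(0.5f, 2.0f, 5.0f));       // interior value: 3.5
  return 0;
}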
venv/lib/python3.10/site-packages/torch/include/ATen/native/cuda/Distributions.h ADDED
@@ -0,0 +1,25 @@
1
+ #pragma once
2
+
3
+ namespace at {
4
+ struct CUDAGeneratorImpl;
5
+ struct TensorIteratorBase;
6
+ class TensorBase;
7
+
8
+ namespace native {
9
+
10
+ void launch_poisson_cuda_kernel(
11
+ const TensorBase &ret, const TensorBase &lambda, CUDAGeneratorImpl *gen);
12
+
13
+ void launch_gamma_kernel(
14
+ const TensorBase &ret, const TensorBase &alpha, CUDAGeneratorImpl *gen);
15
+
16
+ void launch_binomial_cuda_kernel(
17
+ TensorIteratorBase &iter, CUDAGeneratorImpl *gen);
18
+
19
+ void launch_dirichlet_kernel(TensorIteratorBase &iter);
20
+
21
+ void launch_standard_gamma_grad_kernel(TensorIteratorBase &iter);
22
+
23
+ void launch_dirichlet_grad_kernel(TensorIteratorBase &iter);
24
+
25
+ }} // namespace at::native
venv/lib/python3.10/site-packages/torch/include/ATen/native/cuda/EmbeddingBackwardKernel.cuh ADDED
@@ -0,0 +1,22 @@
1
+ #pragma once
2
+ #include <ATen/core/Tensor.h>
3
+ #include <ATen/cuda/Atomic.cuh>
4
+ #include <ATen/cuda/CUDAContext.h>
5
+ #include <ATen/TensorUtils.h>
6
+
7
+ namespace at {
8
+ namespace native {
9
+
10
+ Tensor embedding_backward_cuda_kernel(
11
+ const Tensor &grad,
12
+ const Tensor &orig_indices,
13
+ const Tensor &sorted_indices,
14
+ const Tensor &count,
15
+ int64_t num_weights,
16
+ int padding_idx = -1,
17
+ bool mode_mean = false,
18
+ const Tensor &offset2bag = Tensor(),
19
+ const Tensor &bag_size = Tensor(),
20
+ const Tensor &per_sample_weights = Tensor());
21
+
22
+ }}
venv/lib/python3.10/site-packages/torch/include/ATen/native/cuda/ForeachFunctors.cuh ADDED
@@ -0,0 +1,681 @@
1
+ #pragma once
2
+ #include <ATen/OpMathType.h>
3
+ #include <ATen/native/ForeachUtils.h>
4
+ #include <ATen/native/cuda/MultiTensorApply.cuh>
5
+ #include <ATen/native/cuda/Pow.cuh>
6
+
7
+ namespace at::native {
8
+
9
+ namespace {
10
+
11
+ // TODO(crcrpar): Handle version bump in codegen.
12
+ // rel:
13
+ // https://github.com/pytorch/pytorch/blob/9cf84347767c8abb8feba18a9a1baba321eeb8b9/tools/autograd/gen_inplace_or_view_type.py#L481-L482
14
+ inline void increment_version(TensorList tensors) {
15
+ for (const auto& t : tensors) {
16
+ t.unsafeGetTensorImpl()->bump_version();
17
+ }
18
+ }
19
+
20
+ // Initializes args and checks if all args are aligned
21
+ template <int depth, typename T>
22
+ __device__ bool init_args(
23
+ T** args,
24
+ TensorListMetadata<depth>& tl,
25
+ const int64_t chunk_idx,
26
+ const int64_t chunk_size,
27
+ const int64_t tensor_loc) {
28
+ bool all_aligned = true;
29
+ for (int i = 0; i < depth; i++) {
30
+ args[i] = (T*)tl.addresses[i][tensor_loc];
31
+ args[i] += chunk_idx * chunk_size;
32
+
33
+ if (!is_aligned(args[i])) {
34
+ all_aligned = false;
35
+ }
36
+ }
37
+ return all_aligned;
38
+ }
39
+
40
+ // Initializes args and checks if all args are aligned
41
+ template <int depth, typename T, typename T2>
42
+ __device__ bool init_args(
43
+ T** args,
44
+ TensorListScalarListMetadata<T2, depth>& tl,
45
+ const int64_t chunk_idx,
46
+ const int64_t chunk_size,
47
+ const int64_t tensor_loc) {
48
+ bool all_aligned = true;
49
+ for (int i = 0; i < depth; i++) {
50
+ args[i] = (T*)tl.addresses[i][tensor_loc];
51
+ args[i] += chunk_idx * chunk_size;
52
+
53
+ if (!is_aligned(args[i])) {
54
+ all_aligned = false;
55
+ }
56
+ }
57
+ return all_aligned;
58
+ }
59
+
60
+ template <int depth, typename T>
61
+ __device__ bool init_args(
62
+ T** args,
63
+ FusedOptimizerTensorListMetadata<depth>& tl,
64
+ const int64_t chunk_idx,
65
+ const int64_t chunk_size,
66
+ const int64_t tensor_loc) {
67
+ bool all_aligned = true;
68
+ for (int i = 0; i < depth; i++) {
69
+ args[i] = (T*)tl.addresses[i][tensor_loc];
70
+ args[i] += chunk_idx * chunk_size;
71
+
72
+ if (!is_aligned(args[i])) {
73
+ all_aligned = false;
74
+ }
75
+ }
76
+ return all_aligned;
77
+ }
78
+
79
+ template <int depth, typename T>
80
+ __device__ void load_args(
81
+ T r_args[][kILP],
82
+ T** args,
83
+ const int64_t i_start,
84
+ const int64_t chunk_size,
85
+ const int64_t n) {
86
+ #pragma unroll
87
+ for (int ii = 0; ii < kILP; ii++) {
88
+ const auto i = i_start + threadIdx.x + ii * blockDim.x;
89
+ for (int r_index = 0; r_index < depth; r_index++) {
90
+ r_args[r_index][ii] = 0;
91
+ if (i < n && i < chunk_size) {
92
+ r_args[r_index][ii] = args[r_index][i];
93
+ }
94
+ }
95
+ }
96
+ }
97
+
98
+ template <typename T>
99
+ __device__ void store_args(
100
+ T* dst,
101
+ T* src,
102
+ const int64_t i_start,
103
+ const int64_t chunk_size,
104
+ const int64_t n) {
105
+ #pragma unroll
106
+ for (int ii = 0; ii < kILP; ii++) {
107
+ const int64_t i = i_start + threadIdx.x + ii * blockDim.x;
108
+ if (i < n && i < chunk_size)
109
+ dst[i] = src[ii];
110
+ }
111
+ }
112
+
113
+ template <int res_arg_index, typename Op, typename T, typename opmath_t>
114
+ __device__ __forceinline__ void binary_op_scalar(
115
+ T r_args[][kILP],
116
+ T** args,
117
+ opmath_t scalar,
118
+ const int64_t n,
119
+ const int64_t chunk_size,
120
+ const bool all_aligned,
121
+ Op op) {
122
+ // to make things simple, we put aligned case in a different code path
123
+ if (n % kILP == 0 && chunk_size % kILP == 0 && all_aligned) {
124
+ for (int64_t i_start = threadIdx.x;
125
+ i_start * kILP < n && i_start * kILP < chunk_size;
126
+ i_start += blockDim.x) {
127
+ // load
128
+ load_store(r_args[0], args[0], 0, i_start);
129
+ #pragma unroll
130
+ for (int ii = 0; ii < kILP; ii++) {
131
+ r_args[0][ii] = static_cast<T>(
132
+ op(static_cast<opmath_t>(r_args[0][ii]),
133
+ static_cast<opmath_t>(scalar)));
134
+ }
135
+ // store
136
+ load_store(args[res_arg_index], r_args[0], i_start, 0);
137
+ }
138
+ } else {
139
+ for (int64_t i_start = 0; i_start < n && i_start < chunk_size;
140
+ i_start += blockDim.x * kILP) {
141
+ // Regardless if depth is 1 (for inplace) or 2 (for out of place), r_args
142
+ // has depth 1
143
+ load_args<1>(r_args, args, i_start, chunk_size, n);
144
+ #pragma unroll
145
+ for (int ii = 0; ii < kILP; ii++) {
146
+ r_args[0][ii] = static_cast<T>(
147
+ op(static_cast<opmath_t>(r_args[0][ii]),
148
+ static_cast<opmath_t>(scalar)));
149
+ }
150
+ store_args(args[res_arg_index], r_args[0], i_start, chunk_size, n);
151
+ }
152
+ }
153
+ }
154
+
155
+ template <int res_arg_index, typename Op, typename T, typename opmath_t>
156
+ __device__ __forceinline__ void pointwise_op_scalar(
157
+ T r_args[][kILP],
158
+ T** args,
159
+ opmath_t scalar,
160
+ const int64_t n,
161
+ const int64_t chunk_size,
162
+ const bool all_aligned,
163
+ Op op) {
164
+ // to make things simple, we put aligned case in a different code path
165
+ if (n % kILP == 0 && chunk_size % kILP == 0 && all_aligned) {
166
+ for (int64_t i_start = threadIdx.x;
167
+ i_start * kILP < n && i_start * kILP < chunk_size;
168
+ i_start += blockDim.x) {
169
+ // load
170
+ load_store(r_args[0], args[0], 0, i_start);
171
+ load_store(r_args[1], args[1], 0, i_start);
172
+ load_store(r_args[2], args[2], 0, i_start);
173
+ #pragma unroll
174
+ for (int ii = 0; ii < kILP; ii++) {
175
+ r_args[0][ii] = static_cast<T>(
176
+ static_cast<opmath_t>(r_args[0][ii]) +
177
+ scalar *
178
+ op(static_cast<opmath_t>(r_args[1][ii]),
179
+ static_cast<opmath_t>(r_args[2][ii])));
180
+ }
181
+ // store
182
+ load_store(args[res_arg_index], r_args[0], i_start, 0);
183
+ }
184
+ } else {
185
+ for (int64_t i_start = 0; i_start < n && i_start < chunk_size;
186
+ i_start += blockDim.x * kILP) {
187
+ // Regardless if depth is 3 (for inplace) or 4 (for out of place), r_args
188
+ // has depth 3
189
+ load_args<3>(r_args, args, i_start, chunk_size, n);
190
+ #pragma unroll
191
+ for (int ii = 0; ii < kILP; ii++) {
192
+ r_args[0][ii] = static_cast<T>(
193
+ static_cast<opmath_t>(r_args[0][ii]) +
194
+ scalar *
195
+ op(static_cast<opmath_t>(r_args[1][ii]),
196
+ static_cast<opmath_t>(r_args[2][ii])));
197
+ }
198
+ store_args(args[res_arg_index], r_args[0], i_start, chunk_size, n);
199
+ }
200
+ }
201
+ }
202
+
203
+ //
204
+ // Binary Functors
205
+ //
206
+ template <typename T, int depth, int r_args_depth, int res_arg_index>
207
+ struct BinaryOpScalarFunctor {
208
+ using opmath_t = at::opmath_type<T>;
209
+ template <typename Op>
210
+ __device__ __forceinline__ void operator()(
211
+ int chunk_size,
212
+ TensorListMetadata<depth>& tl,
213
+ Op op,
214
+ opmath_t scalar) {
215
+ const int tensor_loc = tl.block_to_tensor[blockIdx.x];
216
+ const int chunk_idx = tl.block_to_chunk[blockIdx.x];
217
+ auto n = tl.numel_for_tensor[tensor_loc];
218
+
219
+ T* args[depth];
220
+ const bool all_aligned =
221
+ init_args<depth>(args, tl, chunk_idx, chunk_size, tensor_loc);
222
+ n -= chunk_idx * chunk_size;
223
+ T r_args[r_args_depth][kILP];
224
+
225
+ binary_op_scalar<res_arg_index>(
226
+ r_args, args, scalar, n, chunk_size, all_aligned, op);
227
+ }
228
+ };
229
+
230
+ template <typename T, int depth, int r_args_depth, int res_arg_index>
231
+ struct BinaryOpScalarListFunctor {
232
+ using opmath_t = at::opmath_type<T>;
233
+ template <typename Op>
234
+ __device__ __forceinline__ void operator()(
235
+ int chunk_size,
236
+ TensorListScalarListMetadata<opmath_t, depth>& tl,
237
+ Op op) {
238
+ const auto tensor_loc = tl.block_to_tensor[blockIdx.x];
239
+ const auto chunk_idx = tl.block_to_chunk[blockIdx.x];
240
+ auto n = tl.numel_for_tensor[tensor_loc];
241
+
242
+ T* args[depth];
243
+ const bool all_aligned =
244
+ init_args<depth>(args, tl, chunk_idx, chunk_size, tensor_loc);
245
+ opmath_t scalar = tl.scalar_vals[tensor_loc];
246
+ n -= chunk_idx * chunk_size;
247
+ T r_args[r_args_depth][kILP];
248
+
249
+ binary_op_scalar<res_arg_index>(
250
+ r_args, args, scalar, n, chunk_size, all_aligned, op);
251
+ }
252
+ };
253
+
254
+ template <typename T, int depth, int r_args_depth, int res_arg_index>
255
+ struct BinaryOpListAlphaFunctor {
256
+ using opmath_t = at::opmath_type<T>;
257
+ template <typename Op>
258
+ __device__ __forceinline__ void operator()(
259
+ int chunk_size,
260
+ TensorListMetadata<depth>& tl,
261
+ Op op,
262
+ opmath_t alpha) {
263
+ const auto tensor_loc = tl.block_to_tensor[blockIdx.x];
264
+ const auto chunk_idx = tl.block_to_chunk[blockIdx.x];
265
+ auto n = tl.numel_for_tensor[tensor_loc];
266
+
267
+ T* args[depth];
268
+ const bool all_aligned =
269
+ init_args<depth>(args, tl, chunk_idx, chunk_size, tensor_loc);
270
+ n -= chunk_idx * chunk_size;
271
+ T r_args[r_args_depth][kILP];
272
+
273
+ // to make things simple, we put aligned case in a different code path
274
+ if (n % kILP == 0 && chunk_size % kILP == 0 && all_aligned) {
275
+ for (int64_t i_start = threadIdx.x;
276
+ i_start * kILP < n && i_start * kILP < chunk_size;
277
+ i_start += blockDim.x) {
278
+ // load
279
+ load_store(r_args[0], args[0], 0, i_start);
280
+ load_store(r_args[1], args[1], 0, i_start);
281
+ #pragma unroll
282
+ for (int ii = 0; ii < kILP; ii++) {
283
+ r_args[0][ii] = static_cast<T>(
284
+ op(static_cast<opmath_t>(r_args[0][ii]),
285
+ alpha * static_cast<opmath_t>(r_args[1][ii])));
286
+ }
287
+ // store
288
+ load_store(args[res_arg_index], r_args[0], i_start, 0);
289
+ }
290
+ } else {
291
+ for (int64_t i_start = 0; i_start < n && i_start < chunk_size;
292
+ i_start += blockDim.x * kILP) {
293
+ load_args<r_args_depth>(r_args, args, i_start, chunk_size, n);
294
+ #pragma unroll
295
+ for (int ii = 0; ii < kILP; ii++) {
296
+ r_args[0][ii] = static_cast<T>(
297
+ op(static_cast<opmath_t>(r_args[0][ii]),
298
+ alpha * static_cast<opmath_t>(r_args[1][ii])));
299
+ }
300
+ store_args(args[res_arg_index], r_args[0], i_start, chunk_size, n);
301
+ }
302
+ }
303
+ }
304
+ };
305
+
306
+ template <typename T, int depth, int r_args_depth, int res_arg_index>
307
+ struct BinaryOpScalarTensorFunctor {
308
+ using opmath_t = at::opmath_type<T>;
309
+ template <typename Op>
310
+ __device__ __forceinline__ void operator()(
311
+ int chunk_size,
312
+ TensorListMetadata<depth>& tl,
313
+ Op op,
314
+ T* scalar,
315
+ opmath_t alpha) {
316
+ const int tensor_loc = tl.block_to_tensor[blockIdx.x];
317
+ const int chunk_idx = tl.block_to_chunk[blockIdx.x];
318
+ auto n = tl.numel_for_tensor[tensor_loc];
319
+
320
+ T* args[depth];
321
+ const bool all_aligned =
322
+ init_args<depth>(args, tl, chunk_idx, chunk_size, tensor_loc);
323
+ n -= chunk_idx * chunk_size;
324
+ T r_args[r_args_depth][kILP];
325
+
326
+ // to make things simple, we put aligned case in a different code path
327
+ if (n % kILP == 0 && chunk_size % kILP == 0 && all_aligned) {
328
+ for (int64_t i_start = threadIdx.x;
329
+ i_start * kILP < n && i_start * kILP < chunk_size;
330
+ i_start += blockDim.x) {
331
+ // load
332
+ load_store(r_args[0], args[0], 0, i_start);
333
+ #pragma unroll
334
+ for (int ii = 0; ii < kILP; ii++) {
335
+ r_args[0][ii] = static_cast<T>(op(
336
+ static_cast<opmath_t>(r_args[0][ii]),
337
+ static_cast<opmath_t>(alpha) * static_cast<opmath_t>(*scalar)));
338
+ }
339
+ // store
340
+ load_store(args[res_arg_index], r_args[0], i_start, 0);
341
+ }
342
+ } else {
343
+ for (int64_t i_start = 0; i_start < n && i_start < chunk_size;
344
+ i_start += blockDim.x * kILP) {
345
+ // Regardless of whether depth is 1 (for inplace) or 2 (for out of place),
346
+ // r_args has depth 1
347
+ load_args<1>(r_args, args, i_start, chunk_size, n);
348
+ #pragma unroll
349
+ for (int ii = 0; ii < kILP; ii++) {
350
+ r_args[0][ii] = static_cast<T>(op(
351
+ static_cast<opmath_t>(r_args[0][ii]),
352
+ static_cast<opmath_t>(alpha) * static_cast<opmath_t>(*scalar)));
353
+ }
354
+ store_args(args[res_arg_index], r_args[0], i_start, chunk_size, n);
355
+ }
356
+ }
357
+ }
358
+ };
359
+
360
+ //
361
+ // Unary Functors
362
+ //
363
+
364
+ template <typename T, int depth, int r_args_depth, int res_arg_index>
365
+ struct ZeroFunctor {
366
+ __device__ __forceinline__ void operator()(
367
+ int chunk_size,
368
+ TensorListMetadata<1>& tl) {
369
+ const auto tensor_loc = tl.block_to_tensor[blockIdx.x];
370
+ const auto chunk_idx = tl.block_to_chunk[blockIdx.x];
371
+ auto n = tl.numel_for_tensor[tensor_loc];
372
+
373
+ T* args[depth];
374
+ const auto all_aligned =
375
+ init_args<depth>(args, tl, chunk_idx, chunk_size, tensor_loc);
376
+ n -= chunk_idx * chunk_size;
377
+ T r_args[r_args_depth][kILP];
378
+
379
+ // to make things simple, we put aligned case in a different code path
380
+ if (n % kILP == 0 && chunk_size % kILP == 0 && all_aligned) {
381
+ for (int64_t i_start = threadIdx.x;
382
+ i_start * kILP < n && i_start * kILP < chunk_size;
383
+ i_start += blockDim.x) {
384
+ #pragma unroll
385
+ for (int ii = 0; ii < kILP; ii++) {
386
+ r_args[0][ii] = 0;
387
+ }
388
+ // store
389
+ load_store(args[0], r_args[0], i_start, 0);
390
+ }
391
+ } else {
392
+ for (int64_t i_start = 0; i_start < n && i_start < chunk_size;
393
+ i_start += blockDim.x * kILP) {
394
+ #pragma unroll
395
+ for (int ii = 0; ii < kILP; ii++) {
396
+ r_args[0][ii] = 0;
397
+ }
398
+ store_args(args[res_arg_index], r_args[0], i_start, chunk_size, n);
399
+ }
400
+ }
401
+ }
402
+ };
403
+
404
+ template <typename T, int depth, int r_args_depth, int res_arg_index>
405
+ struct UnaryOpFunctor {
406
+ using opmath_t = at::opmath_type<T>;
407
+ template <typename Op>
408
+ __device__ __forceinline__ void operator()(
409
+ int chunk_size,
410
+ TensorListMetadata<depth>& tl,
411
+ Op op) {
412
+ const auto tensor_loc = tl.block_to_tensor[blockIdx.x];
413
+ const auto chunk_idx = tl.block_to_chunk[blockIdx.x];
414
+ auto n = tl.numel_for_tensor[tensor_loc];
415
+
416
+ T* args[depth];
417
+ bool all_aligned =
418
+ init_args<depth>(args, tl, chunk_idx, chunk_size, tensor_loc);
419
+ n -= chunk_idx * chunk_size;
420
+ T r_args[r_args_depth][kILP];
421
+
422
+ // to make things simple, we put aligned case in a different code path
423
+ if (n % kILP == 0 && chunk_size % kILP == 0 && all_aligned) {
424
+ for (int64_t i_start = threadIdx.x;
425
+ i_start * kILP < n && i_start * kILP < chunk_size;
426
+ i_start += blockDim.x) {
427
+ // load
428
+ load_store(r_args[0], args[0], 0, i_start);
429
+ #pragma unroll
430
+ for (int ii = 0; ii < kILP; ii++) {
431
+ r_args[0][ii] =
432
+ static_cast<T>(op(static_cast<opmath_t>(r_args[0][ii])));
433
+ }
434
+ // store
435
+ load_store(args[res_arg_index], r_args[0], i_start, 0);
436
+ }
437
+ } else {
438
+ for (int64_t i_start = 0; i_start < n && i_start < chunk_size;
439
+ i_start += blockDim.x * kILP) {
440
+ load_args<r_args_depth>(r_args, args, i_start, chunk_size, n);
441
+ #pragma unroll
442
+ for (int ii = 0; ii < kILP; ii++) {
443
+ r_args[0][ii] =
444
+ static_cast<T>(op(static_cast<opmath_t>(r_args[0][ii])));
445
+ }
446
+ store_args(args[res_arg_index], r_args[0], i_start, chunk_size, n);
447
+ }
448
+ }
449
+ }
450
+ };
451
+
452
+ //
453
+ // Pointwise Functors
454
+ //
455
+
456
+ template <typename T, int depth, int r_args_depth, int res_arg_index>
457
+ struct PointwiseOpScalarFunctor {
458
+ using opmath_t = at::opmath_type<T>;
459
+ template <typename Op>
460
+ __device__ __forceinline__ void operator()(
461
+ int chunk_size,
462
+ TensorListMetadata<depth>& tl,
463
+ Op op,
464
+ opmath_t scalar) {
465
+ const auto tensor_loc = tl.block_to_tensor[blockIdx.x];
466
+ const auto chunk_idx = tl.block_to_chunk[blockIdx.x];
467
+ auto n = tl.numel_for_tensor[tensor_loc];
468
+
469
+ T* args[depth];
470
+ const bool all_aligned =
471
+ init_args<depth>(args, tl, chunk_idx, chunk_size, tensor_loc);
472
+ n -= chunk_idx * chunk_size;
473
+ T r_args[r_args_depth][kILP];
474
+
475
+ pointwise_op_scalar<res_arg_index>(
476
+ r_args, args, scalar, n, chunk_size, all_aligned, op);
477
+ }
478
+ };
479
+
480
+ template <typename T, int depth, int r_args_depth, int res_arg_index>
481
+ struct PointwiseOpScalarListFunctor {
482
+ using opmath_t = at::opmath_type<T>;
483
+ template <typename Op>
484
+ __device__ __forceinline__ void operator()(
485
+ int chunk_size,
486
+ TensorListScalarListMetadata<opmath_t, depth>& tl,
487
+ Op op) {
488
+ const auto tensor_loc = tl.block_to_tensor[blockIdx.x];
489
+ const auto chunk_idx = tl.block_to_chunk[blockIdx.x];
490
+ auto n = tl.numel_for_tensor[tensor_loc];
491
+
492
+ T* args[depth];
493
+ const bool all_aligned =
494
+ init_args<depth>(args, tl, chunk_idx, chunk_size, tensor_loc);
495
+ opmath_t scalar = tl.scalar_vals[tensor_loc];
496
+ n -= chunk_idx * chunk_size;
497
+ T r_args[r_args_depth][kILP];
498
+
499
+ pointwise_op_scalar<res_arg_index>(
500
+ r_args, args, scalar, n, chunk_size, all_aligned, op);
501
+ }
502
+ };
503
+
504
+ template <typename T, int depth>
505
+ struct PointwiseOpListFunctor {
506
+ using opmath_t = at::opmath_type<T>;
507
+ template <typename Op>
508
+ __device__ __forceinline__ void operator()(
509
+ int chunk_size,
510
+ TensorListMetadata<depth>& tl,
511
+ Op op) {
512
+ const auto tensor_loc = tl.block_to_tensor[blockIdx.x];
513
+ const auto chunk_idx = tl.block_to_chunk[blockIdx.x];
514
+ auto n = tl.numel_for_tensor[tensor_loc];
515
+
516
+ T* args[depth];
517
+ const bool all_aligned =
518
+ init_args<depth>(args, tl, chunk_idx, chunk_size, tensor_loc);
519
+ n -= chunk_idx * chunk_size;
520
+ T r_args[depth - 1][kILP];
521
+
522
+ // to make things simple, we put aligned case in a different code path
523
+ if (n % kILP == 0 && chunk_size % kILP == 0 && all_aligned) {
524
+ for (int64_t i_start = threadIdx.x;
525
+ i_start * kILP < n && i_start * kILP < chunk_size;
526
+ i_start += blockDim.x) {
527
+ // load
528
+ load_store(r_args[0], args[0], 0, i_start);
529
+ load_store(r_args[1], args[1], 0, i_start);
530
+ #pragma unroll
531
+ for (int ii = 0; ii < kILP; ii++) {
532
+ r_args[0][ii] = static_cast<T>(
533
+ op(static_cast<opmath_t>(r_args[0][ii]),
534
+ static_cast<opmath_t>(r_args[1][ii])));
535
+ }
536
+ // store
537
+ load_store(args[2], r_args[0], i_start, 0);
538
+ }
539
+ } else {
540
+ for (int64_t i_start = 0; i_start < n && i_start < chunk_size;
541
+ i_start += blockDim.x * kILP) {
542
+ load_args<depth - 1>(r_args, args, i_start, chunk_size, n);
543
+ #pragma unroll
544
+ for (int ii = 0; ii < kILP; ii++) {
545
+ r_args[0][ii] = static_cast<T>(
546
+ op(static_cast<opmath_t>(r_args[0][ii]),
547
+ static_cast<opmath_t>(r_args[1][ii])));
548
+ }
549
+ store_args(args[2], r_args[0], i_start, chunk_size, n);
550
+ }
551
+ }
552
+ }
553
+ };
554
+
555
+ template <typename T, int depth, int r_args_depth, int res_arg_index>
556
+ struct TernaryOpListFunctor {
557
+ using opmath_t = at::opmath_type<T>;
558
+ template <typename Op>
559
+ __device__ __forceinline__ void operator()(
560
+ int chunk_size,
561
+ TensorListMetadata<depth>& tl,
562
+ Op op) {
563
+ static_assert(depth == 3 || depth == 4, "");
564
+ static_assert(depth >= r_args_depth, "");
565
+ static_assert(res_arg_index == depth - 1 || res_arg_index == 0, "");
566
+ const auto tensor_loc = tl.block_to_tensor[blockIdx.x];
567
+ const auto chunk_idx = tl.block_to_chunk[blockIdx.x];
568
+ auto n = tl.numel_for_tensor[tensor_loc];
569
+
570
+ T* args[depth];
571
+ const bool all_aligned =
572
+ init_args<depth>(args, tl, chunk_idx, chunk_size, tensor_loc);
573
+ n -= chunk_idx * chunk_size;
574
+ T r_args[r_args_depth][kILP];
575
+
576
+ if (n % kILP == 0 && chunk_size % kILP == 0 && all_aligned) {
577
+ for (int64_t i_start = threadIdx.x;
578
+ i_start * kILP < n && i_start * kILP < chunk_size;
579
+ i_start += blockDim.x) {
580
+ load_store(r_args[0], args[0], 0, i_start);
581
+ load_store(r_args[1], args[1], 0, i_start);
582
+ load_store(r_args[2], args[2], 0, i_start);
583
+ #pragma unroll
584
+ for (int ii = 0; ii < kILP; ii++) {
585
+ r_args[0][ii] =
586
+ op(static_cast<opmath_t>(r_args[0][ii]),
587
+ static_cast<opmath_t>(r_args[1][ii]),
588
+ static_cast<opmath_t>(r_args[2][ii]));
589
+ }
590
+ load_store(args[res_arg_index], r_args[0], i_start, 0);
591
+ }
592
+ } else {
593
+ for (int64_t i_start = 0; i_start < n && i_start < chunk_size;
594
+ i_start += blockDim.x * kILP) {
595
+ load_args<r_args_depth>(r_args, args, i_start, chunk_size, n);
596
+ #pragma unroll
597
+ for (int ii = 0; ii < kILP; ii++) {
598
+ r_args[0][ii] =
599
+ op(static_cast<opmath_t>(r_args[0][ii]),
600
+ static_cast<opmath_t>(r_args[1][ii]),
601
+ static_cast<opmath_t>(r_args[2][ii]));
602
+ }
603
+ store_args(args[res_arg_index], r_args[0], i_start, chunk_size, n);
604
+ }
605
+ }
606
+ }
607
+ };
608
+
609
+ template <typename T, int depth, int r_args_depth, int res_arg_index>
610
+ struct TernaryOpScalarFunctor {
611
+ using opmath_t = at::opmath_type<T>;
612
+ template <typename Op>
613
+ __device__ __forceinline__ void operator()(
614
+ int chunk_size,
615
+ TensorListMetadata<depth>& tl,
616
+ Op op,
617
+ opmath_t alpha) {
618
+ static_assert(depth == 2 || depth == 3, "");
619
+ static_assert(depth >= r_args_depth, "");
620
+ static_assert(res_arg_index == depth - 1 || res_arg_index == 0, "");
621
+ const auto tensor_loc = tl.block_to_tensor[blockIdx.x];
622
+ const auto chunk_idx = tl.block_to_chunk[blockIdx.x];
623
+ auto n = tl.numel_for_tensor[tensor_loc];
624
+
625
+ T* args[depth];
626
+ const bool all_aligned =
627
+ init_args<depth>(args, tl, chunk_idx, chunk_size, tensor_loc);
628
+ n -= chunk_idx * chunk_size;
629
+ T r_args[r_args_depth][kILP];
630
+
631
+ // to make things simple, we put aligned case in a different code path
632
+ if (n % kILP == 0 && chunk_size % kILP == 0 && all_aligned) {
633
+ for (int64_t i_start = threadIdx.x;
634
+ i_start * kILP < n && i_start * kILP < chunk_size;
635
+ i_start += blockDim.x) {
636
+ // load
637
+ load_store(r_args[0], args[0], 0, i_start);
638
+ load_store(r_args[1], args[1], 0, i_start);
639
+ #pragma unroll
640
+ for (int ii = 0; ii < kILP; ii++) {
641
+ r_args[0][ii] =
642
+ op(static_cast<opmath_t>(r_args[0][ii]),
643
+ static_cast<opmath_t>(r_args[1][ii]),
644
+ alpha);
645
+ }
646
+ // store
647
+ load_store(args[res_arg_index], r_args[0], i_start, 0);
648
+ }
649
+ } else {
650
+ for (int64_t i_start = 0; i_start < n && i_start < chunk_size;
651
+ i_start += blockDim.x * kILP) {
652
+ load_args<r_args_depth>(r_args, args, i_start, chunk_size, n);
653
+ #pragma unroll
654
+ for (int ii = 0; ii < kILP; ii++) {
655
+ r_args[0][ii] =
656
+ op(static_cast<opmath_t>(r_args[0][ii]),
657
+ static_cast<opmath_t>(r_args[1][ii]),
658
+ alpha);
659
+ }
660
+ store_args(args[res_arg_index], r_args[0], i_start, chunk_size, n);
661
+ }
662
+ }
663
+ }
664
+ };
665
+
666
+ template <typename T>
667
+ struct power_functor {
668
+ C10_DEVICE T operator()(const T& a, const T& b) const {
669
+ return at::native::pow_(a, b);
670
+ }
671
+ };
672
+
673
+ template <typename T>
674
+ struct reverse_power_functor {
675
+ C10_DEVICE T operator()(const T& a, const T& b) const {
676
+ return at::native::pow_(b, a);
677
+ }
678
+ };
679
+
680
+ } // namespace
681
+ } // namespace at::native
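Most functors in this header share the same dispatch: a vectorized fast path when all pointers are kILP-aligned and the sizes are multiples of kILP, and an element-wise fallback otherwise. A CPU-only sketch of that control flow under simplifying assumptions (one buffer, no chunking; the real fast path uses the vectorized load_store from MultiTensorApply.cuh, and all names below are illustrative):

#include <cstdint>
#include <cstdio>
#include <vector>

constexpr int kILP = 4;

// Mirrors the alignment test above: the pointer must be aligned to kILP elements.
template <typename T>
bool is_aligned_to_ilp(const T* p) {
  return reinterpret_cast<std::uintptr_t>(p) % (kILP * sizeof(T)) == 0;
}

template <typename T, typename Op>
void process_chunk(T* data, std::int64_t n, Op op) {
  if (n % kILP == 0 && is_aligned_to_ilp(data)) {
    // fast path: process kILP elements per step (the real code uses a
    // vectorized load_store here instead of a plain loop)
    for (std::int64_t i = 0; i < n; i += kILP) {
      for (int ii = 0; ii < kILP; ii++) data[i + ii] = op(data[i + ii]);
    }
  } else {
    // element-wise fallback with explicit bounds, mirroring load_args/store_args
    for (std::int64_t i = 0; i < n; i++) data[i] = op(data[i]);
  }
}

int main() {
  std::vector<float> v(10, 2.0f);
  process_chunk(v.data(), static_cast<std::int64_t>(v.size()),
                [](float x) { return x * x; });
  std::printf("%f\n", v[9]);  // 4.0
  return 0;
}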
venv/lib/python3.10/site-packages/torch/include/ATen/native/cuda/MiscUtils.h ADDED
@@ -0,0 +1,32 @@
1
+ #pragma once
2
+ #include <ATen/cuda/Exceptions.h>
3
+ #include <ATen/cuda/CUDAContext.h>
4
+ #include <ATen/cuda/CUDAConfig.h>
5
+ #include <ATen/cuda/PinnedMemoryAllocator.h>
6
+
7
+ namespace at {
8
+ namespace native {
9
+
10
+ static inline int cuda_int_cast(int64_t value, const char* varname) {
11
+ auto result = static_cast<int>(value);
12
+ TORCH_CHECK(static_cast<int64_t>(result) == value,
13
+ "cuda_int_cast: The value of ", varname, "(", (long long)value,
14
+ ") is too large to fit into an int (", sizeof(int), " bytes)");
15
+ return result;
16
+ }
17
+
18
+ // Creates an array of size elements of type T, backed by pinned memory
19
+ // wrapped in a Storage
20
+ template<class T>
21
+ static inline Storage pin_memory(int64_t size) {
22
+ auto* allocator = cuda::getPinnedMemoryAllocator();
23
+ int64_t adjusted_size = size * sizeof(T);
24
+ return Storage(
25
+ Storage::use_byte_size_t(),
26
+ adjusted_size,
27
+ allocator,
28
+ /*resizable=*/false);
29
+ }
30
+
31
+ } // namespace native
32
+ } // namespace at
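cuda_int_cast above guards against silent truncation when an int64_t size is handed to an API that takes int. A standalone sketch of the same round-trip check (checked_int_cast is an illustrative name; the real helper reports the failure through TORCH_CHECK):

#include <cstdint>
#include <iostream>
#include <stdexcept>
#include <string>

// Cast down, cast back up, and fail loudly if the value did not survive.
static int checked_int_cast(std::int64_t value, const char* varname) {
  const int result = static_cast<int>(value);
  if (static_cast<std::int64_t>(result) != value) {
    throw std::runtime_error(std::string(varname) + " is too large to fit into an int");
  }
  return result;
}

int main() {
  std::cout << checked_int_cast(123, "n") << "\n";        // fine
  try {
    checked_int_cast(std::int64_t{1} << 40, "huge_n");    // throws
  } catch (const std::exception& e) {
    std::cout << e.what() << "\n";
  }
  return 0;
}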
venv/lib/python3.10/site-packages/torch/include/ATen/native/cuda/MultiTensorApply.cuh ADDED
@@ -0,0 +1,379 @@
1
+ #pragma once
2
+ #include <ATen/core/Tensor.h>
3
+ #include <ATen/cuda/CUDAContext.h>
4
+ #include <c10/cuda/CUDAGuard.h>
5
+ #include <ATen/native/cuda/Loops.cuh>
6
+ #include <ATen/native/cuda/MemoryAccess.cuh>
7
+ #include <vector>
8
+
9
+ namespace at::native {
10
+
11
+ namespace {
12
+
13
+ static constexpr int64_t kILP = 4;
14
+ static constexpr int64_t kChunkSize = 65536;
15
+ static constexpr int64_t kBlockSize = 512;
16
+
17
+ // TODO(crcrpar): Add `n>5` for `low prec params & their higher prec copy`
18
+ // TensorListMetadata has to be < 4KB - the limit for kernel launch argument
19
+ static constexpr int depth_to_max_tensors[5] = {110, 64, 48, 36, 30};
20
+ static constexpr int depth_to_max_blocks[5] = {320, 320, 320, 320, 320};
21
+ static constexpr int depth_to_max_tensors_scalarlist[5] = {96, 64, 48, 36, 30};
22
+ static constexpr int depth_to_max_tensors_scalarlist_of_complex_double[2] = {
23
+ 72,
24
+ 60};
25
+
26
+ template <typename T>
27
+ __device__ __forceinline__ bool is_aligned(T* p) {
28
+ return ((uint64_t)p) % (kILP * sizeof(T)) == 0;
29
+ }
30
+
31
+ template <typename T>
32
+ __device__ __forceinline__ void load_store(
33
+ T* dst,
34
+ T* src,
35
+ int64_t dst_offset,
36
+ int64_t src_offset) {
37
+ using LT = at::native::memory::aligned_vector<T, kILP>;
38
+ ((LT*)dst)[dst_offset] = ((LT*)src)[src_offset];
39
+ }
40
+
41
+ template <int n>
42
+ struct TensorListMetadata {
43
+ const void* addresses[n][depth_to_max_tensors[n - 1]];
44
+ int64_t numel_for_tensor[depth_to_max_tensors[n - 1]];
45
+ unsigned char block_to_tensor[depth_to_max_blocks[n - 1]];
46
+ int block_to_chunk[depth_to_max_blocks[n - 1]];
47
+ int start_tensor_this_launch;
48
+ };
49
+
50
+ template <typename scalar_vals_t, int n>
51
+ struct TensorListScalarListMetadata {
52
+ const void* addresses[n][depth_to_max_tensors_scalarlist[n - 1]];
53
+ int64_t numel_for_tensor[depth_to_max_tensors_scalarlist[n - 1]];
54
+ scalar_vals_t scalar_vals[depth_to_max_tensors_scalarlist[n - 1]];
55
+ unsigned char block_to_tensor[depth_to_max_blocks[n - 1]];
56
+ int block_to_chunk[depth_to_max_blocks[n - 1]];
57
+ };
58
+
59
+ // note(mkozuki): `n` of 1&2 violate the limit of cuda kernel argument size of
60
+ // 4kb with `c10::complex<double>`
61
+ template <>
62
+ struct TensorListScalarListMetadata<c10::complex<double>, 1> {
63
+ const void* addresses[1]
64
+ [depth_to_max_tensors_scalarlist_of_complex_double[0]];
65
+ int64_t
66
+ numel_for_tensor[depth_to_max_tensors_scalarlist_of_complex_double[0]];
67
+ c10::complex<double>
68
+ scalar_vals[depth_to_max_tensors_scalarlist_of_complex_double[0]];
69
+ unsigned char block_to_tensor[depth_to_max_blocks[1 - 1]];
70
+ int block_to_chunk[depth_to_max_blocks[1 - 1]];
71
+ };
72
+
73
+ template <>
74
+ struct TensorListScalarListMetadata<c10::complex<double>, 2> {
75
+ const void* addresses[2]
76
+ [depth_to_max_tensors_scalarlist_of_complex_double[1]];
77
+ int64_t
78
+ numel_for_tensor[depth_to_max_tensors_scalarlist_of_complex_double[1]];
79
+ c10::complex<double>
80
+ scalar_vals[depth_to_max_tensors_scalarlist_of_complex_double[1]];
81
+ unsigned char block_to_tensor[depth_to_max_blocks[2 - 1]];
82
+ int block_to_chunk[depth_to_max_blocks[2 - 1]];
83
+ };
84
+
85
+ // NOTE(crcrpar): This is a conservative resolution to handle `state_steps`
86
+ // whose each element is `at::Tensor` of 1 element representing the number of
87
+ // `step`s called so far.
88
+ template <int n>
89
+ struct FusedOptimizerTensorListMetadata {
90
+ const void* addresses[n][depth_to_max_tensors[n - 1]];
91
+ int64_t numel_for_tensor[depth_to_max_tensors[n - 1]];
92
+ const void* state_steps_addresses[depth_to_max_tensors_scalarlist[n - 1]];
93
+ unsigned char block_to_tensor[depth_to_max_blocks[n - 1]];
94
+ int block_to_chunk[depth_to_max_blocks[n - 1]];
95
+ int start_tensor_this_launch;
96
+ };
97
+
98
+ template <typename T, typename U, typename... ArgTypes>
99
+ C10_LAUNCH_BOUNDS_1(kBlockSize)
100
+ __global__ void multi_tensor_apply_kernel(
101
+ T tensorListMeta,
102
+ U callable,
103
+ ArgTypes... args) {
104
+ // Hand the chunk information to the user-supplied functor to process however
105
+ // it likes.
106
+ callable(kChunkSize, tensorListMeta, args...);
107
+ }
108
+
109
+ } // namespace
110
+
111
+ // multi_tensor_apply enables horizontal fusion across lists of tensors.
112
+ // For example, whereas you once had a for-loop of a + b = c, where a, b,
113
+ // and c are individual tensors in lists as, bs, and cs, you can now with
114
+ // fewer kernel launches compute as + bs = cs.
115
+ //
116
+ // You can also imagine bs to be a scalar list vs a tensor list.
117
+ //
118
+ // The function below takes in tensor lists, scalars, and a callable and
119
+ // chunks up the computation to launch as few kernels as possible by iterating
120
+ // through every "chunk" in every tensor (thus the nested for loops). In the
121
+ // simplest case, everything gets bundled into just one kernel launch, but
122
+ // due to blocksize constraints, we may need to launch multiple kernels.
123
+ // Each kernel launch is defined by one tensorListMeta construct, which we
124
+ // use to track and reset the necessary metadata for each launch.
125
+ template <int depth, typename scalar_T, typename T, typename... ArgTypes>
126
+ void multi_tensor_apply(
127
+ std::vector<std::vector<at::Tensor>>& tensor_lists,
128
+ at::ArrayRef<Scalar> scalars,
129
+ T callable,
130
+ ArgTypes... args) {
131
+ TORCH_CHECK(
132
+ tensor_lists.size() == depth,
133
+ "Number of tensor lists has to match the depth.");
134
+ const size_t n_tensors = tensor_lists[0].size();
135
+ using scalar_vals_t = typename T::opmath_t;
136
+ TensorListScalarListMetadata<scalar_vals_t, depth> tensorListMeta;
137
+
138
+ int loc_block_info = 0;
139
+ int loc_tensor_info = 0;
140
+ for (size_t t = 0; t < n_tensors; t++) {
141
+ // short-circuit to avoid adding empty tensors to tensorListMeta
142
+ if (tensor_lists[0][t].numel() == 0) {
143
+ continue;
144
+ }
145
+ tensorListMeta.scalar_vals[loc_tensor_info] = scalars[t].to<scalar_T>();
146
+ tensorListMeta.numel_for_tensor[loc_tensor_info] =
147
+ tensor_lists[0][t].numel();
148
+ for (int d = 0; d < depth; d++) {
149
+ tensorListMeta.addresses[d][loc_tensor_info] =
150
+ tensor_lists[d][t].const_data_ptr();
151
+ }
152
+ loc_tensor_info++;
153
+
154
+ // now we enter [chunking territory].
155
+ // we will launch a kernel when EITHER the blocks get filled up OR
156
+ // the tensors get filled up. There will always be at least one block
157
+ // per tensor since the zero-sized ones will not enter the loop, so
158
+ // the nested forloop within represents iterating through the chunks
159
+ // of a single tensor.
160
+ const auto numel = tensor_lists[0][t].numel();
161
+ const auto chunks = numel / kChunkSize + (numel % kChunkSize != 0);
162
+ for (auto chunk = 0; chunk < chunks; chunk++) {
163
+ tensorListMeta.block_to_tensor[loc_block_info] = loc_tensor_info - 1;
164
+ tensorListMeta.block_to_chunk[loc_block_info] = chunk;
165
+ loc_block_info++;
166
+
167
+ // a tensor is not considered full unless all its chunks have been
168
+ // processed
169
+ const bool tensors_full =
170
+ (loc_tensor_info == depth_to_max_tensors_scalarlist[depth - 1] &&
171
+ chunk == chunks - 1);
172
+ const bool blocks_full =
173
+ (loc_block_info == depth_to_max_blocks[depth - 1]);
174
+
175
+ if (tensors_full || blocks_full) {
176
+ multi_tensor_apply_kernel<<<
177
+ loc_block_info,
178
+ kBlockSize,
179
+ 0,
180
+ at::cuda::getCurrentCUDAStream()>>>(
181
+ tensorListMeta, callable, args...);
182
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
183
+
184
+ // Reset.
185
+ loc_block_info = 0;
186
+ // all chunks have already been handled in the kernel
187
+ if (chunk == chunks - 1) {
188
+ loc_tensor_info = 0;
189
+ } else { // blocks were full and tensor chunks remain
190
+ tensorListMeta.numel_for_tensor[0] =
191
+ tensorListMeta.numel_for_tensor[loc_tensor_info - 1];
192
+ tensorListMeta.scalar_vals[0] =
193
+ tensorListMeta.scalar_vals[loc_tensor_info - 1];
194
+ for (int d = 0; d < depth; d++) {
195
+ tensorListMeta.addresses[d][0] =
196
+ tensorListMeta.addresses[d][loc_tensor_info - 1];
197
+ }
198
+ loc_tensor_info = 1;
199
+ }
200
+ }
201
+ }
202
+ }
203
+
204
+ // note: [finishing what we started]
205
+ // if there's remaining work to be done but the tensors/blocks aren't full
206
+ // yet and we are at the end, submit the kernel to do the work!
207
+ if (loc_block_info != 0) {
208
+ multi_tensor_apply_kernel<<<
209
+ loc_block_info,
210
+ kBlockSize,
211
+ 0,
212
+ at::cuda::getCurrentCUDAStream()>>>(tensorListMeta, callable, args...);
213
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
214
+ }
215
+ }
216
+
217
+ template <int depth, typename T, typename... ArgTypes>
218
+ void multi_tensor_apply(
219
+ std::vector<std::vector<at::Tensor>>& tensor_lists,
220
+ T callable,
221
+ ArgTypes... args) {
222
+ TORCH_CHECK(
223
+ tensor_lists.size() == depth,
224
+ "Number of tensor lists has to match the depth.");
225
+ const size_t n_tensors = tensor_lists[0].size();
226
+ TensorListMetadata<depth> tensorListMeta;
227
+ tensorListMeta.start_tensor_this_launch = 0;
228
+
229
+ int loc_block_info = 0;
230
+ int loc_tensor_info = 0;
231
+ for (size_t t = 0; t < n_tensors; t++) {
232
+ // short-circuit to avoid adding empty tensors to tensorListMeta
233
+ if (tensor_lists[0][t].numel() == 0) {
234
+ continue;
235
+ }
236
+ tensorListMeta.numel_for_tensor[loc_tensor_info] =
237
+ tensor_lists[0][t].numel();
238
+ for (int d = 0; d < depth; d++) {
239
+ tensorListMeta.addresses[d][loc_tensor_info] =
240
+ tensor_lists[d][t].const_data_ptr();
241
+ }
242
+ loc_tensor_info++;
243
+
244
+ // see note: [chunking territory].
245
+ const auto numel = tensor_lists[0][t].numel();
246
+ const auto chunks = numel / kChunkSize + (numel % kChunkSize != 0);
247
+ for (auto chunk = 0; chunk < chunks; chunk++) {
248
+ tensorListMeta.block_to_tensor[loc_block_info] = loc_tensor_info - 1;
249
+ tensorListMeta.block_to_chunk[loc_block_info] = chunk;
250
+ loc_block_info++;
251
+
252
+ const bool tensors_full =
253
+ (loc_tensor_info == depth_to_max_tensors[depth - 1] &&
254
+ chunk == chunks - 1);
255
+ const bool blocks_full =
256
+ (loc_block_info == depth_to_max_blocks[depth - 1]);
257
+
258
+ if (tensors_full || blocks_full) {
259
+ multi_tensor_apply_kernel<<<
260
+ loc_block_info,
261
+ kBlockSize,
262
+ 0,
263
+ at::cuda::getCurrentCUDAStream()>>>(
264
+ tensorListMeta, callable, args...);
265
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
266
+
267
+ // Reset.
268
+ loc_block_info = 0;
269
+ if (chunk == chunks - 1) {
270
+ loc_tensor_info = 0;
271
+ tensorListMeta.start_tensor_this_launch = t + 1;
272
+ } else {
273
+ tensorListMeta.numel_for_tensor[0] =
274
+ tensorListMeta.numel_for_tensor[loc_tensor_info - 1];
275
+ for (int d = 0; d < depth; d++) {
276
+ tensorListMeta.addresses[d][0] =
277
+ tensorListMeta.addresses[d][loc_tensor_info - 1];
278
+ }
279
+ loc_tensor_info = 1;
280
+ tensorListMeta.start_tensor_this_launch = t;
281
+ }
282
+ }
283
+ }
284
+ }
285
+
286
+ // see note: [finishing what we started]
287
+ if (loc_block_info != 0) {
288
+ multi_tensor_apply_kernel<<<
289
+ loc_block_info,
290
+ kBlockSize,
291
+ 0,
292
+ at::cuda::getCurrentCUDAStream()>>>(tensorListMeta, callable, args...);
293
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
294
+ }
295
+ }
296
+
297
+ template <int depth, typename T, typename... ArgTypes>
298
+ void multi_tensor_apply_for_fused_optimizer(
299
+ std::vector<std::vector<at::Tensor>>& tensor_lists,
300
+ at::TensorList state_steps,
301
+ T callable,
302
+ ArgTypes... args) {
303
+ TORCH_CHECK(
304
+ tensor_lists.size() == depth,
305
+ "Number of tensor lists has to match the depth");
306
+ const auto num_tensors = tensor_lists[0].size();
307
+ FusedOptimizerTensorListMetadata<depth> tensorListMeta;
308
+
309
+ int loc_block_info = 0;
310
+ int loc_tensor_info = 0;
311
+ for (const auto& tensor_index : c10::irange(num_tensors)) {
312
+ // short-circuit to avoid adding empty tensors to tensorListMeta
313
+ if (tensor_lists[0][tensor_index].numel() == 0) {
314
+ continue;
315
+ }
316
+ tensorListMeta.state_steps_addresses[loc_tensor_info] =
317
+ state_steps[tensor_index].const_data_ptr();
318
+ tensorListMeta.numel_for_tensor[loc_tensor_info] =
319
+ tensor_lists[0][tensor_index].numel();
320
+ for (const auto& d : c10::irange(depth)) {
321
+ tensorListMeta.addresses[d][loc_tensor_info] =
322
+ tensor_lists[d][tensor_index].const_data_ptr();
323
+ }
324
+ loc_tensor_info++;
325
+
326
+ // see above note: [chunking territory]
327
+ const auto numel = tensor_lists[0][tensor_index].numel();
328
+ const auto chunks = numel / kChunkSize + (numel % kChunkSize != 0);
329
+ TORCH_CHECK(chunks > -1);
330
+ for (const auto& chunk : c10::irange(chunks)) {
331
+ tensorListMeta.block_to_tensor[loc_block_info] = loc_tensor_info - 1;
332
+ tensorListMeta.block_to_chunk[loc_block_info] = chunk;
333
+ loc_block_info++;
334
+
335
+ const auto tensor_full =
336
+ (loc_tensor_info == depth_to_max_tensors[depth - 1] &&
337
+ chunk == chunks - 1);
338
+ const auto blocks_full = loc_block_info == depth_to_max_blocks[depth - 1];
339
+
340
+ if (tensor_full || blocks_full) {
341
+ multi_tensor_apply_kernel<<<
342
+ loc_block_info,
343
+ kBlockSize,
344
+ 0,
345
+ at::cuda::getCurrentCUDAStream()>>>(
346
+ tensorListMeta, callable, args...);
347
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
348
+
349
+ // Reset.
350
+ loc_block_info = 0;
351
+ if (chunk == chunks - 1) {
352
+ loc_tensor_info = 0;
353
+ } else {
354
+ tensorListMeta.numel_for_tensor[0] =
355
+ tensorListMeta.numel_for_tensor[loc_tensor_info - 1];
356
+ tensorListMeta.state_steps_addresses[0] =
357
+ tensorListMeta.state_steps_addresses[loc_tensor_info - 1];
358
+ for (const auto& d : c10::irange(depth)) {
359
+ tensorListMeta.addresses[d][0] =
360
+ tensorListMeta.addresses[d][loc_tensor_info - 1];
361
+ }
362
+ loc_tensor_info = 1;
363
+ }
364
+ }
365
+ }
366
+ }
367
+
368
+ // see above note: [finishing what we started]
369
+ if (loc_block_info != 0) {
370
+ multi_tensor_apply_kernel<<<
371
+ loc_block_info,
372
+ kBlockSize,
373
+ 0,
374
+ at::cuda::getCurrentCUDAStream()>>>(tensorListMeta, callable, args...);
375
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
376
+ }
377
+ }
378
+
379
+ } // namespace at::native
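The comment block above describes how multi_tensor_apply assigns (tensor, chunk) pairs to blocks and launches a kernel whenever the block table fills up, plus one final launch for any leftovers. A CPU-only sketch of that bookkeeping, which for brevity models only the blocks-full trigger and ignores the per-launch tensor limit (plan_launches and kMaxBlocks are illustrative names):

#include <cstdint>
#include <cstdio>
#include <vector>

constexpr std::int64_t kChunkSize = 65536;  // mirrors kChunkSize above
constexpr int kMaxBlocks = 320;             // mirrors depth_to_max_blocks above

// Walk the tensor sizes, assign one block per chunk, and "launch" whenever the
// block table fills up; one trailing launch handles whatever remains.
int plan_launches(const std::vector<std::int64_t>& numels) {
  int launches = 0;
  int blocks_in_flight = 0;
  for (std::size_t t = 0; t < numels.size(); t++) {
    if (numels[t] == 0) continue;  // empty tensors are skipped entirely
    const std::int64_t chunks = (numels[t] + kChunkSize - 1) / kChunkSize;
    for (std::int64_t chunk = 0; chunk < chunks; chunk++) {
      blocks_in_flight++;
      if (blocks_in_flight == kMaxBlocks) {  // block table full -> launch now
        launches++;
        blocks_in_flight = 0;
      }
    }
  }
  if (blocks_in_flight != 0) launches++;     // "finishing what we started"
  return launches;
}

int main() {
  std::vector<std::int64_t> numels(1000, 1 << 20);  // 1000 tensors of 1M elements
  std::printf("kernel launches: %d\n", plan_launches(numels));  // 50
  return 0;
}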
venv/lib/python3.10/site-packages/torch/include/ATen/native/cuda/Randperm.cuh ADDED
@@ -0,0 +1,58 @@
1
+ #include <ATen/cuda/CUDAGeneratorImpl.h>
2
+ #include <ATen/cuda/CUDAGraphsUtils.cuh>
3
+ #include <ATen/Utils.h>
4
+
5
+ #include <curand.h>
6
+ #include <curand_kernel.h>
7
+ #include <curand_philox4x32_x.h>
8
+
9
+ namespace {
10
+
11
+ // See note [Algorithm of randperm]
12
+ template<typename T, typename scalar_t>
13
+ __global__ void randperm_handle_duplicate_keys_kernel(T *keys, scalar_t *data, T mask, int n, at::PhiloxCudaState philox_args) {
14
+ int tid = threadIdx.x + blockDim.x * blockIdx.x;
15
+
16
+ // find the beginning of islands
17
+ if (tid >= n - 1) return; // out of range
18
+ if ((keys[tid] & mask) != (keys[tid + 1] & mask)) return; // not in an island
19
+ if (tid != 0 && (keys[tid] & mask) == (keys[tid - 1] & mask)) return; // not the beginning of an island
20
+
21
+ // find the size of islands
22
+ int island_size = 0;
23
+ do { island_size++; }
24
+ while ((tid + island_size < n) && (keys[tid + island_size] & mask) == (keys[tid] & mask));
25
+
26
+ // do random permutation inside each island.
27
+ data += tid;
28
+ auto seeds = at::cuda::philox::unpack(philox_args);
29
+ curandStatePhilox4_32_10_t state;
30
+ curand_init(std::get<0>(seeds), tid, std::get<1>(seeds), &state);
31
+ for (int i = island_size - 1; i > 0; i--) {
32
+ unsigned int r = curand(&state) % (i + 1);
33
+ if (i != r) {
34
+ scalar_t tmp = data[i];
35
+ data[i] = data[r];
36
+ data[r] = tmp;
37
+ }
38
+ }
39
+ }
40
+
41
+ // See note [Algorithm of randperm]
42
+ template<typename T, typename scalar_t>
43
+ void randperm_handle_duplicate_keys(T *keys, scalar_t *data, int bits, int64_t n, c10::optional<at::Generator> &gen_) {
44
+ auto gen = at::get_generator_or_default<at::CUDAGeneratorImpl>(gen_, at::cuda::detail::getDefaultCUDAGenerator());
45
+ int64_t counter_offset = n;
46
+ at::PhiloxCudaState rng_engine_inputs;
47
+ {
48
+ // See Note [Acquire lock when using random generators]
49
+ std::lock_guard<std::mutex> lock(gen->mutex_);
50
+ rng_engine_inputs = gen->philox_cuda_state(counter_offset);
51
+ }
52
+ T mask = static_cast<T>((1UL << bits) - 1);
53
+ randperm_handle_duplicate_keys_kernel<<<(n + 511) / 512, 512, 0, at::cuda::getCurrentCUDAStream()>>>(
54
+ keys, data, mask, n, rng_engine_inputs);
55
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
56
+ }
57
+
58
+ }
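randperm_handle_duplicate_keys_kernel above shuffles values only within "islands" of keys that collide under the bit mask, so ties left by the sort are broken at random. A CPU sketch of the same island detection plus Fisher-Yates shuffle (std::mt19937 stands in for the per-thread Philox state, and the function name is illustrative):

#include <cstdint>
#include <cstdio>
#include <random>
#include <utility>
#include <vector>

// For each run of keys that compare equal under `mask`, apply a Fisher-Yates
// shuffle to the corresponding slice of `data`.
template <typename T, typename S>
void shuffle_duplicate_key_islands(const std::vector<T>& keys, std::vector<S>& data,
                                   T mask, std::mt19937& rng) {
  const std::size_t n = keys.size();
  std::size_t i = 0;
  while (i < n) {
    std::size_t j = i + 1;
    while (j < n && (keys[j] & mask) == (keys[i] & mask)) j++;   // island is [i, j)
    for (std::size_t k = j - 1; k > i; k--) {                    // shuffle inside the island
      std::uniform_int_distribution<std::size_t> pick(i, k);
      std::swap(data[k], data[pick(rng)]);
    }
    i = j;
  }
}

int main() {
  std::vector<std::uint32_t> keys = {5, 5, 5, 9, 9, 12};
  std::vector<int> data = {0, 1, 2, 3, 4, 5};
  std::mt19937 rng(0);
  shuffle_duplicate_key_islands<std::uint32_t, int>(keys, data, 0xFFFFFFFFu, rng);
  // The first three and the next two elements are permuted among themselves;
  // the last element stays in place.
  for (int v : data) std::printf("%d ", v);
  std::printf("\n");
  return 0;
}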
venv/lib/python3.10/site-packages/torch/include/ATen/native/cuda/ScanUtils.cuh ADDED
@@ -0,0 +1,459 @@
1
+ #pragma once
2
+ #include <ATen/NumericUtils.h>
3
+ #include <ATen/core/TensorBase.h>
4
+ #include <ATen/cuda/cub.cuh>
5
+ #include <ATen/cuda/CUDAContext.h>
6
+
7
+ #include <c10/util/Load.h>
8
+ #include <limits>
9
+ #include <cmath>
10
+
11
+ namespace at {
12
+ namespace native {
13
+
14
+ template <typename integer>
15
+ constexpr inline integer ceil_div(integer n, integer m) {
16
+ return (n + m - 1) / m;
17
+ }
18
+
19
+ template <typename integer>
20
+ constexpr inline integer get_log_num_threads_x_inner_scan(integer num_rows, integer row_size) {
21
+ integer log_num_threads_x = 0;
22
+ integer log_num_threads_y = 0;
23
+ while (((integer)1 << log_num_threads_x) < row_size) {
24
+ ++log_num_threads_x;
25
+ }
26
+ while (((integer)1 << log_num_threads_y) < num_rows) {
27
+ ++log_num_threads_y;
28
+ }
29
+ // we want to keep the ratio between the x-threads and y-threads about the same as
30
+ // the ratio between the row_size and num_rows, but the total number of threads in
31
+ // a block should be about 512
32
+ integer diff = log_num_threads_x - log_num_threads_y;
33
+ // 9 is from log2(512)
34
+ log_num_threads_x = ((integer)9 + diff) / (integer)2;
35
+ // I found that in having larger log_num_threads_x can give significant speed up in some cases,
36
+ // but detrimental in another case, so just keep the lower bound to be log2(16) == 4 to make it
37
+ // similar to the previous implementation
38
+ // Keeping the upper bound to be log2(512) == 9 as the maximum number of threads in a block.
39
+ log_num_threads_x = std::min(std::max((integer)4, log_num_threads_x), (integer)9);
40
+ return log_num_threads_x;
41
+ }
42
+
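The heuristic above picks a power-of-two split of roughly 512 threads between the scanned row (x) and the batch of rows (y). A standalone sketch that mirrors the arithmetic for two concrete shapes (log_num_threads_x_for is an illustrative name, not part of the header):

#include <algorithm>
#include <cstdio>

// Same arithmetic as get_log_num_threads_x_inner_scan above: take log2
// ceilings of both extents, split ~log2(512) threads in proportion to their
// difference, then clamp to [log2(16), log2(512)].
static int log_num_threads_x_for(long num_rows, long row_size) {
  int lx = 0, ly = 0;
  while ((1L << lx) < row_size) ++lx;
  while ((1L << ly) < num_rows) ++ly;
  const int diff = lx - ly;
  lx = (9 + diff) / 2;                  // keep x:y close to row_size:num_rows, ~512 threads total
  return std::min(std::max(4, lx), 9);  // clamp to [4, 9]
}

int main() {
  // 8 rows of 4096 elements: all 512 threads go along the scanned row.
  std::printf("num_threads_x = %d\n", 1 << log_num_threads_x_for(8, 4096));     // 512
  // 100000 short rows of 32 elements: only 16 threads along the row.
  std::printf("num_threads_x = %d\n", 1 << log_num_threads_x_for(100000, 32));  // 16
  return 0;
}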
43
+ template<typename scalar_t, typename idx_t, typename BinaryOperation>
44
+ __device__ void binary_op_update(const scalar_t lhs, scalar_t& rhs, const idx_t lhs_idx, idx_t& rhs_idx, BinaryOperation binary_op) {
45
+ if(!at::_isnan(rhs) && (at::_isnan(lhs) || !binary_op(rhs, lhs))) {
46
+ rhs = lhs;
47
+ rhs_idx = lhs_idx;
48
+ }
49
+ }
50
+ /* Perform an inclusive scan along the innermost dimension of a tensor.
51
+ *
52
+ * - num_rows is the size of the flattened outer dimensions;
53
+ * - row_size is the size of the innermost dimension;
54
+ *
55
+ * The outer dimensions of the tensor are considered as a single dimension, i.e. the tensor is
56
+ * considered as having 'num_rows' rows of size 'row_size'.
57
+ * Each thread block processes one or more sets of contiguous rows (processing multiple rows
58
+ * per thread block is quicker than processing a single row, especially for short rows).
59
+ */
60
+ template<typename scalar_t, class BinaryFunction>
61
+ __global__ void tensor_kernel_scan_innermost_dim_with_indices(const scalar_t *self_, scalar_t *values_, int64_t *indices_,
62
+ int num_rows, int row_size,
63
+ const uint32_t num_threads, const uint32_t log_num_threads_x,
64
+ scalar_t init, BinaryFunction binary_op) {
65
+ // dynamic memory allocation for vbuf and ibuf
66
+ alignas(sizeof(double)) extern __shared__ char buf[];
67
+ scalar_t* vbuf = reinterpret_cast<scalar_t*>(buf); // the size is num_threads * 2
68
+ int64_t* ibuf = reinterpret_cast<int64_t*>(vbuf + num_threads * 2);
69
+ const uint32_t num_threads_x = 1 << log_num_threads_x;
70
+ scalar_t* row_buf = vbuf + 2 * num_threads_x * threadIdx.y;
71
+ int64_t* row_idx_buf = ibuf + 2 * num_threads_x * threadIdx.y;
72
+
73
+ for (int block_row = blockIdx.x * blockDim.y;
74
+ block_row < num_rows;
75
+ block_row += blockDim.y * gridDim.x) {
76
+ int row = block_row + threadIdx.y;
77
+ const scalar_t *row_self = self_ + row * row_size;
78
+ scalar_t *row_values = values_ + row * row_size;
79
+ int64_t *row_indices = indices_ + row * row_size;
80
+ scalar_t block_total = init;
81
+ int64_t block_idx_final = 0;
82
+ const bool row_exists = row < num_rows;
83
+ // Perform scan on one block at a time, keeping track of the total value of
84
+ // all blocks processed so far.
85
+ for (int block_col = 0; block_col < row_size; block_col += 2 * num_threads_x) {
86
+ // Load data into shared memory (two values per thread).
87
+ int col1 = block_col + threadIdx.x;
88
+ int col2 = block_col + num_threads_x + threadIdx.x;
89
+ if (row_exists) {
90
+ if (col1 < row_size) {
91
+ row_buf[threadIdx.x] = c10::load(&row_self[col1]);
92
+ row_idx_buf[threadIdx.x] = col1;
93
+ } else {
94
+ row_buf[threadIdx.x] = init;
95
+ // No need to set the index here as the value in init will never be selected
96
+ }
97
+
98
+ if (col2 < row_size) {
99
+ row_buf[num_threads_x + threadIdx.x] = c10::load(&row_self[col2]);
100
+ row_idx_buf[num_threads_x + threadIdx.x] = col2;
101
+ } else {
102
+ row_buf[num_threads_x + threadIdx.x] = init;
103
+ // No need to set the index here as the value in init will never be selected
104
+ }
105
+
106
+ // Add the total value of all previous blocks to the first value of this block.
107
+ if (threadIdx.x == 0) {
108
+ binary_op_update(block_total, row_buf[0], block_idx_final, row_idx_buf[0], binary_op);
109
+ }
110
+ }
111
+ __syncthreads();
112
+
113
+ // Parallel reduction with the Sklansky method. The diagram can be seen in this paper:
114
+ // https://research.nvidia.com/publication/single-pass-parallel-prefix-scan-decoupled-look-back
115
+ for (uint32_t s = 1; s <= num_threads_x; s <<= 1) {
116
+ if (row_exists) {
117
+ uint32_t a = (threadIdx.x / s) * (2 * s) + s;
118
+ uint32_t ti = a + (threadIdx.x % s);
119
+ uint32_t si = a - 1;
120
+ binary_op_update(row_buf[si], row_buf[ti], row_idx_buf[si], row_idx_buf[ti], binary_op);
121
+ }
122
+ __syncthreads();
123
+ }
124
+
125
+ // Write back to output.
126
+ if (row_exists) {
127
+ if (col1 < row_size){
128
+ row_values[col1] = row_buf[threadIdx.x];
129
+ row_indices[col1] = row_idx_buf[threadIdx.x];
130
+ }
131
+ if (col2 < row_size) {
132
+ row_values[col2] = row_buf[num_threads_x + threadIdx.x];
133
+ row_indices[col2] = row_idx_buf[num_threads_x + threadIdx.x];
134
+ }
135
+ }
136
+ block_total = row_buf[2 * num_threads_x - 1];
137
+ block_idx_final = row_idx_buf[2 * num_threads_x - 1];
138
+ __syncthreads();
139
+ }
140
+ }
141
+ }
142
+
143
+ /* Perform an inclusive scan along an outer dimension of a tensor.
144
+ *
145
+ * - num_orows is the size of the flattened outer dimensions;
146
+ * - num_irows is the size of the flattened inner dimensions;
147
+ * - row_size is the size of the dimension along which to scan;
148
+ *
149
+ * The dimensions to the outside and inside of the specified dimension are considered as flattened.
150
+ * Thread blocks with the same blockIdx.y process an "outer row" (i.e. an element of the flattened
151
+ * outer dimensions, which contains several "inner rows").
152
+ * Each thread processes a single inner row at a time.
153
+ */
154
+ template<typename scalar_t, class BinaryFunction>
155
+ __global__ void tensor_kernel_scan_outer_dim_with_indices(const scalar_t *self_, scalar_t *values_, int64_t *indices_,
156
+ const uint32_t num_orows, const uint32_t num_irows, const uint32_t row_size, scalar_t init, BinaryFunction binary_op) {
157
+ for (uint32_t orow = blockIdx.x; orow < num_orows; orow += gridDim.x) {
158
+ for (uint32_t irow = blockIdx.y * blockDim.x + threadIdx.x; irow < num_irows; irow += gridDim.y * blockDim.x) {
159
+ const scalar_t *self = self_ + orow * row_size * num_irows + irow;
160
+ scalar_t *values = values_ + orow * row_size * num_irows + irow;
161
+ int64_t *indices = indices_ + orow * row_size * num_irows + irow;
162
+ scalar_t out = init;
163
+ int64_t out_idx = 0;
164
+
165
+ for (auto col = decltype(row_size){0}; col < row_size; ++col) {
166
+ const auto val = c10::load(self);
167
+ if(at::_isnan(val) || (!at::_isnan(out) && binary_op(val, out))) {
168
+ out = val;
169
+ out_idx = col;
170
+ }
171
+ *values = out;
172
+ *indices = out_idx;
173
+ self += num_irows;
174
+ values += num_irows;
175
+ indices += num_irows;
176
+ }
177
+ }
178
+ }
179
+ }
180
+
181
+ inline void check_fits_in_unsigned(int64_t val, const char* name) {
182
+ constexpr auto umax = std::numeric_limits<uint32_t>::max();
183
+ TORCH_CHECK(
184
+ val >= 0 && val <= umax, name, " must fit in a 32-bit uint32_t value");
185
+ }
186
+
187
+
188
+ template<typename scalar_t, class BinaryFunction>
189
+ __host__ void scan_outer_dim_with_indices(
190
+ const TensorBase& self, const TensorBase& values, const TensorBase& indices,
191
+ int dim, scalar_t init, BinaryFunction binary_op) {
192
+ int64_t row_size = self.size(dim);
193
+ auto sizes = self.sizes();
194
+
195
+ // Treat all outer dimensions (i.e. dim_ < dim) as one.
196
+ const int64_t num_orows = c10::multiply_integers(sizes.begin(), sizes.begin() + dim);
197
+
198
+ // Treat all inner dimensions (i.e. dim > dimension) as one.
199
+ const int64_t num_irows = c10::multiply_integers(sizes.begin() + dim + 1, sizes.end());
200
+ //for performance reasons, cuda kernels use uint32_t for loops over irows, orows and row,
201
+ //make sure that input is not bigger than supported by uint32_t
202
+ check_fits_in_unsigned(num_irows, "num_irows");
203
+ check_fits_in_unsigned(num_orows, "num_orows");
204
+ check_fits_in_unsigned(row_size, "row_size");
205
+
206
+
207
+ dim3 threads(std::min(512, int(num_irows)));
208
+ int64_t maxGridDim = at::cuda::getCurrentDeviceProperties()->maxGridSize[1];
209
+ dim3 grid(std::min(maxGridDim, num_orows), std::min(maxGridDim, ceil_div(num_irows, int64_t{threads.x})));
210
+ tensor_kernel_scan_outer_dim_with_indices<scalar_t><<<grid, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
211
+ self.const_data_ptr<scalar_t>(), values.mutable_data_ptr<scalar_t>(), indices.mutable_data_ptr<int64_t>(),
212
+ num_orows, num_irows, row_size, init, binary_op);
213
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
214
+ }
215
+
216
+ template <typename scalar_t, class BinaryFunction>
217
+ __host__ void scan_innermost_dim_with_indices(
218
+ const TensorBase& self, const TensorBase& values, const TensorBase& indices,
219
+ scalar_t init, BinaryFunction binary_op) {
220
+ int ndim = self.dim();
221
+ // Treat all outer dimensions as a single dimension.
222
+ int row_size = self.size(ndim - 1);
223
+ int num_rows = self.numel() / row_size;
224
+
225
+ // assuming max_num_threads per block is 512
226
+ const uint32_t num_threads = 512;
227
+ const uint32_t log_num_threads_x = get_log_num_threads_x_inner_scan<uint32_t>(num_rows, row_size);
228
+ const uint32_t num_threads_x = (1 << log_num_threads_x);
229
+ const uint32_t num_threads_y = num_threads / num_threads_x;
230
+ dim3 threads(num_threads_x, num_threads_y);
231
+ dim3 grid(std::min(at::cuda::getCurrentDeviceProperties()->maxGridSize[0], ceil_div(num_rows, int(threads.y))));
232
+
233
+ const uint32_t mem_size = 2 * num_threads * (sizeof(scalar_t) + sizeof(int64_t));
234
+ tensor_kernel_scan_innermost_dim_with_indices<scalar_t><<<grid, threads, mem_size,
235
+ at::cuda::getCurrentCUDAStream()>>>(
236
+ self.const_data_ptr<scalar_t>(), values.mutable_data_ptr<scalar_t>(), indices.mutable_data_ptr<int64_t>(),
237
+ num_rows, row_size, num_threads, log_num_threads_x, init, binary_op);
238
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
239
+ }
240
+
241
+ template<typename scalar_t, typename BinaryFunction>
242
+ void scan_dim_with_indices(const TensorBase& self, const TensorBase& values, const TensorBase& indices, //int64_t dim) {
243
+ int64_t dim, scalar_t init, BinaryFunction binary_op) {
244
+ int ndim = self.dim();
245
+ auto self_ = self.expect_contiguous();
246
+ TORCH_INTERNAL_ASSERT(values.is_contiguous() && indices.is_contiguous());
247
+ if (dim == ndim - 1) {
248
+ scan_innermost_dim_with_indices<scalar_t>(*self_, values, indices, init, binary_op);
249
+ } else {
250
+ scan_outer_dim_with_indices<scalar_t>(*self_, values, indices, dim, init, binary_op);
251
+ }
252
+ }
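Editor's note: a hedged sketch of how a caller might drive this dispatcher for a cummax-style scan. The wrapper name, the float-only restriction, and the use of std::greater_equal are illustrative assumptions, not taken from this header.

// Hypothetical caller for float tensors only; assumes this header, <functional>
// and <limits> are included, that `values`/`indices` are contiguous CUDA tensors
// shaped like `self`, and that `dim` has already been wrapped.
void launch_cummax_like_float(const at::TensorBase& self, const at::TensorBase& values,
                              const at::TensorBase& indices, int64_t dim) {
  const float init = std::numeric_limits<float>::lowest();  // identity for "max"
  at::native::scan_dim_with_indices<float>(
      self, values, indices, dim, init, std::greater_equal<float>());
}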
253
+
254
+ // TODO: The implementation of `tensor_kernel_scan_outer_dim` and
255
+ // `tensor_kernel_scan_innermost_dim` is similar to
256
+ // `tensor_kernel_scan_outer_dim_with_indices`
257
+ // `tensor_kernel_scan_innermost_dim_with_indices` and should be refactored to
258
+ // remove the duplication.
259
+
260
+ /* Perform an inclusive scan along an outer dimension of a tensor.
261
+ *
262
+ * - num_orows is the size of the flattened outer dimensions;
263
+ * - num_irows is the size of the flattened inner dimensions;
264
+ * - row_size is the size of the dimension along which to scan;
265
+ *
266
+ * The dimensions to the outside and inside of the specified dimension are considered as flattened.
267
+ * Thread blocks with the same blockIdx.y process an "outer row" (i.e. an element of the flattened
268
+ * outer dimensions, which contains several "inner rows").
269
+ * Each thread processes a single inner row at a time.
270
+ */
271
+ template<typename scalar_t, class BinaryOp>
272
+ __global__ void tensor_kernel_scan_outer_dim(scalar_t *tgt_, const scalar_t *src_,
273
+ const uint32_t num_orows, const uint32_t num_irows, const uint32_t row_size,
274
+ const scalar_t init, BinaryOp binary_op)
275
+ {
276
+ for (uint32_t orow = blockIdx.x; orow < num_orows; orow += gridDim.x) {
277
+ for (uint32_t irow = blockIdx.y * blockDim.x + threadIdx.x; irow < num_irows; irow += gridDim.y * blockDim.x) {
278
+ const scalar_t *src = src_ + orow * row_size * num_irows + irow;
279
+ scalar_t *tgt = tgt_ + orow * row_size * num_irows + irow;
280
+ scalar_t acc = init;
281
+
282
+ for (uint32_t col = 0; col < row_size; ++col) {
283
+ acc = binary_op(acc, c10::load(src));
284
+ *tgt = acc;
285
+
286
+ src += num_irows;
287
+ tgt += num_irows;
288
+ }
289
+ }
290
+ }
291
+ }
292
+
293
+ /* Perform an inclusive scan along the innermost dimension of a tensor.
294
+ *
295
+ * - num_rows is the size of the flattened outer dimensions;
296
+ * - row_size is the size of the innermost dimension;
297
+ *
298
+ * The outer dimensions of the tensor are considered as a single dimension, i.e. the tensor is
299
+ * considered as having 'num_rows' rows of size 'row_size'.
300
+ * Each thread block processes one or more sets of contiguous rows (processing multiple rows
301
+ * per thread block is quicker than processing a single row, especially for short rows).
302
+ */
303
+ template<typename T, class BinaryFunction>
304
+ __device__ void tensor_kernel_scan_innermost_dim_impl(T* row_buf, T *tgt_, const T *src_,
305
+ const uint32_t num_rows, const uint32_t row_size,
306
+ const uint32_t log_num_threads_x,
307
+ T init, BinaryFunction binary_op){
308
+ const uint32_t num_threads_x = 1 << log_num_threads_x;
309
+ for (uint32_t block_row = blockIdx.x * blockDim.y;
310
+ block_row < num_rows;
311
+ block_row += blockDim.y * gridDim.x) {
312
+ uint32_t row = block_row + threadIdx.y;
313
+ T block_total = init;
314
+
315
+ const T *row_src = src_ + row * row_size;
316
+ T *row_tgt = tgt_ + row * row_size;
317
+ const bool row_exists = row < num_rows;
318
+
319
+ // Perform scan on one block at a time, keeping track of the total value of
320
+ // all blocks processed so far.
321
+ for (uint32_t block_col = 0; block_col < row_size; block_col += 2 * num_threads_x) {
322
+ // Load data into shared memory (two values per thread).
323
+ uint32_t col1 = block_col + threadIdx.x;
324
+ uint32_t col2 = block_col + num_threads_x + threadIdx.x;
325
+ if (row_exists) {
326
+ if (col1 < row_size) {
327
+ row_buf[threadIdx.x] = row_src[col1];
328
+ } else {
329
+ row_buf[threadIdx.x] = init;
330
+ }
331
+
332
+ if (col2 < row_size) {
333
+ row_buf[num_threads_x + threadIdx.x] = row_src[col2];
334
+ } else {
335
+ row_buf[num_threads_x + threadIdx.x] = init;
336
+ }
337
+
338
+ // Add the total value of all previous blocks to the first value of this block.
339
+ if (threadIdx.x == 0) {
340
+ row_buf[0] = binary_op(row_buf[0], block_total);
341
+ }
342
+ }
343
+ __syncthreads();
344
+
345
+ // Parallel reduction with the Sklansky method. The diagram can be seen in this paper:
346
+ // https://research.nvidia.com/publication/single-pass-parallel-prefix-scan-decoupled-look-back
347
+ for (uint32_t m = 0; m <= log_num_threads_x; ++m) {
348
+ if (row_exists) {
349
+ uint32_t s = 1 << m; // s = 2 ^ m
350
+ uint32_t a = ((threadIdx.x >> m) << (m + 1)) | s; // a = (threadIdx.x / s) * (2 * s) + s
351
+ uint32_t ti = a + (threadIdx.x % s);
352
+ uint32_t si = a - 1;
353
+ row_buf[ti] = binary_op(row_buf[ti], row_buf[si]);
354
+ }
355
+ __syncthreads();
356
+ }
357
+
358
+ // Write back to output.
359
+ if (row_exists) {
360
+ if (col1 < row_size) row_tgt[col1] = row_buf[threadIdx.x];
361
+ if (col2 < row_size) row_tgt[col2] = row_buf[num_threads_x + threadIdx.x];
362
+ }
363
+ block_total = row_buf[2 * num_threads_x - 1];
364
+ __syncthreads();
365
+ }
366
+ }
367
+ }
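Editor's note: to make the Sklansky pairing above concrete (an editorial illustration, not part of the file), the sketch below prints which shared-memory slots each step combines for a hypothetical block of 4 x-threads, i.e. 8 buffered elements per row. The index math mirrors the kernel: s = 2^m and a = (t / s) * (2 * s) + s.

// Host-side sketch of the Sklansky scan schedule for 8 elements.
#include <cstdio>

int main() {
  const unsigned log_nx = 2, nx = 1u << log_nx;  // 4 threads, 8 elements
  for (unsigned m = 0; m <= log_nx; ++m) {
    const unsigned s = 1u << m;
    for (unsigned t = 0; t < nx; ++t) {
      unsigned a = ((t >> m) << (m + 1)) | s;    // (t / s) * (2 * s) + s
      std::printf("step %u: buf[%u] <- op(buf[%u], buf[%u])\n",
                  m, a + (t % s), a + (t % s), a - 1);
    }
  }
}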
368
+
369
+ template <
370
+ typename T,
371
+ class BinaryFunction>
372
+ __global__ void tensor_kernel_scan_innermost_dim(
373
+ T* tgt_,
374
+ const T* src_,
375
+ const uint32_t num_rows,
376
+ const uint32_t row_size,
377
+ const uint32_t log_num_threads_x,
378
+ T init,
379
+ BinaryFunction binary_op) {
380
+ alignas(sizeof(double)) extern __shared__ char sbuf[];
381
+ T* sbuf2 = reinterpret_cast<T*>(sbuf);
382
+ const uint32_t num_threads_x = 1 << log_num_threads_x;
383
+ T* row_buf = reinterpret_cast<T*>(sbuf2 + num_threads_x * 2 * threadIdx.y);
384
+
385
+ tensor_kernel_scan_innermost_dim_impl<T>(
386
+ row_buf, tgt_, src_, num_rows, row_size, log_num_threads_x, init, binary_op);
387
+ }
388
+
389
+
390
+ template<typename scalar_t, class BinaryFunction>
391
+ __host__ void scan_outer_dim(const TensorBase& self, const TensorBase& result,
392
+ int dim, scalar_t init, BinaryFunction binary_op) {
393
+ const int64_t row_size = self.size(dim);
394
+ auto sizes = self.sizes();
395
+
396
+ // Treat all outer dimensions (i.e. dim_ < dim) as one.
397
+ const int64_t num_orows = c10::multiply_integers(sizes.begin(), sizes.begin() + dim);
398
+
399
+ // Treat all inner dimensions (i.e. dim > dimension) as one.
400
+ const int64_t num_irows = c10::multiply_integers(sizes.begin() + dim + 1, sizes.end());
401
+
402
+ dim3 threads(std::min(512, int(num_irows)));
403
+ int64_t maxGridDim = at::cuda::getCurrentDeviceProperties()->maxGridSize[1];
404
+ dim3 grid(std::min(maxGridDim, num_orows), std::min(maxGridDim, ceil_div(num_irows, int64_t{threads.x})));
405
+
406
+ check_fits_in_unsigned(num_irows, "num_irows");
407
+ check_fits_in_unsigned(num_orows, "num_orows");
408
+ check_fits_in_unsigned(row_size, "row_size");
409
+
410
+ tensor_kernel_scan_outer_dim<scalar_t><<<grid, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
411
+ result.mutable_data_ptr<scalar_t>(), self.const_data_ptr<scalar_t>(),
412
+ num_orows, num_irows, row_size, init, binary_op);
413
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
414
+ }
415
+
416
+ template <typename scalar_t, class BinaryFunction>
417
+ void scan_innermost_dim(const TensorBase& self, const TensorBase& result,
418
+ scalar_t init, BinaryFunction binary_op) {
419
+ int64_t ndim = self.dim();
420
+ // Treat all outer dimensions as a single dimension.
421
+ int64_t row_size = self.size(ndim - 1);
422
+ int64_t num_rows = self.numel() / row_size;
423
+
424
+ // assuming max_num_threads per block is 512
425
+ const uint32_t num_threads = 512;
426
+ const uint32_t log_num_threads_x = get_log_num_threads_x_inner_scan<uint32_t>(num_rows, row_size);
427
+ const uint32_t num_threads_x = (1 << log_num_threads_x);
428
+ const uint32_t num_threads_y = num_threads / num_threads_x;
429
+ dim3 threads(num_threads_x, num_threads_y);
430
+ int64_t maxGridDim = at::cuda::getCurrentDeviceProperties()->maxGridSize[0];
431
+ dim3 grid(std::min(maxGridDim, ceil_div(num_rows, int64_t{threads.y})));
432
+
433
+ check_fits_in_unsigned(num_rows, "Number of rows (self.numel()/self.size(self.dim()-1))");
434
+ check_fits_in_unsigned(row_size, "row_size");
435
+
436
+ tensor_kernel_scan_innermost_dim<scalar_t><<<grid, threads, num_threads * 2 * sizeof(scalar_t),
437
+ at::cuda::getCurrentCUDAStream()>>>(
438
+ result.mutable_data_ptr<scalar_t>(), self.const_data_ptr<scalar_t>(),
439
+ num_rows, row_size, log_num_threads_x, init, binary_op);
440
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
441
+ }
442
+
443
+ template<typename scalar_t, typename BinaryFunction>
444
+ void scan_dim(const TensorBase& self, const TensorBase& result,
445
+ int64_t dim, scalar_t init, BinaryFunction binary_op) {
446
+ int ndim = self.dim();
447
+ auto self_ = self.expect_contiguous();
448
+ TORCH_INTERNAL_ASSERT(result.is_contiguous());
449
+
450
+ if (self.numel() == self.size(dim)) {
451
+ cuda::cub::inclusive_scan(self_->const_data_ptr<scalar_t>(), result.mutable_data_ptr<scalar_t>(), binary_op, self.numel());
452
+ } else if (dim == ndim - 1) {
453
+ scan_innermost_dim<scalar_t>(*self_, result, init, binary_op);
454
+ } else {
455
+ scan_outer_dim<scalar_t>(*self_, result, dim, init, binary_op);
456
+ }
457
+ }
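Editor's note: similarly, a hedged sketch of how `scan_dim` might be driven for a cumsum-style scan. The wrapper name and the float-only restriction are illustrative; the real cumsum entry point dispatches over many dtypes.

// Hypothetical caller for float tensors; assumes this header is included and
// that `result` is a pre-allocated, contiguous CUDA tensor shaped like `self`.
void launch_cumsum_like_float(const at::TensorBase& self, const at::TensorBase& result,
                              int64_t dim) {
  at::native::scan_dim<float>(self, result, dim, /*init=*/0.0f, std::plus<float>());
}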
458
+
459
+ }} // namespace at::native
venv/lib/python3.10/site-packages/torch/include/ATen/native/cuda/Sort.h ADDED
@@ -0,0 +1,17 @@
1
+ #pragma once
2
+ #include <cstdint>
3
+ #include <ATen/core/TensorBase.h>
4
+ #include <ATen/native/cuda/SortStable.h>
5
+
6
+ namespace at {
7
+ namespace native {
8
+
9
+ inline bool should_use_small_sort(const TensorBase &self, int64_t dim) {
10
+ return self.size(dim) <= 4096;
11
+ }
12
+
13
+ void sortKeyValueInplace(
14
+ const TensorBase &key, const TensorBase &value, int dim,
15
+ bool descending, bool stable=false);
16
+
17
+ }} // namespace at::native
venv/lib/python3.10/site-packages/torch/include/ATen/native/cuda/SortStable.h ADDED
@@ -0,0 +1,19 @@
1
+ #pragma once
2
+ #include <ATen/core/TensorBase.h>
3
+ #include <cstdint>
4
+
5
+ namespace at {
6
+ namespace native {
7
+
8
+ // Stable-sort self into values, and set indices to the
9
+ // inverse-permutation from values back to self.
10
+ // Output tensors must be pre-allocated and contiguous.
11
+ void launch_stable_sort_kernel(
12
+ const TensorBase& self,
13
+ int64_t dim,
14
+ bool descending,
15
+ const TensorBase& values,
16
+ const TensorBase& indices);
17
+
18
+ } // namespace native
19
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/native/cuda/SortUtils.cuh ADDED
@@ -0,0 +1,344 @@
1
+ #pragma once
2
+ #include <c10/macros/Macros.h>
3
+ #include <c10/util/Optional.h>
4
+
5
+ #include <ATen/cuda/cub.cuh>
6
+ #include <ATen/cuda/detail/TensorInfo.cuh>
7
+ #include <ATen/cuda/CUDAContext.h>
8
+ #include <ATen/cuda/DeviceUtils.cuh>
9
+ #include <ATen/native/cuda/SortingCommon.cuh>
10
+ #include <ATen/native/cuda/Sort.h>
11
+ #include <ATen/native/StridedRandomAccessor.h>
12
+
13
+ #define HAS_WARP_MERGE_SORT() (CUDA_VERSION >= 110600)
14
+
15
+
16
+ namespace at { namespace native {
17
+
18
+ template <typename T>
19
+ __device__ inline void swapVars(T& t1, T& t2) {
20
+ T tmp = t1;
21
+ t1 = t2;
22
+ t2 = tmp;
23
+ }
24
+
25
+ template <typename Comparator, typename K, typename V>
26
+ __device__ inline void bitonicSwap(K& kA, V& vA, bool& validA,
27
+ K& kB, V& vB, bool& validB,
28
+ bool dir,
29
+ const Comparator& comp) {
30
+ // Invalid entries always sort to the end
31
+ bool swap = (comp(kA, kB) && validA) || !validB;
32
+ if (swap == dir) {
33
+ swapVars(kA, kB);
34
+ swapVars(vA, vB);
35
+ swapVars(validA, validB);
36
+ }
37
+ };
38
+
39
+ template <int Power2SortSize, typename IndexType, typename Comparator,
40
+ typename K, typename V>
41
+ __device__ inline void bitonicSort(K *keys,
42
+ V *values,
43
+ bool *valid,
44
+ const Comparator& comp) {
45
+ #if !defined(USE_ROCM)
46
+ #pragma unroll
47
+ #endif
48
+ for (unsigned int size = 2; size < Power2SortSize; size *= 2) {
49
+ bool flag = ((threadIdx.x & (size / 2)) != 0);
50
+
51
+ #if !defined(USE_ROCM)
52
+ #pragma unroll
53
+ #endif
54
+ for (unsigned int stride = size / 2; stride > 0; stride /= 2) {
55
+
56
+ __syncthreads();
57
+
58
+ unsigned int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
59
+ bitonicSwap<Comparator, K, V>(
60
+ keys[pos], values[pos], valid[pos],
61
+ keys[pos + stride], values[pos + stride], valid[pos + stride],
62
+ flag, comp);
63
+ }
64
+ }
65
+
66
+ #if !defined(USE_ROCM)
67
+ #pragma unroll
68
+ #endif
69
+ for (unsigned int stride = Power2SortSize / 2; stride > 0; stride /= 2) {
70
+
71
+ __syncthreads();
72
+
73
+ unsigned int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
74
+ bitonicSwap<Comparator, K, V>(
75
+ keys[pos], values[pos], valid[pos],
76
+ keys[pos + stride], values[pos + stride], valid[pos + stride],
77
+ false, comp);
78
+ }
79
+
80
+ __syncthreads();
81
+
82
+ }
83
+
84
+ // at::cuda::detail::TensorInfo version
85
+ // Sorts (key, value) pairs (in different tensors) in-place; i.e.,
86
+ // modifies the input `keys` and `values`
87
+ template <int KeyDims, int ValueDims, int block_dim_x, int max_block_dim_y,
88
+ typename K, typename V, typename Comparator, typename IndexType>
89
+ C10_LAUNCH_BOUNDS_1(block_dim_x * max_block_dim_y)
90
+ __global__ void
91
+ bitonicSortKVInPlace(at::cuda::detail::TensorInfo<K, IndexType> keys,
92
+ IndexType keySlices,
93
+ IndexType keySliceSize,
94
+ IndexType keySliceStride,
95
+ at::cuda::detail::TensorInfo<V, IndexType> values,
96
+ IndexType valueSliceStride,
97
+ Comparator comp) {
98
+ // Find the slice of the tensor that we are sorting
99
+ // NOTE: blockDim.y may be less max_block_dim_y
100
+ const IndexType blockIndex = getLinearBlockId<IndexType>();
101
+ const IndexType linearIndex = blockIndex * blockDim.y + threadIdx.y;
102
+
103
+ // If the entire block is out of bounds exit early
104
+ if (blockIndex * blockDim.y >= keySlices) {
105
+ return;
106
+ }
107
+ // It's also possible for some rows of a block to be out of bounds
108
+ // but all thread need to run for __syncthreads to work.
109
+ const bool row_valid = linearIndex < keySlices;
110
+
111
+ constexpr int items_per_thread = 2;
112
+ constexpr int Power2SortSize = block_dim_x * items_per_thread;
113
+
114
+ // Storage for max_block_dim_y sorts performed in parallel
115
+ __shared__ K blockSharedKeys[max_block_dim_y][Power2SortSize];
116
+ __shared__ V blockSharedValues[max_block_dim_y][Power2SortSize];
117
+ __shared__ bool blockSharedValid[max_block_dim_y][Power2SortSize];
118
+
119
+ auto sharedKeys = blockSharedKeys[threadIdx.y];
120
+ auto sharedValues = blockSharedValues[threadIdx.y];
121
+ auto sharedValid = blockSharedValid[threadIdx.y];
122
+
123
+ const IndexType keyStartOffset =
124
+ at::cuda::detail::IndexToOffset<K, IndexType, KeyDims>::get(linearIndex, keys);
125
+ const IndexType valueStartOffset =
126
+ at::cuda::detail::IndexToOffset<V, IndexType, ValueDims>::get(linearIndex, values);
127
+
128
+ // Load 2 values per thread into the shared workspace
129
+ #pragma unroll
130
+ for (int k = 0; k < items_per_thread; ++k) {
131
+ auto idx = threadIdx.x + k * blockDim.x;
132
+ bool valid = row_valid && idx < keySliceSize;
133
+
134
+ sharedKeys[idx] = valid ?
135
+ keys.data[idx * keySliceStride + keyStartOffset] : K{};
136
+ sharedValues[idx] = valid ?
137
+ values.data[idx * valueSliceStride + valueStartOffset] : V{};
138
+ sharedValid[idx] = valid;
139
+ }
140
+
141
+ // Sort!
142
+ bitonicSort<Power2SortSize, IndexType>(
143
+ sharedKeys, sharedValues, sharedValid, comp);
144
+
145
+ if (!row_valid) {
146
+ return;
147
+ }
148
+
149
+ // Store outputs
150
+ #pragma unroll
151
+ for (int k = 0; k < items_per_thread; ++k) {
152
+ auto idx = threadIdx.x + k * blockDim.x;
153
+ if (idx < keySliceSize) {
154
+ keys.data[idx * keySliceStride + keyStartOffset] = sharedKeys[idx];
155
+ values.data[idx * valueSliceStride + valueStartOffset] = sharedValues[idx];
156
+ }
157
+ }
158
+ }
159
+
160
+ #if HAS_WARP_MERGE_SORT()
161
+
162
+ template <int KeyDims, int ValueDims, int sort_size, int max_block_dim_y,
163
+ typename K, typename V, typename Comparator, typename IndexType>
164
+ C10_LAUNCH_BOUNDS_1(C10_WARP_SIZE * max_block_dim_y)
165
+ __global__ void
166
+ warpMergeSortKVInPlace(
167
+ at::cuda::detail::TensorInfo<K, IndexType> keys,
168
+ IndexType keySlices,
169
+ IndexType keySliceSize,
170
+ IndexType keySliceStride,
171
+ at::cuda::detail::TensorInfo<V, IndexType> values,
172
+ IndexType valueSliceStride,
173
+ Comparator comp,
174
+ K invalid_key) {
175
+ // Find the slice of the tensor that we are sorting
176
+ // NOTE: blockDim.y may be less max_block_dim_y
177
+ const IndexType blockIndex = getLinearBlockId<IndexType>();
178
+ const IndexType linearIndex = blockIndex * blockDim.y + threadIdx.y;
179
+
180
+ // If this row is out of bounds exit early
181
+ if (linearIndex >= keySlices) {
182
+ return;
183
+ }
184
+
185
+ const IndexType keyStartOffset =
186
+ at::cuda::detail::IndexToOffset<K, IndexType, KeyDims>::get(linearIndex, keys);
187
+ const IndexType valueStartOffset =
188
+ at::cuda::detail::IndexToOffset<V, IndexType, ValueDims>::get(linearIndex, values);
189
+
190
+ K *keys_slice = &keys.data[keyStartOffset];
191
+ V *values_slice = &values.data[valueStartOffset];
192
+
193
+ StridedRandomAccessor<K, IndexType> keys_iter(keys_slice, keySliceStride);
194
+ StridedRandomAccessor<V, IndexType> values_iter(values_slice, valueSliceStride);
195
+
196
+ namespace cub = ROCM_HIPCUB(at_cuda_detail::cub);
197
+
198
+ CUDA_KERNEL_ASSERT(blockDim.x == C10_WARP_SIZE);
199
+ CUDA_KERNEL_ASSERT(blockDim.y <= max_block_dim_y);
200
+ constexpr int items_per_thread = sort_size / C10_WARP_SIZE;
201
+ static_assert(
202
+ items_per_thread * C10_WARP_SIZE == sort_size,
203
+ "sort_size must be a multiple of C10_WARP_SIZE");
204
+
205
+
206
+ using LoadKeys = cub::WarpLoad<K, items_per_thread, cub::WARP_LOAD_TRANSPOSE>;
207
+ using LoadValues = cub::WarpLoad<V, items_per_thread, cub::WARP_LOAD_TRANSPOSE>;
208
+ using Sort = cub::WarpMergeSort<K, items_per_thread, C10_WARP_SIZE, V>;
209
+ using StoreKeys = cub::WarpStore<K, items_per_thread, cub::WARP_STORE_TRANSPOSE>;
210
+ using StoreValues = cub::WarpStore<V, items_per_thread, cub::WARP_STORE_TRANSPOSE>;
211
+
212
+ __shared__ union {
213
+ typename LoadKeys::TempStorage load_keys;
214
+ typename LoadValues::TempStorage load_values;
215
+ typename Sort::TempStorage sort;
216
+ typename StoreKeys::TempStorage store_keys;
217
+ typename StoreValues::TempStorage store_values;
218
+ } tmp_storage[max_block_dim_y];
219
+
220
+ auto& warp_storage = tmp_storage[threadIdx.y];
221
+
222
+ // Load inputs
223
+ K local_keys[items_per_thread];
224
+ V local_values[items_per_thread];
225
+
226
+ const auto invalid_value = V{};
227
+ LoadKeys(warp_storage.load_keys).Load(keys_iter, local_keys, keySliceSize, invalid_key);
228
+ WARP_SYNC();
229
+ LoadValues(warp_storage.load_values).Load(values_iter, local_values, keySliceSize, invalid_value);
230
+ WARP_SYNC();
231
+
232
+ // Sort! We use stable sort to ensure that invalid values are never
233
+ // sorted before valid values. In testing it performed the same as
234
+ // .Sort, so there is no down-side.
235
+ Sort(warp_storage.sort).StableSort(
236
+ local_keys, local_values, comp, keySliceSize, invalid_key);
237
+ WARP_SYNC();
238
+
239
+ // Store outputs
240
+ StoreKeys(warp_storage.store_keys).Store(keys_iter, local_keys, keySliceSize);
241
+ WARP_SYNC();
242
+ StoreValues(warp_storage.store_values).Store(values_iter, local_values, keySliceSize);
243
+ }
244
+
245
+ #endif // HAS_WARP_MERGE_SORT()
246
+
247
+ template <int KeyDims, int ValueDims,
248
+ int block_size, int items_per_thread,
249
+ typename K, typename V, typename IndexType>
250
+ C10_LAUNCH_BOUNDS_1(block_size)
251
+ __global__ void
252
+ radixSortKVInPlace(at::cuda::detail::TensorInfo<K, IndexType> keys,
253
+ IndexType keySlices,
254
+ IndexType keySliceSize,
255
+ IndexType keySliceStride,
256
+ at::cuda::detail::TensorInfo<V, IndexType> values,
257
+ IndexType valueSliceStride,
258
+ bool descending) {
259
+ static_assert(block_size > 0, "");
260
+
261
+ // Find the slice of the tensor that we are sorting
262
+ const IndexType linearIndex = getLinearBlockId<IndexType>();
263
+ // Tiling the slices could have us be out of bounds, if there are a
264
+ // lot of slices to sort
265
+ if (linearIndex >= keySlices) {
266
+ return;
267
+ }
268
+
269
+ const IndexType keyStartOffset =
270
+ at::cuda::detail::IndexToOffset<K, IndexType, KeyDims>::get(linearIndex, keys);
271
+ const IndexType valueStartOffset =
272
+ at::cuda::detail::IndexToOffset<V, IndexType, ValueDims>::get(linearIndex, values);
273
+
274
+ K *keys_slice = &keys.data[keyStartOffset];
275
+ V *values_slice = &values.data[valueStartOffset];
276
+
277
+ StridedRandomAccessor<K, IndexType> keys_iter(keys_slice, keySliceStride);
278
+ StridedRandomAccessor<V, IndexType> values_iter(values_slice, valueSliceStride);
279
+
280
+ namespace cub = ROCM_HIPCUB(at_cuda_detail::cub);
281
+
282
+ using key_t = typename at::cuda::cub::detail::cuda_type<K>::type;
283
+ using LoadKeys = cub::BlockLoad<K, block_size, items_per_thread,
284
+ cub::BlockLoadAlgorithm::BLOCK_LOAD_TRANSPOSE>;
285
+ using LoadValues = cub::BlockLoad<V, block_size, items_per_thread,
286
+ cub::BlockLoadAlgorithm::BLOCK_LOAD_TRANSPOSE>;
287
+ using Sort = cub::BlockRadixSort<key_t, block_size, items_per_thread, V>;
288
+ using StoreKeys = cub::BlockStore<K, block_size, items_per_thread,
289
+ cub::BLOCK_STORE_TRANSPOSE>;
290
+ using StoreValues = cub::BlockStore<V, block_size, items_per_thread,
291
+ cub::BLOCK_STORE_TRANSPOSE>;
292
+
293
+ __shared__ union {
294
+ typename LoadKeys::TempStorage load_keys;
295
+ typename LoadValues::TempStorage load_values;
296
+ typename Sort::TempStorage sort;
297
+ typename StoreKeys::TempStorage store_keys;
298
+ typename StoreValues::TempStorage store_values;
299
+ } tmp_storage;
300
+
301
+ // cub's Block operations operate on a fixed number of items, but the
302
+ // actual slice we are sorting might be smaller. So, we need to make
303
+ // up the difference with keys that will always sort higher.
304
+ const K invalid_key = [descending] {
305
+ using radix_t = typename cub::Traits<key_t>::UnsignedBits;
306
+ union {
307
+ K key;
308
+ radix_t radix;
309
+ } tmp;
310
+ tmp.radix = descending ?
311
+ cub::Traits<key_t>::LOWEST_KEY :
312
+ cub::Traits<key_t>::MAX_KEY;
313
+ return tmp.key;
314
+ }();
315
+ const V invalid_value = static_cast<V>(0);
316
+
317
+ // Load inputs
318
+ K local_keys[items_per_thread];
319
+ V local_values[items_per_thread];
320
+
321
+ LoadKeys(tmp_storage.load_keys).Load(keys_iter, local_keys, keySliceSize, invalid_key);
322
+ __syncthreads();
323
+ LoadValues(tmp_storage.load_values).Load(values_iter, local_values, keySliceSize, invalid_value);
324
+ __syncthreads();
325
+
326
+ // Sort!
327
+ if (descending) {
328
+ Sort(tmp_storage.sort).SortDescending(
329
+ reinterpret_cast<key_t (&)[items_per_thread]>(local_keys),
330
+ local_values);
331
+ } else {
332
+ Sort(tmp_storage.sort).Sort(
333
+ reinterpret_cast<key_t (&)[items_per_thread]>(local_keys),
334
+ local_values);
335
+ }
336
+ __syncthreads();
337
+
338
+ // Store outputs
339
+ StoreKeys(tmp_storage.store_keys).Store(keys_iter, local_keys, keySliceSize);
340
+ __syncthreads();
341
+ StoreValues(tmp_storage.store_values).Store(values_iter, local_values, keySliceSize);
342
+ }
343
+
344
+ }} // at::native
venv/lib/python3.10/site-packages/torch/include/ATen/native/cuda/Sorting.h ADDED
@@ -0,0 +1,18 @@
1
+ #pragma once
2
+ #include <cstdint>
3
+
4
+ namespace at {
5
+ class TensorBase;
6
+ }
7
+
8
+ namespace at {
9
+ namespace native {
10
+
11
+ void launch_kthvalue_kernel(
12
+ const TensorBase &values, const TensorBase &indices,
13
+ const TensorBase &self, int64_t dim, int64_t k);
14
+ void launch_median_kernel(
15
+ const TensorBase &vals, const TensorBase &inds,
16
+ const TensorBase &in, int64_t dim, bool ignore_nan);
17
+
18
+ }} // namespace at::native
venv/lib/python3.10/site-packages/torch/include/ATen/native/cuda/SortingRadixSelect.cuh ADDED
@@ -0,0 +1,429 @@
1
+ #include <ATen/ceil_div.h>
2
+ #include <ATen/cuda/Atomic.cuh>
3
+ #include <ATen/cuda/DeviceUtils.cuh>
4
+ #include <ATen/cuda/AsmUtils.cuh>
5
+ #include <c10/macros/Macros.h>
6
+
7
+ namespace at {
8
+ namespace native {
9
+
10
+ template <typename scalar_t>
11
+ struct TopKTypeConfig {};
12
+
13
+ template <>
14
+ struct TopKTypeConfig<float> {
15
+ typedef uint32_t RadixType;
16
+
17
+ // Converts a float to an integer representation with the same
18
+ // sorting; i.e., for floats f1, f2:
19
+ // if f1 < f2 then convert(f1) < convert(f2)
20
+ // We use this to enable radix selection of floating-point values.
21
+ // This also gives a relative order for NaNs, but that's ok, as they
22
+ // will all be adjacent
23
+ // neg inf: signbit=1 exp=ff fraction=0 --> radix = 0 00 ff..
24
+ // pos inf: signbit=0 exp=ff fraction=0 --> radix = 1 ff 00..
25
+ // pos nan: signbit=0 exp=ff fraction>0 --> radix = 1 ff x>0
26
+ // neg nan: signbit=1 exp=ff fraction>0 --> radix = 0 00 x<ff...
27
+ static inline __device__ RadixType convert(float v) {
28
+ RadixType x = __float_as_int(v);
29
+ RadixType mask = (x & 0x80000000) ? 0xffffffff : 0x80000000;
30
+
31
+ return (v == v) ? (x ^ mask) : 0xffffffff;
32
+ }
33
+
34
+ static inline __device__ float deconvert(RadixType v) {
35
+ RadixType mask = (v & 0x80000000) ? 0x80000000 : 0xffffffff;
36
+
37
+ return __int_as_float(v ^ mask);
38
+ }
39
+ };
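Editor's note: as an aside (editorial, not from the header), the sign-bit flip / negative inversion described in the comment above can be checked on the host with the analogous bit operations. The helper below mirrors `convert` using memcpy in place of the device intrinsic; the example values are arbitrary.

// Host-side sketch of the order-preserving float -> uint32_t mapping.
#include <cstdint>
#include <cstdio>
#include <cstring>

static uint32_t convert(float v) {
  uint32_t x; std::memcpy(&x, &v, sizeof(x));                // like __float_as_int
  uint32_t mask = (x & 0x80000000u) ? 0xffffffffu : 0x80000000u;
  return (v == v) ? (x ^ mask) : 0xffffffffu;                // NaNs map to the top
}

int main() {
  // Monotonic: -2.0f < -0.5f < 0.0f < 1.5f, and the converted keys agree.
  float vals[] = {-2.0f, -0.5f, 0.0f, 1.5f};
  for (float v : vals) std::printf("% .1f -> %08x\n", v, convert(v));
}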
40
+
41
+ template <>
42
+ struct TopKTypeConfig<uint8_t> {
43
+ typedef uint32_t RadixType;
44
+
45
+ static inline __device__ RadixType convert(uint8_t v) {
46
+ return v;
47
+ }
48
+
49
+ static inline __device__ uint8_t deconvert(RadixType v) {
50
+ return v;
51
+ }
52
+ };
53
+
54
+ template <>
55
+ struct TopKTypeConfig<int8_t> {
56
+ typedef uint32_t RadixType;
57
+
58
+ static inline __device__ RadixType convert(int8_t v) {
59
+ return 128u + v;
60
+ }
61
+
62
+ static inline __device__ int8_t deconvert(RadixType v) {
63
+ return v - 128;
64
+ }
65
+ };
66
+
67
+ template <>
68
+ struct TopKTypeConfig<int16_t> {
69
+ typedef uint32_t RadixType;
70
+
71
+ static inline __device__ RadixType convert(int16_t v) {
72
+ static_assert(sizeof(short) == 2, "");
73
+ return 32768u + v;
74
+ }
75
+
76
+ static inline __device__ int16_t deconvert(RadixType v) {
77
+ return v - 32768;
78
+ }
79
+ };
80
+
81
+ template <>
82
+ struct TopKTypeConfig<int32_t> {
83
+ typedef uint32_t RadixType;
84
+
85
+ static inline __device__ RadixType convert(int32_t v) {
86
+ static_assert(sizeof(int) == 4, "");
87
+ return 2147483648u + v;
88
+ }
89
+
90
+ static inline __device__ int32_t deconvert(RadixType v) {
91
+ return v - 2147483648u;
92
+ }
93
+ };
94
+
95
+ template <>
96
+ struct TopKTypeConfig<int64_t> {
97
+ typedef uint64_t RadixType;
98
+
99
+ static inline __device__ RadixType convert(int64_t v) {
100
+ static_assert(sizeof(int64_t) == 8, "");
101
+ return 9223372036854775808ull + v;
102
+ }
103
+
104
+ static inline __device__ int64_t deconvert(RadixType v) {
105
+ return v - 9223372036854775808ull;
106
+ }
107
+ };
108
+
109
+ template <>
110
+ struct TopKTypeConfig<double> {
111
+ typedef uint64_t RadixType;
112
+
113
+ static inline __device__ RadixType convert(double v) {
114
+ RadixType x = __double_as_longlong(v);
115
+ RadixType mask = -((x >> 63)) | 0x8000000000000000;
116
+ return (v == v) ? (x ^ mask) : 0xffffffffffffffff;
117
+ }
118
+
119
+ static inline __device__ double deconvert(RadixType v) {
120
+ RadixType mask = ((v >> 63) - 1) | 0x8000000000000000;
121
+ return __longlong_as_double(v ^ mask);
122
+ }
123
+ };
124
+
125
+ template <>
126
+ struct TopKTypeConfig<at::Half> {
127
+ typedef uint32_t RadixType;
128
+
129
+ static inline __device__ RadixType convert(at::Half v) {
130
+ #if defined(__CUDA_ARCH__) || defined(USE_ROCM)
131
+ RadixType x = __half_as_ushort(v);
132
+ RadixType mask = (x & 0x00008000) ? 0x0000ffff : 0x00008000;
133
+ return (v == v) ? (x ^ mask) : 0xffff;
134
+ #else
135
+ CUDA_KERNEL_ASSERT(false);
136
+ return 0u;
137
+ #endif
138
+ }
139
+
140
+ static inline __device__ at::Half deconvert(RadixType v) {
141
+ #if defined(__CUDA_ARCH__) || defined(USE_ROCM)
142
+ RadixType mask = (v & 0x00008000) ? 0x00008000 : 0x0000ffff;
143
+ return __ushort_as_half(v ^ mask);
144
+ #else
145
+ CUDA_KERNEL_ASSERT(false);
146
+ return static_cast<at::Half>(0);
147
+ #endif
148
+ }
149
+ };
150
+
151
+ template <>
152
+ struct TopKTypeConfig<at::BFloat16> {
153
+ typedef uint32_t RadixType;
154
+
155
+ static inline __device__ RadixType convert(at::BFloat16 v) {
156
+ RadixType x = v.x;
157
+ RadixType mask = (x & 0x00008000) ? 0x0000ffff : 0x00008000;
158
+ return (v == v) ? (x ^ mask) : 0xffff;
159
+ }
160
+
161
+ static inline __device__ at::BFloat16 deconvert(RadixType v) {
162
+ RadixType mask = (v & 0x00008000) ? 0x00008000 : 0x0000ffff;
163
+ at::BFloat16 r;
164
+ r.x = (v ^ mask);
165
+ return r;
166
+ }
167
+ };
168
+
169
+ // This function counts the distribution of all input values in a
170
+ // slice we are selecting by radix digit at `radixDigitPos`, but only
171
+ // those that pass the filter `((v & desiredMask) == desired)`.
172
+ // This produces and broadcasts the seen counts for a single block only.
173
+ // `smem` must have at least `RadixSize` elements.
174
+ template <
175
+ typename scalar_t,
176
+ typename bitwise_t,
177
+ typename index_t,
178
+ typename CountType,
179
+ int RadixSize,
180
+ int RadixBits>
181
+ __device__ void countRadixUsingMask(
182
+ CountType counts[RadixSize],
183
+ CountType* smem,
184
+ bitwise_t desired,
185
+ bitwise_t desiredMask,
186
+ int radixDigitPos,
187
+ index_t sliceSize,
188
+ index_t withinSliceStride,
189
+ const scalar_t* data) {
190
+ // Clear out per-thread counts from a previous round
191
+ #pragma unroll
192
+ for (int i = 0; i < RadixSize; ++i) {
193
+ counts[i] = 0;
194
+ }
195
+
196
+ if (threadIdx.x < RadixSize) {
197
+ smem[threadIdx.x] = 0;
198
+ }
199
+ __syncthreads();
200
+
201
+ // Scan over all the data. Upon a read, the warp will accumulate
202
+ // counts per each digit in the radix using warp voting.
203
+ #if !defined(USE_ROCM)
204
+ // Must be called outside of loop to ensure all threads participate
205
+ unsigned mask = WARP_BALLOT(threadIdx.x < sliceSize);
206
+ #endif
207
+ for (index_t i = threadIdx.x; i < sliceSize;) {
208
+ bitwise_t val =
209
+ TopKTypeConfig<scalar_t>::convert(doLdg(&data[i * withinSliceStride]));
210
+
211
+ bool hasVal = ((val & desiredMask) == desired);
212
+ bitwise_t digitInRadix = at::cuda::Bitfield<bitwise_t>::getBitfield(
213
+ val, radixDigitPos, RadixBits);
214
+
215
+ #pragma unroll
216
+ for (uint32_t j = 0; j < RadixSize; ++j) {
217
+ bool vote = hasVal && (digitInRadix == j);
218
+ #if defined(USE_ROCM)
219
+ counts[j] += __popcll(WARP_BALLOT(vote));
220
+ #else
221
+ counts[j] += __popc(WARP_BALLOT(vote, mask));
222
+ #endif
223
+ }
224
+ i += blockDim.x;
225
+ #if !defined(USE_ROCM)
226
+ mask = WARP_BALLOT(i < sliceSize, mask);
227
+ #endif
228
+ }
229
+
230
+ // Now, for each warp, sum values
231
+ if (at::cuda::getLaneId() == 0) {
232
+ #pragma unroll
233
+ for (uint32_t i = 0; i < RadixSize; ++i) {
234
+ gpuAtomicAddNoReturn(&smem[i], counts[i]);
235
+ }
236
+ }
237
+
238
+ __syncthreads();
239
+
240
+ // For each thread, read in the total counts
241
+ #pragma unroll
242
+ for (uint32_t i = 0; i < RadixSize; ++i) {
243
+ counts[i] = smem[i];
244
+ }
245
+
246
+ __syncthreads();
247
+ }
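Editor's note: to ground the digit-counting step (an editorial sketch, not part of the file), with RADIX_BITS == 2 the routine is conceptually a histogram over 2-bit digits of the converted keys, restricted to keys that still match the (desired, desiredMask) prefix. A serial host analogue with made-up keys:

// Serial host analogue of the per-block digit histogram, for uint32_t keys.
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  const int RADIX_BITS = 2, RADIX_SIZE = 1 << RADIX_BITS;
  std::vector<uint32_t> keys = {0x12, 0x1f, 0x33, 0x10};
  uint32_t desired = 0x10, desiredMask = 0xf0;   // keep keys whose high nibble is 1
  int radixDigitPos = 2;                         // look at bits [2, 3]
  int counts[RADIX_SIZE] = {0};
  for (uint32_t k : keys) {
    if ((k & desiredMask) != desired) continue;  // 0x33 is filtered out
    counts[(k >> radixDigitPos) & (RADIX_SIZE - 1)]++;
  }
  for (int d = 0; d < RADIX_SIZE; ++d) std::printf("digit %d: %d\n", d, counts[d]);
}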
248
+
249
+ // Over what radix we are selecting values
250
+ constexpr int RADIX_BITS = 2; // digits are base-(2 ^ RADIX_BITS)
251
+ constexpr int RADIX_SIZE = 4; // 2 ^ RADIX_BITS
252
+ constexpr int RADIX_MASK = (RADIX_SIZE - 1);
253
+
254
+ // This finds the unique value `v` that matches the pattern
255
+ // ((v & desiredMask) == desired) in our sorted int format
256
+ template <typename scalar_t, typename bitwise_t, typename index_t>
257
+ __device__ scalar_t findPattern(
258
+ scalar_t* smem,
259
+ const scalar_t* data,
260
+ index_t sliceSize,
261
+ index_t withinSliceStride,
262
+ bitwise_t desired,
263
+ bitwise_t desiredMask) {
264
+ if (threadIdx.x < 2) {
265
+ smem[threadIdx.x] = static_cast<scalar_t>(0);
266
+ }
267
+ __syncthreads();
268
+
269
+ // All threads participate in the loop, in order to sync on the flag
270
+ index_t numIterations =
271
+ round_up(sliceSize, static_cast<index_t>(blockDim.x));
272
+ for (index_t i = threadIdx.x; i < numIterations; i += blockDim.x) {
273
+ bool inRange = (i < sliceSize);
274
+ scalar_t v = inRange ? doLdg(&data[i * withinSliceStride])
275
+ : static_cast<scalar_t>(0);
276
+
277
+ if (inRange &&
278
+ ((TopKTypeConfig<scalar_t>::convert(v) & desiredMask) == desired)) {
279
+ // There should not be conflicts if we are using findPattern,
280
+ // since the result is unique
281
+ smem[0] = static_cast<scalar_t>(1);
282
+ smem[1] = v; // can't use val as the flag, since it could be 0
283
+ }
284
+
285
+ __syncthreads();
286
+
287
+ scalar_t found = smem[0];
288
+ scalar_t val = smem[1];
289
+
290
+ __syncthreads();
291
+
292
+ // Check to see if a thread found the value
293
+ if (found != static_cast<scalar_t>(0)) {
294
+ // all threads return this value
295
+ return val;
296
+ }
297
+ }
298
+
299
+ // should not get here
300
+ CUDA_KERNEL_ASSERT(false);
301
+ return static_cast<scalar_t>(0);
302
+ }
303
+
304
+ // Returns the top-Kth element found in the data using radix selection
305
+ template <typename scalar_t, typename bitwise_t, typename index_t>
306
+ __device__ void radixSelect(
307
+ const scalar_t* data,
308
+ index_t k,
309
+ bool largest,
310
+ index_t sliceSize,
311
+ index_t withinSliceStride,
312
+ int* smem,
313
+ scalar_t* topK) {
314
+ // Per-thread buckets into which we accumulate digit counts in our
315
+ // radix
316
+ int counts[RADIX_SIZE];
317
+
318
+ // We only consider elements x such that (x & desiredMask) == desired
319
+ // Initially, we consider all elements of the array, so the above
320
+ // statement is true regardless of input.
321
+ bitwise_t desired = 0;
322
+ bitwise_t desiredMask = 0;
323
+
324
+ // We are looking for the top kToFind-th element when iterating over
325
+ // digits; this count gets reduced by elimination when counting
326
+ // successive digits
327
+ int kToFind = k;
328
+
329
+ // We start at the most significant digit in our radix, scanning
330
+ // through to the least significant digit
331
+ for (int digitPos = sizeof(scalar_t) * 8 - RADIX_BITS; digitPos >= 0;
332
+ digitPos -= RADIX_BITS) {
333
+ // Count radix distribution for the current position and reduce
334
+ // across all threads
335
+ countRadixUsingMask<
336
+ scalar_t,
337
+ bitwise_t,
338
+ index_t,
339
+ int,
340
+ RADIX_SIZE,
341
+ RADIX_BITS>(
342
+ counts,
343
+ smem,
344
+ desired,
345
+ desiredMask,
346
+ digitPos,
347
+ sliceSize,
348
+ withinSliceStride,
349
+ data);
350
+
351
+ auto found_unique = [&](int i, int count) -> bool {
352
+ /* All threads have the same value in counts here, so all */
353
+ /* threads will return from the function. */
354
+ if (count == 1 && kToFind == 1) {
355
+ /* There is a unique answer. */
356
+ desired = at::cuda::Bitfield<bitwise_t>::setBitfield(
357
+ desired, i, digitPos, RADIX_BITS);
358
+ desiredMask = at::cuda::Bitfield<bitwise_t>::setBitfield(
359
+ desiredMask, RADIX_MASK, digitPos, RADIX_BITS);
360
+
361
+ /* The answer is now the unique element v such that: */
362
+ /* (v & desiredMask) == desired */
363
+ /* However, we do not yet know what the actual element is. We */
364
+ /* need to perform a search through the data to find the */
365
+ /* element that matches this pattern. */
366
+ *topK = findPattern<scalar_t, bitwise_t, index_t>(
367
+ (scalar_t*)smem,
368
+ data,
369
+ sliceSize,
370
+ withinSliceStride,
371
+ desired,
372
+ desiredMask);
373
+ return true;
374
+ }
375
+ return false;
376
+ };
377
+ auto found_non_unique = [&](int i, int count) -> bool {
378
+ if (count >= kToFind) {
379
+ desired =
380
+ at::cuda::Bitfield<bitwise_t>::setBitfield(
381
+ desired, i, digitPos, RADIX_BITS);
382
+ desiredMask = at::cuda::Bitfield<bitwise_t>::setBitfield(
383
+ desiredMask, RADIX_MASK, digitPos, RADIX_BITS);
384
+
385
+ /* The top-Kth element v must now be one such that: */
386
+ /* (v & desiredMask == desired) */
387
+ /* but we haven't narrowed it down; we must check the next */
388
+ /* least-significant digit */
389
+ return true;
390
+ }
391
+ kToFind -= count;
392
+ return false; // continue the loop
393
+ };
394
+
395
+ // All threads participate in the comparisons below to know the
396
+ // final result
397
+ if (largest) {
398
+ // Process in descending order
399
+ #pragma unroll
400
+ for (int i = RADIX_SIZE - 1; i >= 0; --i) {
401
+ int count = counts[i];
402
+ if (found_unique(i, count)) {
403
+ return;
404
+ }
405
+ if (found_non_unique(i, count)) {
406
+ break;
407
+ }
408
+ }
409
+ } else {
410
+ // Process in ascending order
411
+ #pragma unroll
412
+ for (int i = 0; i < RADIX_SIZE; ++i) {
413
+ int count = counts[i];
414
+ if (found_unique(i, count)) {
415
+ return;
416
+ }
417
+ if (found_non_unique(i, count)) {
418
+ break;
419
+ }
420
+ }
421
+ }
422
+ } // end digitPos for
423
+
424
+ // There is no unique result, but there is a non-unique result
425
+ // matching `desired` exactly
426
+ *topK = TopKTypeConfig<scalar_t>::deconvert(desired);
427
+ }
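Editor's note: for orientation, a hedged sketch of how a caller kernel might hand radixSelect one slice per block plus a small shared scratch buffer. The kernel name, the scratch size, and the one-block-per-row launch shape are hypothetical; only the radixSelect signature is taken from this file.

// Hypothetical wrapper: each block finds the k-th largest element (1-based k)
// of one contiguous row of `data` (rows of length `row_size`).
// Launch with one block per row, e.g. <<<num_rows, 256>>>.
template <typename scalar_t>
__global__ void kth_largest_per_row(const scalar_t* data, int64_t row_size,
                                    int64_t k, scalar_t* out) {
  // Scratch must hold at least RADIX_SIZE ints; findPattern also reuses it,
  // so a few extra slots are provided.
  __shared__ int smem[64];
  scalar_t topK;
  at::native::radixSelect<scalar_t,
                          typename at::native::TopKTypeConfig<scalar_t>::RadixType,
                          int64_t>(
      data + blockIdx.x * row_size, k, /*largest=*/true,
      row_size, /*withinSliceStride=*/1, smem, &topK);
  if (threadIdx.x == 0) {
    out[blockIdx.x] = topK;
  }
}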
428
+ } // namespace native
429
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/native/cuda/UniqueCub.cuh ADDED
@@ -0,0 +1,16 @@
1
+ #include <ATen/core/Tensor.h>
2
+
3
+ namespace at {
4
+ namespace native {
5
+ namespace internal {
6
+
7
+ template <typename scalar_t>
8
+ std::tuple<Tensor, Tensor, Tensor> unique_cuda_template(
9
+ const Tensor& self,
10
+ const bool consecutive,
11
+ const bool return_inverse,
12
+ const bool return_counts);
13
+
14
+ } // namespace internal
15
+ } // namespace native
16
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/native/cuda/im2col.cuh ADDED
@@ -0,0 +1,345 @@
1
+ #pragma once
2
+
3
+ #include <ATen/AccumulateType.h>
4
+ #include <ATen/cuda/CUDAContext.h>
5
+ #include <ATen/cuda/detail/KernelUtils.h>
6
+
7
+ #include <c10/macros/Macros.h>
8
+
9
+ namespace at {
10
+ namespace native {
11
+
12
+ using namespace at::cuda::detail;
13
+
14
+ // Kernel for fast unfold+copy
15
+ // (borrowed from Caffe:
16
+ // https://github.com/BVLC/caffe/blob/master/src/caffe/layers/conv_layer.cu)
17
+ // CUDA_NUM_THREADS = 1024
18
+
19
+ template <typename dt>
20
+ C10_LAUNCH_BOUNDS_1(1024)
21
+ __global__ void im2col_kernel(
22
+ const int64_t n,
23
+ const dt* data_im,
24
+ const int64_t height,
25
+ const int64_t width,
26
+ const int64_t kernel_height,
27
+ const int64_t kernel_width,
28
+ const int64_t pad_height,
29
+ const int64_t pad_width,
30
+ const int64_t stride_height,
31
+ const int64_t stride_width,
32
+ const int64_t dilation_height,
33
+ const int64_t dilation_width,
34
+ const int64_t height_col,
35
+ const int64_t width_col,
36
+ dt* data_col) {
37
+ CUDA_KERNEL_LOOP(index, n) {
38
+ int64_t w_out = index % width_col;
39
+
40
+ int64_t idx = index / width_col;
41
+
42
+ int64_t h_out = idx % height_col;
43
+ int64_t channel_in = idx / height_col;
44
+ int64_t channel_out = channel_in * kernel_height * kernel_width;
45
+ int64_t h_in = h_out * stride_height - pad_height;
46
+ int64_t w_in = w_out * stride_width - pad_width;
47
+
48
+ dt* col = data_col + (channel_out * height_col + h_out) * width_col + w_out;
49
+ const dt* im = data_im + (channel_in * height + h_in) * width + w_in;
50
+
51
+ for (int64_t i = 0; i < kernel_height; ++i) {
52
+ for (int64_t j = 0; j < kernel_width; ++j) {
53
+ int64_t h = h_in + i * dilation_height;
54
+ int64_t w = w_in + j * dilation_width;
55
+ *col = (h >= 0 && w >= 0 && h < height && w < width)
56
+ ? im[i * dilation_height * width + j * dilation_width]
57
+ : static_cast<dt>(0);
58
+ col += height_col * width_col;
59
+ }
60
+ }
61
+ }
62
+ }
63
+
64
+ template <typename dt>
65
+ void im2col(
66
+ cudaStream_t stream,
67
+ const dt* data_im,
68
+ const int64_t channels,
69
+ const int64_t height,
70
+ const int64_t width,
71
+ const int64_t height_col,
72
+ const int64_t width_col,
73
+ const int64_t kernel_height,
74
+ const int64_t kernel_width,
75
+ const int64_t pad_height,
76
+ const int64_t pad_width,
77
+ const int64_t stride_height,
78
+ const int64_t stride_width,
79
+ const int64_t dilation_height,
80
+ const int64_t dilation_width,
81
+ dt* data_col) {
82
+ // We are going to launch channels * height_col * width_col kernels, each
83
+ // kernel responsible for copying a single-channel grid.
84
+ int64_t num_kernels = channels * height_col * width_col;
85
+ // Launch CUDA_NUM_THREADS = 1024
86
+ im2col_kernel<<<GET_BLOCKS(num_kernels), 1024, 0, stream>>>(
87
+ num_kernels,
88
+ data_im,
89
+ height,
90
+ width,
91
+ kernel_height,
92
+ kernel_width,
93
+ pad_height,
94
+ pad_width,
95
+ stride_height,
96
+ stride_width,
97
+ dilation_height,
98
+ dilation_width,
99
+ height_col,
100
+ width_col,
101
+ data_col);
102
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
103
+ }
104
+
105
+ template <typename accT, typename dt>
106
+ __forceinline__ __device__ void col2im_device(
107
+ const int64_t index,
108
+ const dt* data_col,
109
+ const int64_t height,
110
+ const int64_t width,
111
+ const int64_t channels,
112
+ const int64_t kernel_h,
113
+ const int64_t kernel_w,
114
+ const int64_t pad_height,
115
+ const int64_t pad_width,
116
+ const int64_t stride_height,
117
+ const int64_t stride_width,
118
+ const int64_t dilation_height,
119
+ const int64_t dilation_width,
120
+ const int64_t height_col,
121
+ const int64_t width_col,
122
+ dt* data_im) {
123
+ accT val = static_cast<accT>(0);
124
+ const int64_t w_im = index % width + pad_width;
125
+ const int64_t h_im = (index / width) % height + pad_height;
126
+ const int64_t c_im = index / (width * height);
127
+ int64_t kernel_extent_w = (kernel_w - 1) * dilation_width + 1;
128
+ int64_t kernel_extent_h = (kernel_h - 1) * dilation_height + 1;
129
+ // compute the start and end of the output
130
+ const int64_t w_col_start = (w_im < kernel_extent_w)
131
+ ? 0
132
+ : (w_im - kernel_extent_w) / stride_width + 1;
133
+ const int64_t w_col_end = ::min(w_im / stride_width + 1, width_col);
134
+ const int64_t h_col_start = (h_im < kernel_extent_h)
135
+ ? 0
136
+ : (h_im - kernel_extent_h) / stride_height + 1;
137
+ const int64_t h_col_end = ::min(h_im / stride_height + 1, height_col);
138
+
139
+ // TODO: use LCM of stride and dilation to avoid unnecessary loops
140
+ for (int64_t h_col = h_col_start; h_col < h_col_end; h_col += 1) {
141
+ for (int64_t w_col = w_col_start; w_col < w_col_end; w_col += 1) {
142
+ int64_t h_k = (h_im - h_col * stride_height);
143
+ int64_t w_k = (w_im - w_col * stride_width);
144
+ if (h_k % dilation_height == 0 && w_k % dilation_width == 0) {
145
+ h_k /= dilation_height;
146
+ w_k /= dilation_width;
147
+ int64_t data_col_index =
148
+ (((c_im * kernel_h + h_k) * kernel_w + w_k) * height_col +
149
+ h_col) *
150
+ width_col +
151
+ w_col;
152
+ val += data_col[data_col_index];
153
+ }
154
+ }
155
+ }
156
+ data_im[index] = static_cast<dt>(val);
157
+ }
158
+
159
+ template <typename dt, typename accT>
160
+ C10_LAUNCH_BOUNDS_1(512)
161
+ __global__ void col2im_kernel(
162
+ const int64_t n,
163
+ const dt* data_col,
164
+ const int64_t height,
165
+ const int64_t width,
166
+ const int64_t channels,
167
+ const int64_t kernel_h,
168
+ const int64_t kernel_w,
169
+ const int64_t pad_height,
170
+ const int64_t pad_width,
171
+ const int64_t stride_height,
172
+ const int64_t stride_width,
173
+ const int64_t dilation_height,
174
+ const int64_t dilation_width,
175
+ const int64_t height_col,
176
+ const int64_t width_col,
177
+ dt* data_im) {
178
+ CUDA_KERNEL_LOOP(index, n) {
179
+ col2im_device<accT>(
180
+ index,
181
+ data_col,
182
+ height,
183
+ width,
184
+ channels,
185
+ kernel_h,
186
+ kernel_w,
187
+ pad_height,
188
+ pad_width,
189
+ stride_height,
190
+ stride_width,
191
+ dilation_height,
192
+ dilation_width,
193
+ height_col,
194
+ width_col,
195
+ data_im);
196
+ }
197
+ }
198
+
199
+ template <typename dt, typename accT>
200
+ void col2im(
201
+ cudaStream_t stream,
202
+ const dt* data_col,
203
+ const int64_t channels,
204
+ const int64_t height,
205
+ const int64_t width,
206
+ const int64_t height_col,
207
+ const int64_t width_col,
208
+ const int64_t patch_height,
209
+ const int64_t patch_width,
210
+ const int64_t pad_height,
211
+ const int64_t pad_width,
212
+ const int64_t stride_height,
213
+ const int64_t stride_width,
214
+ const int64_t dilation_height,
215
+ const int64_t dilation_width,
216
+ dt* data_im) {
217
+ int64_t num_kernels = channels * height * width;
218
+ // To avoid involving atomic operations, we will launch one kernel per
219
+ // bottom dimension, and then in the kernel add up the top dimensions.
220
+ // CUDA_NUM_THREADS = 1024
221
+ col2im_kernel<dt, accT>
222
+ <<<GET_BLOCKS(num_kernels, 512), 512, 0, stream>>>(
223
+ num_kernels,
224
+ data_col,
225
+ height,
226
+ width,
227
+ channels,
228
+ patch_height,
229
+ patch_width,
230
+ pad_height,
231
+ pad_width,
232
+ stride_height,
233
+ stride_width,
234
+ dilation_height,
235
+ dilation_width,
236
+ height_col,
237
+ width_col,
238
+ data_im);
239
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
240
+ }
241
+
242
+ template <typename dt>
243
+ C10_LAUNCH_BOUNDS_1(512)
244
+ __global__ void col2im_batched_kernel(
245
+ const int64_t n,
246
+ const dt* data_col,
247
+ const int64_t col_batch_stride,
248
+ const int64_t nbatch,
249
+ const int64_t height,
250
+ const int64_t width,
251
+ const int64_t channels,
252
+ const int64_t kernel_h,
253
+ const int64_t kernel_w,
254
+ const int64_t pad_height,
255
+ const int64_t pad_width,
256
+ const int64_t stride_height,
257
+ const int64_t stride_width,
258
+ const int64_t dilation_height,
259
+ const int64_t dilation_width,
260
+ const int64_t height_col,
261
+ const int64_t width_col,
262
+ dt* data_im,
263
+ const int64_t im_batch_stride) {
264
+ using accT = at::acc_type<dt, /*is_cuda*/true>;
265
+ const auto im_numel = n * nbatch;
266
+
267
+ CUDA_KERNEL_LOOP_TYPE(index, im_numel, int64_t) {
268
+ const auto ibatch = index / n;
269
+ const auto slice_index = index % n;
270
+
271
+ col2im_device<accT>(
272
+ slice_index,
273
+ data_col + ibatch * col_batch_stride,
274
+ height,
275
+ width,
276
+ channels,
277
+ kernel_h,
278
+ kernel_w,
279
+ pad_height,
280
+ pad_width,
281
+ stride_height,
282
+ stride_width,
283
+ dilation_height,
284
+ dilation_width,
285
+ height_col,
286
+ width_col,
287
+ data_im + ibatch * im_batch_stride);
288
+ }
289
+ }
290
+
291
+ template <typename dt>
292
+ void col2im_batched(
293
+ cudaStream_t stream,
294
+ const dt* data_col,
295
+ const int64_t col_batch_stride,
296
+ const int64_t nbatch,
297
+ const int64_t channels,
298
+ const int64_t height,
299
+ const int64_t width,
300
+ const int64_t height_col,
301
+ const int64_t width_col,
302
+ const int64_t patch_height,
303
+ const int64_t patch_width,
304
+ const int64_t pad_height,
305
+ const int64_t pad_width,
306
+ const int64_t stride_height,
307
+ const int64_t stride_width,
308
+ const int64_t dilation_height,
309
+ const int64_t dilation_width,
310
+ dt* data_im,
311
+ const int64_t im_batch_stride) {
312
+ const int64_t num_kernels = channels * height * width;
313
+ const int64_t output_numel = nbatch * num_kernels;
314
+ if (output_numel == 0) {
315
+ return; // No work to do
316
+ }
317
+
318
+ // To avoid atomic operations, we launch one thread per bottom (image)
319
+ // element and have each thread sum the corresponding top (column)
320
+ // elements. 512 threads per block.
321
+ col2im_batched_kernel<<<GET_BLOCKS(output_numel, 512), 512, 0, stream>>>(
322
+ num_kernels,
323
+ data_col,
324
+ col_batch_stride,
325
+ nbatch,
326
+ height,
327
+ width,
328
+ channels,
329
+ patch_height,
330
+ patch_width,
331
+ pad_height,
332
+ pad_width,
333
+ stride_height,
334
+ stride_width,
335
+ dilation_height,
336
+ dilation_width,
337
+ height_col,
338
+ width_col,
339
+ data_im,
340
+ im_batch_stride);
341
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
342
+ }
343
+
344
+ } // namespace native
345
+ } // namespace at
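For reference, the grid size in the col2im launches above is a plain ceiling division of the element count by the block size, with one thread per element of data_im. The stand-alone sketch below illustrates that arithmetic; get_blocks_sketch is a hypothetical stand-in for PyTorch's GET_BLOCKS helper, which is defined elsewhere and not in this header.

// Hypothetical helper mirroring the <<<GET_BLOCKS(num_kernels, 512), 512, 0, stream>>>
// launches above: ceiling division, one thread per data_im element.
#include <cstdint>
#include <iostream>

int64_t get_blocks_sketch(int64_t n, int64_t threads_per_block) {
  return (n + threads_per_block - 1) / threads_per_block;
}

int main() {
  const int64_t channels = 3, height = 32, width = 32;
  const int64_t num_kernels = channels * height * width;  // 3072 output elements
  std::cout << get_blocks_sketch(num_kernels, 512)
            << " blocks of 512 threads\n";  // prints 6
  return 0;
}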
venv/lib/python3.10/site-packages/torch/include/ATen/native/cuda/reduction_template.cuh ADDED
@@ -0,0 +1,680 @@
1
+ namespace at {
2
+ namespace cuda {
3
+ // Windows doesn't like large string literals, so split in two
4
+ const std::string reduction_template_0 = R"ESCAPE(
5
+ #define C10_HOST_DEVICE __host__ __device__
6
+ #define C10_DEVICE __device__
7
+ #if defined(__clang__) && defined(__HIP__)
8
+ #ifndef __forceinline__
9
+ #define __forceinline__ inline __attribute__((always_inline))
10
+ #endif
11
+ // until ROCm support for kernel asserts is restored
12
+ #define assert(expr) (static_cast<void>(0))
13
+ #endif
14
+
15
+ template <typename T>
16
+ __device__ __forceinline__ T WARP_SHFL_DOWN(T value, unsigned int delta, int width = warpSize, unsigned int mask = 0xffffffff)
17
+ {
18
+ #if defined(__clang__) && defined(__HIP__)
19
+ return __shfl_down(value, delta, width);
20
+ #else
21
+ return __shfl_down_sync(mask, value, delta, width);
22
+ #endif
23
+ }
24
+
25
+
26
+ #if ${complex}
27
+ template <typename T>
28
+ __device__ __forceinline__ std::complex<T> WARP_SHFL_DOWN(std::complex<T> value, unsigned int delta, int width = warpSize, unsigned int mask = 0xffffffff)
29
+ {
30
+ return std::complex<T>(
31
+ #if defined(__clang__) && defined(__HIP__)
32
+ __shfl_down(value.real(), delta, width),
33
+ __shfl_down(value.imag(), delta, width));
34
+ #else
35
+ __shfl_down_sync(mask, value.real(), delta, width),
36
+ __shfl_down_sync(mask, value.imag(), delta, width));
37
+ #endif
38
+ }
39
+ #endif
40
+
41
+ // aligned vector generates vectorized load/store on CUDA
42
+ template<typename scalar_t, int vec_size>
43
+ struct alignas(sizeof(scalar_t) * vec_size) aligned_vector {
44
+ scalar_t val[vec_size];
45
+ };
46
+
47
+
48
+ C10_HOST_DEVICE static void reduce_fraction(size_t &numerator, size_t &denominator) {
49
+ // get GCD of num and denom using Euclid's algorithm.
50
+ // Can replace this with std::gcd if we ever support c++17.
51
+ size_t a = denominator;
52
+ size_t b = numerator;
53
+ while (b != 0) {
54
+ a %= b;
55
+ // swap(a,b)
56
+ size_t tmp = a;
57
+ a = b;
58
+ b = tmp;
59
+ }
60
+
61
+ // a is now the GCD
62
+ numerator /= a;
63
+ denominator /= a;
64
+ }
65
+
66
+
67
+
68
+
69
+ struct ReduceConfig {
70
+ //has to match host-side ReduceConfig in the eager code
71
+ static constexpr int BLOCK_X = 0;
72
+ static constexpr int BLOCK_Y = 1;
73
+ static constexpr int CTA = 2;
74
+
75
+ static constexpr int input_vec_size = 4;
76
+ int element_size_bytes;
77
+ int num_inputs;
78
+ int num_outputs;
79
+ int step_input = 1;
80
+ int step_output = 1;
81
+ int ctas_per_output = 1;
82
+ int input_mult[3] = {0, 0, 0};
83
+ int output_mult[2] = {0, 0};
84
+
85
+ int block_width;
86
+ int block_height;
87
+ int num_threads;
88
+
89
+ bool vectorize_input = false;
90
+ int output_vec_size = 1;
91
+
92
+ C10_HOST_DEVICE bool should_block_x_reduce() const {
93
+ return input_mult[BLOCK_X] != 0;
94
+ }
95
+
96
+ C10_HOST_DEVICE bool should_block_y_reduce() const {
97
+ return input_mult[BLOCK_Y] != 0;
98
+ }
99
+
100
+ C10_HOST_DEVICE bool should_global_reduce() const {
101
+ return input_mult[CTA] != 0;
102
+ }
103
+
104
+ C10_DEVICE bool should_store(int output_idx) const {
105
+ return output_idx < num_outputs &&
106
+ (!should_block_x_reduce() || threadIdx.x == 0) &&
107
+ (!should_block_y_reduce() || threadIdx.y == 0);
108
+ }
109
+
110
+ C10_DEVICE bool should_reduce_tail() const {
111
+ return (!should_block_y_reduce() || threadIdx.y == 0) &&
112
+ (!should_global_reduce() || blockIdx.y == 0);
113
+ }
114
+
115
+ C10_HOST_DEVICE int input_idx() const {
116
+ int lane = threadIdx.x;
117
+ int warp = threadIdx.y;
118
+ int cta2 = blockIdx.y;
119
+ return (lane * input_mult[BLOCK_X] +
120
+ warp * input_mult[BLOCK_Y] +
121
+ cta2 * input_mult[CTA]);
122
+ }
123
+
124
+ template <int output_vec_size>
125
+ C10_HOST_DEVICE int output_idx() const {
126
+ int lane = threadIdx.x;
127
+ int warp = threadIdx.y;
128
+ int cta1 = blockIdx.x;
129
+ return (lane * output_mult[BLOCK_X] +
130
+ warp * output_mult[BLOCK_Y] +
131
+ cta1 * step_output) * output_vec_size;
132
+ }
133
+
134
+ C10_DEVICE int shared_memory_offset(int offset) const {
135
+ return threadIdx.x + (threadIdx.y + offset) * blockDim.x;
136
+ }
137
+
138
+ C10_DEVICE int staging_memory_offset(int cta2) const {
139
+ int offset = cta2 + blockIdx.x * gridDim.y;
140
+ if (!should_block_x_reduce()) {
141
+ offset = threadIdx.x + offset * blockDim.x;
142
+ }
143
+ return offset;
144
+ }
145
+
146
+
147
+ };
148
+
149
+
150
+ //TODO this will need to be different for more generic reduction functions
151
+ namespace reducer {
152
+
153
+ using scalar_t = ${scalar_type};
154
+ using arg_t = ${reduction_accum_type};
155
+ using out_scalar_t = ${result_type};
156
+
157
+
158
+ inline __device__ ${functor}
159
+
160
+ inline __device__ out_scalar_t project(arg_t arg) {
161
+ return (out_scalar_t) arg;
162
+ }
163
+
164
+ inline __device__ arg_t warp_shfl_down(arg_t arg, int offset) {
165
+ return WARP_SHFL_DOWN(arg, offset);
166
+ }
167
+
168
+ inline __device__ arg_t translate_idx(arg_t acc, int64_t /*idx*/) {
169
+ return acc;
170
+ }
171
+
172
+ // wrap a normal reduction that ignores the index
173
+ inline __device__ arg_t reduce(arg_t acc, arg_t val, int64_t idx) {
174
+ return combine(acc, val);
175
+ }
176
+ }
177
+
178
+
179
+ struct ReduceJitOp {
180
+ using scalar_t = ${scalar_type};
181
+ using arg_t = ${reduction_accum_type};
182
+ using out_scalar_t = ${result_type};
183
+
184
+ using InputCalculator = OffsetCalculator<1>;
185
+ using OutputCalculator = OffsetCalculator<2>;
186
+
187
+ // static constexpr bool can_accumulate_in_output =
188
+ // std::is_convertible<arg_t, out_scalar_t>::value
189
+ // && std::is_convertible<out_scalar_t, arg_t>::value;
190
+
191
+ static constexpr int input_vec_size = ReduceConfig::input_vec_size;
192
+
193
+ arg_t ident;
194
+ ReduceConfig config;
195
+ InputCalculator input_calc;
196
+ OutputCalculator output_calc;
197
+ const void* src;
198
+ const char* dst[2]; //it accepts at most two destinations
199
+ // acc_buf is used for accumulation among sub TensorIterators when accumulating
201
+ // directly into the output is not permissible
201
+ void* acc_buf;
202
+ // cta_buf used for accumulation between blocks during global reduction
203
+ void* cta_buf;
204
+ int* semaphores;
205
+ int64_t base_idx;
206
+ bool accumulate;
207
+ bool final_output;
208
+ int noutputs;
209
+
210
+
211
+ C10_DEVICE void run() const {
212
+ extern __shared__ char shared_memory[];
213
+ uint32_t output_idx = config.output_idx<${output_vec_size}>();
214
+ uint32_t input_idx = config.input_idx();
215
+ auto base_offsets1 = output_calc.get(output_idx)[1];
216
+
217
+ using arg_vec_t = Array<arg_t, ${output_vec_size}>;
218
+ arg_vec_t value;
219
+
220
+ if (output_idx < config.num_outputs && input_idx < config.num_inputs) {
221
+ const scalar_t* input_slice = (const scalar_t*)((const char*)src + base_offsets1);
222
+
223
+ value = thread_reduce<${output_vec_size}>(input_slice);
224
+ }
225
+
226
+ if (config.should_block_y_reduce()) {
227
+ value = block_y_reduce<${output_vec_size}>(value, shared_memory);
228
+ }
229
+ if (config.should_block_x_reduce()) {
230
+ value = block_x_reduce<${output_vec_size}>(value, shared_memory);
231
+ }
232
+
233
+ using out_ptr_vec_t = Array<out_scalar_t*, ${output_vec_size}>;
234
+ using offset_vec_t = Array<uint32_t, ${output_vec_size}>;
235
+ offset_vec_t base_offsets;
236
+ out_ptr_vec_t out;
237
+
238
+ #pragma unroll
239
+ for (int i = 0; i < ${output_vec_size}; i++) {
240
+ base_offsets[i] = output_calc.get(output_idx + i)[0];
241
+ out[i] = (out_scalar_t*)((char*)dst[0] + base_offsets[i]);
242
+ }
243
+
244
+ arg_vec_t* acc = nullptr;
245
+ if (acc_buf != nullptr) {
246
+ size_t numerator = sizeof(arg_t);
247
+ size_t denominator = sizeof(out_scalar_t);
248
+ reduce_fraction(numerator, denominator);
249
+ acc = (arg_vec_t*)((char*)acc_buf + (base_offsets[0] * numerator / denominator));
250
+ }
251
+
252
+ if (config.should_global_reduce()) {
253
+ value = global_reduce<${output_vec_size}>(value, acc, shared_memory);
254
+ } else if (config.should_store(output_idx)) {
255
+ if (accumulate) {
256
+ #pragma unroll
257
+ for (int i = 0; i < ${output_vec_size}; i++) {
258
+ value[i] = reducer::translate_idx(value[i], base_idx);
259
+ }
260
+ }
261
+
262
+ if (acc == nullptr) {
263
+ if (accumulate) {
264
+ value = accumulate_in_output<${output_vec_size}>(out, value);
265
+ }
266
+ if (final_output) {
267
+ set_results_to_output<${output_vec_size}>(value, base_offsets);
268
+ } else {
269
+ #pragma unroll
270
+ for (int i = 0; i < ${output_vec_size}; i++) {
271
+ *(out[i]) = get_accumulated_output(out[i], value[i]);
272
+ }
273
+ }
274
+ } else {
275
+ if (accumulate) {
276
+ #pragma unroll
277
+ for (int i = 0; i < ${output_vec_size}; i++) {
278
+ value[i] = reducer::combine((*acc)[i], value[i]);
279
+ }
280
+ }
281
+ if (final_output) {
282
+ set_results_to_output<${output_vec_size}>(value, base_offsets);
283
+ } else {
284
+ *acc = value;
285
+ }
286
+ }
287
+ }
288
+ }
289
+
290
+ template <int output_vec_size>
291
+ C10_DEVICE Array<arg_t, output_vec_size> thread_reduce(const scalar_t* data) const {
292
+ if (config.vectorize_input) {
293
+ assert(output_vec_size == 1);
294
+ // reduce at the header of input_slice where memory is not aligned,
295
+ // so that thread_reduce will have an aligned memory to work on.
296
+ return {input_vectorized_thread_reduce_impl(data)};
297
+ } else {
298
+ uint32_t element_stride = input_calc.strides_[0][0] / sizeof(scalar_t);
299
+ bool is_contiguous = (input_calc.dims == 1 && element_stride == 1);
300
+ if (is_contiguous) {
301
+ return thread_reduce_impl<output_vec_size>(data, [](uint32_t idx) { return idx; });
302
+ } else if (input_calc.dims == 1) {
303
+ return thread_reduce_impl<output_vec_size>(data, [&](uint32_t idx) { return idx * element_stride; });
304
+ } else {
305
+ return thread_reduce_impl<output_vec_size>(data, [&](uint32_t idx) { return input_calc.get(idx)[0] / sizeof(scalar_t); });
306
+ }
307
+ }
308
+ }
309
+
310
+ C10_DEVICE arg_t input_vectorized_thread_reduce_impl(const scalar_t* data) const {
311
+ uint32_t end = config.num_inputs;
312
+
313
+ // Handle the head of input slice where data is not aligned
314
+ arg_t value = ident;
315
+ constexpr int align_bytes = alignof(aligned_vector<scalar_t, input_vec_size>);
316
+ constexpr int align_elements = align_bytes / sizeof(scalar_t);
317
+ int shift = ((int64_t)data) % align_bytes / sizeof(scalar_t);
318
+ if (shift > 0) {
319
+ data -= shift;
320
+ end += shift;
321
+ if(threadIdx.x >= shift && threadIdx.x < align_elements && config.should_reduce_tail()){
322
+ value = reducer::reduce(value, data[threadIdx.x], threadIdx.x - shift);
323
+ }
324
+ end -= align_elements;
325
+ data += align_elements;
326
+ shift = align_elements - shift;
327
+ }
328
+
329
+ // Do the vectorized reduction
330
+ using load_t = aligned_vector<scalar_t, input_vec_size>;
331
+
332
+ uint32_t idx = config.input_idx();
333
+ const uint32_t stride = config.step_input;
334
+
335
+ // Multiple accumulators to remove dependency between unrolled loops.
336
+ arg_t value_list[input_vec_size];
337
+ value_list[0] = value;
338
+
339
+ #pragma unroll
340
+ for (int i = 1; i < input_vec_size; i++) {
341
+ value_list[i] = ident;
342
+ }
343
+
344
+ scalar_t values[input_vec_size];
345
+
346
+ load_t *values_vector = reinterpret_cast<load_t*>(&values[0]);
347
+
348
+ while (idx * input_vec_size + input_vec_size - 1 < end) {
349
+ *values_vector = reinterpret_cast<const load_t*>(data)[idx];
350
+ #pragma unroll
351
+ for (uint32_t i = 0; i < input_vec_size; i++) {
352
+ value_list[i] = reducer::reduce(value_list[i], values[i], shift + idx * input_vec_size + i);
353
+ }
354
+ idx += stride;
355
+ }
356
+
357
+ // tail
358
+ uint32_t tail_start = end - end % input_vec_size;
359
+ if (config.should_reduce_tail()) {
360
+ int idx = tail_start + threadIdx.x;
361
+ if (idx < end) {
362
+ value_list[0] = reducer::reduce(value_list[0], data[idx], idx + shift);
363
+ }
364
+ }
365
+
366
+ // combine accumulators
367
+ #pragma unroll
368
+ for (int i = 1; i < input_vec_size; i++) {
369
+ value_list[0] = reducer::combine(value_list[0], value_list[i]);
370
+ }
371
+ return value_list[0];
372
+ }
373
+
374
+ template <int output_vec_size, typename offset_calc_t>
375
+ C10_DEVICE Array<arg_t, output_vec_size> thread_reduce_impl(const scalar_t* data_, offset_calc_t calc) const {
376
+ uint32_t idx = config.input_idx();
377
+ const uint32_t end = config.num_inputs;
378
+ const uint32_t stride = config.step_input;
379
+ const int vt0=${vt0};
380
+
381
+ using arg_vec_t = Array<arg_t, output_vec_size>;
382
+ using load_t = aligned_vector<scalar_t, output_vec_size>;
383
+ const load_t* data = reinterpret_cast<const load_t*>(data_);
384
+
385
+ // Multiple accumulators to remove dependency between unrolled loops.
386
+ arg_vec_t value_list[vt0];
387
+
388
+ #pragma unroll
389
+ for (int i = 0; i < vt0; i++) {
390
+ #pragma unroll
391
+ for (int j = 0; j < output_vec_size; j++) {
392
+ value_list[i][j] = ident;
393
+ }
394
+ }
395
+
396
+ load_t values[vt0];
397
+
398
+ while (idx + (vt0 - 1) * stride < end) {
399
+ #pragma unroll
400
+ for (uint32_t i = 0; i < vt0; i++) {
401
+ values[i] = data[calc(idx + i * stride) / output_vec_size];
402
+ }
403
+ #pragma unroll
404
+ for (uint32_t i = 0; i < vt0; i++) {
405
+ #pragma unroll
406
+ for (uint32_t j = 0; j < output_vec_size; j++) {
407
+ value_list[i][j] = reducer::reduce(value_list[i][j], values[i].val[j], idx + i * stride);
408
+ }
409
+ }
410
+ idx += stride * vt0;
411
+ }
412
+
413
+ // tail
414
+ int idx_ = idx;
415
+ #pragma unroll
416
+ for (uint32_t i = 0; i < vt0; i++) {
417
+ if (idx >= end) {
418
+ break;
419
+ }
420
+ values[i] = data[calc(idx) / output_vec_size];
421
+ idx += stride;
422
+ }
423
+ idx = idx_;
424
+ #pragma unroll
425
+ for (uint32_t i = 0; i < vt0; i++) {
426
+ if (idx >= end) {
427
+ break;
428
+ }
429
+ #pragma unroll
430
+ for (uint32_t j = 0; j < output_vec_size; j++) {
431
+ value_list[i][j] = reducer::reduce(value_list[i][j], values[i].val[j], idx);
432
+ }
433
+ idx += stride;
434
+ }
435
+
436
+ // combine accumulators
437
+ #pragma unroll
438
+ for (int i = 1; i < vt0; i++) {
439
+ #pragma unroll
440
+ for (uint32_t j = 0; j < output_vec_size; j++) {
441
+ value_list[0][j] = reducer::combine(value_list[0][j], value_list[i][j]);
442
+ }
443
+ }
444
+ return value_list[0];
445
+ }
446
+ template <int output_vec_size>
447
+ C10_DEVICE Array<arg_t, output_vec_size> block_x_reduce(Array<arg_t, output_vec_size> value, char* shared_memory) const {
448
+ using args_vec_t = Array<arg_t, output_vec_size>;
449
+ int dim_x = blockDim.x;
450
+ args_vec_t* shared = (args_vec_t*)shared_memory;
451
+ if (dim_x > warpSize) {
452
+ int address_base = threadIdx.x + threadIdx.y*blockDim.x;
453
+ shared[address_base] = value;
454
+ for (int offset = dim_x/2; offset >= warpSize; offset >>= 1) {
455
+ __syncthreads();
456
+ if (threadIdx.x < offset && threadIdx.x + offset < blockDim.x) {
457
+ args_vec_t other = shared[address_base + offset];
458
+ #pragma unroll
459
+ for (int i = 0; i < output_vec_size; i++) {
460
+ value[i] = reducer::combine(value[i], other[i]);
461
+ }
462
+ shared[address_base] = value;
463
+ }
464
+ }
465
+ dim_x = warpSize;
466
+ }
467
+
468
+ __syncthreads();
469
+
470
+ for (int offset = 1; offset < dim_x; offset <<= 1) {
471
+ #pragma unroll
472
+ for (int i = 0; i < output_vec_size; i++) {
473
+ arg_t other = reducer::warp_shfl_down(value[i], offset);
474
+ value[i] = reducer::combine(value[i], other);
475
+ }
476
+ }
477
+ return value;
478
+ }
479
+
480
+ template <int output_vec_size>
481
+ C10_DEVICE Array<arg_t, output_vec_size> block_y_reduce(Array<arg_t, output_vec_size> value, char* shared_memory) const {
482
+ using args_vec_t = Array<arg_t, output_vec_size>;
483
+ args_vec_t* shared = (args_vec_t*)shared_memory;
484
+ shared[config.shared_memory_offset(0)] = value;
485
+ for (int offset = blockDim.y / 2; offset > 0; offset >>= 1) {
486
+ __syncthreads();
487
+ if (threadIdx.y < offset && threadIdx.y + offset < blockDim.y) {
488
+ args_vec_t other = shared[config.shared_memory_offset(offset)];
489
+ #pragma unroll
490
+ for (int i = 0; i < output_vec_size; i++) {
491
+ value[i] = reducer::combine(value[i], other[i]);
492
+ }
493
+ shared[config.shared_memory_offset(0)] = value;
494
+ }
495
+ }
496
+ return value;
497
+ }
498
+ )ESCAPE";
499
+
500
+ const std::string reduction_template_1 = R"ESCAPE(
501
+
502
+ C10_DEVICE bool mark_block_finished() const {
503
+ __shared__ bool is_last_block_done_shared;
504
+
505
+ __syncthreads();
506
+ if (threadIdx.x == 0 && threadIdx.y == 0) {
507
+ int prev_blocks_finished = atomicAdd(&semaphores[blockIdx.x], 1);
508
+ is_last_block_done_shared = (prev_blocks_finished == gridDim.y - 1);
509
+ }
510
+
511
+ __syncthreads();
512
+
513
+ return is_last_block_done_shared;
514
+ }
515
+
516
+ template <int output_vec_size>
517
+ C10_DEVICE Array<arg_t, output_vec_size> accumulate_in_output(
518
+ Array<out_scalar_t*, output_vec_size> out,
519
+ Array<arg_t, output_vec_size> value
520
+ ) const {
521
+ Array<arg_t, output_vec_size> ret;
522
+ #pragma unroll
523
+ for (int i = 0; i < output_vec_size; i++) {
524
+ ret[i] = reducer::combine(*(out[i]), value[i]);
525
+ }
526
+ return ret;
527
+ }
528
+
529
+
530
+ C10_DEVICE out_scalar_t get_accumulated_output(
531
+ out_scalar_t* out, arg_t value
532
+ ) const {
533
+ assert(!final_output);
534
+ return (out_scalar_t)value;
535
+ }
536
+
537
+ template<class T>
538
+ C10_DEVICE void set_results(const T x, const uint32_t base_offset) const {
539
+ assert(noutputs == 1);
540
+ auto res = (out_scalar_t*)((char*)dst[0] + base_offset);
541
+ *res = x;
542
+ }
543
+
544
+ //TODO - multi-output reduction - we won't be able to use thrust::pair
545
+ //just explicitly specify typed output reads/writes
546
+ //Currently implemented for max of two outputs
547
+ // template<class T1, class T2>
548
+ // C10_DEVICE void set_results(const thrust::pair<T1, T2> x, const index_t base_offset) const {
549
+ // if (noutputs >= 1) {
550
+ // auto res0 = (T1*)((char*)dst[0] + base_offset);
551
+ // *res0 = x.first;
552
+ // }
553
+ // if (noutputs >= 2) {
554
+ // // base offset is computed assuming element size being sizeof(T1), so we need to make a
555
+ // // correction to obtain the correct base offset
556
+ // auto res1 = (T2*) ((char *) dst[1] + base_offset / sizeof(T1) * sizeof(T2));
557
+ // *res1 = x.second;
558
+ // }
559
+ // }
560
+
561
+ template <int output_vec_size>
562
+ C10_DEVICE void set_results_to_output(Array<arg_t, output_vec_size> value, Array<uint32_t, output_vec_size> base_offset) const {
563
+ assert(final_output);
564
+ #pragma unroll
565
+ for (int i = 0; i < output_vec_size; i++) {
566
+ set_results(reducer::project(value[i]), base_offset[i]);
567
+ }
568
+ }
569
+
570
+ template <int output_vec_size>
571
+ C10_DEVICE Array<arg_t, output_vec_size> global_reduce(Array<arg_t, output_vec_size> value, Array<arg_t, output_vec_size> *acc, char* shared_memory) const {
572
+ using arg_vec_t = Array<arg_t, output_vec_size>;
573
+ using out_ptr_vec_t = Array<out_scalar_t*, output_vec_size>;
574
+ using offset_vec_t = Array<uint32_t, output_vec_size>;
575
+
576
+ arg_vec_t* reduce_buffer = (arg_vec_t*)cta_buf;
577
+ uint32_t output_idx = config.output_idx<output_vec_size>();
578
+ offset_vec_t base_offsets;
579
+ out_ptr_vec_t out;
580
+
581
+ #pragma unroll
582
+ for (int i = 0; i < output_vec_size; i++) {
583
+ base_offsets[i] = output_calc.get(output_idx + i)[0];
584
+ out[i] = (out_scalar_t*)((char*)dst[0] + base_offsets[i]);
585
+ }
586
+
587
+ bool should_store = config.should_store(output_idx);
588
+ if (should_store) {
589
+ uint32_t offset = config.staging_memory_offset(blockIdx.y);
590
+ reduce_buffer[offset] = value;
591
+ }
592
+
593
+ __threadfence(); // make sure writes are globally visible
594
+ __syncthreads(); // if multiple warps in this block wrote to staging, make sure they're all done
595
+ bool is_last_block_done = mark_block_finished();
596
+
597
+ if (is_last_block_done) {
598
+ value = ident;
599
+ if (config.should_block_x_reduce()) {
600
+ uint32_t input_offset = threadIdx.x + threadIdx.y * blockDim.x;
601
+ uint32_t step = blockDim.x * blockDim.y;
602
+ for (; input_offset < config.ctas_per_output; input_offset += step) {
603
+ uint32_t idx = config.staging_memory_offset(input_offset);
604
+ arg_vec_t next = reduce_buffer[idx];
605
+ #pragma unroll
606
+ for (int i = 0; i < output_vec_size; i++) {
607
+ value[i] = reducer::combine(value[i], next[i]);
608
+ }
609
+ }
610
+ } else {
611
+ uint32_t input_offset = threadIdx.y;
612
+ uint32_t step = blockDim.y;
613
+ for (; input_offset < config.ctas_per_output; input_offset += step) {
614
+ uint32_t idx = config.staging_memory_offset(input_offset);
615
+ arg_vec_t next = reduce_buffer[idx];
616
+ #pragma unroll
617
+ for (int i = 0; i < output_vec_size; i++) {
618
+ value[i] = reducer::combine(value[i], next[i]);
619
+ }
620
+ }
621
+ }
622
+ value = block_y_reduce(value, shared_memory);
623
+ if (config.should_block_x_reduce()) {
624
+ value = block_x_reduce<output_vec_size>(value, shared_memory);
625
+ }
626
+ if (should_store) {
627
+ if (accumulate) {
628
+ #pragma unroll
629
+ for (int i = 0; i < output_vec_size; i++) {
630
+ value[i] = reducer::translate_idx(value[i], base_idx);
631
+ }
632
+ }
633
+
634
+ if (acc == nullptr) {
635
+ if (accumulate) {
636
+ value = accumulate_in_output<output_vec_size>(out, value);
637
+ }
638
+ if (final_output) {
639
+ set_results_to_output<output_vec_size>(value, base_offsets);
640
+ } else {
641
+ #pragma unroll
642
+ for (int i = 0; i < output_vec_size; i++) {
643
+ *(out[i]) = get_accumulated_output(out[i], value[i]);
644
+ }
645
+ }
646
+ } else {
647
+ if (accumulate) {
648
+ #pragma unroll
649
+ for (int i = 0; i < output_vec_size; i++) {
650
+ value[i] = reducer::combine((*acc)[i], value[i]);
651
+ }
652
+ }
653
+ if (final_output) {
654
+ set_results_to_output<output_vec_size>(value, base_offsets);
655
+ } else {
656
+ *acc = value;
657
+ }
658
+ }
659
+ }
660
+ }
661
+
662
+ return value;
663
+ }
664
+ };
665
+
666
+ extern "C"
667
+ __launch_bounds__(${max_threads_lb}, 4)
668
+ __global__ void reduction_${name}_kernel(ReduceJitOp r){
669
+ r.run();
670
+ }
671
+ )ESCAPE";
672
+
673
+ const std::string reduction_template = reduction_template_0 + reduction_template_1;
674
+
675
+
676
+ const std::string &get_reduction_template() {
677
+ return reduction_template;
678
+ }
679
+
680
+ }}
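The ${scalar_type}, ${reduction_accum_type}, ${result_type}, ${functor}, ${output_vec_size}, ${vt0}, ${max_threads_lb} and ${name} placeholders in the template above are substituted with concrete strings before the source is handed to the runtime compiler. The snippet below is a minimal, purely illustrative substitution helper under that assumption; it is not the substitution code PyTorch actually uses.

// Minimal sketch of filling in the ${...} placeholders before NVRTC compilation.
// The real substitution logic lives in PyTorch's jit utilities; this helper is
// illustrative only.
#include <iostream>
#include <map>
#include <string>

std::string substitute(std::string src,
                       const std::map<std::string, std::string>& env) {
  for (const auto& kv : env) {
    const std::string key = "${" + kv.first + "}";
    for (size_t pos = src.find(key); pos != std::string::npos;
         pos = src.find(key, pos + kv.second.size())) {
      src.replace(pos, key.size(), kv.second);
    }
  }
  return src;
}

int main() {
  const std::string templ =
      "using scalar_t = ${scalar_type};\n"
      "extern \"C\" __launch_bounds__(${max_threads_lb}, 4)\n"
      "__global__ void reduction_${name}_kernel(ReduceJitOp r);\n";
  std::cout << substitute(templ, {{"scalar_type", "float"},
                                  {"max_threads_lb", "512"},
                                  {"name", "sum"}});
  return 0;
}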
venv/lib/python3.10/site-packages/torch/include/ATen/native/quantized/AffineQuantizer.h ADDED
@@ -0,0 +1,130 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/Tensor.h>
4
+ #include <ATen/Dispatch.h>
5
+ #include <ATen/native/DispatchStub.h>
6
+ #include <ATen/native/quantized/AffineQuantizerBase.h>
7
+
8
+ namespace at {
9
+ namespace native {
10
+
11
+ Tensor& quantize_tensor_per_tensor_affine(
12
+ const Tensor& rtensor,
13
+ Tensor& qtensor,
14
+ double scale,
15
+ int64_t zero_point);
16
+ Tensor& quantize_tensor_per_channel_affine(
17
+ const Tensor& rtensor,
18
+ Tensor& qtensor,
19
+ Tensor scales,
20
+ Tensor zero_points,
21
+ int64_t axis);
22
+
23
+ Tensor& quantize_tensor_per_channel_float_qparams(
24
+ const Tensor& rtensor,
25
+ Tensor& qtensor,
26
+ Tensor scales,
27
+ Tensor zero_points,
28
+ int64_t axis);
29
+
30
+ Tensor& dequantize_tensor_per_tensor_affine(
31
+ const Tensor& qtensor,
32
+ Tensor& rtensor,
33
+ double scale,
34
+ int64_t zero_point);
35
+ Tensor& dequantize_tensor_per_channel_affine(
36
+ const Tensor& qtensor,
37
+ Tensor& rtensor,
38
+ Tensor scales,
39
+ Tensor zero_points,
40
+ int64_t axis);
41
+ Tensor& dequantize_tensor_per_channel_float_qparams(
42
+ const Tensor& qtensor,
43
+ Tensor& rtensor,
44
+ Tensor scales,
45
+ Tensor zero_points,
46
+ int64_t axis);
47
+
48
+ using quantize_tensor_per_tensor_affine_fn =
49
+ void (*)(const Tensor& rtensor, Tensor& qtensor, double scale, int64_t zero_point);
50
+
51
+ using quantize_tensor_per_channel_affine_fn = void (*)(
52
+ const Tensor& rtensor,
53
+ Tensor& qtensor,
54
+ const Tensor& scales,
55
+ const Tensor& zero_points,
56
+ int64_t axis);
57
+
58
+ using quantize_tensor_per_channel_float_qparams_fn = void (*)(
59
+ const Tensor& rtensor,
60
+ Tensor& qtensor,
61
+ const Tensor& scales,
62
+ const Tensor& zero_points,
63
+ int64_t axis);
64
+
65
+ using dequantize_tensor_per_tensor_affine_fn =
66
+ void (*)(const Tensor& qtensor, Tensor& rtensor, double scale, int64_t zero_point);
67
+
68
+ using dequantize_tensor_per_channel_affine_fn = void (*)(
69
+ const Tensor& qtensor,
70
+ Tensor& rtensor,
71
+ const Tensor& scales,
72
+ const Tensor& zero_points,
73
+ int64_t axis);
74
+
75
+ using dequantize_tensor_per_channel_float_qparams_fn = void (*)(
76
+ const Tensor& qtensor,
77
+ Tensor& rtensor,
78
+ const Tensor& scales,
79
+ const Tensor& zero_points,
80
+ int64_t axis);
81
+
82
+ using quantize_tensor_per_tensor_affine_sub_byte_fn =
83
+ void (*)(const Tensor& rtensor, Tensor& qtensor, float scale, float zero_point);
84
+
85
+ using dequantize_tensor_per_tensor_affine_sub_byte_fn =
86
+ void (*)(const Tensor& qtensor, Tensor& rtensor, float scale, float zero_point);
87
+
88
+ DECLARE_DISPATCH(
89
+ quantize_tensor_per_tensor_affine_fn,
90
+ quantize_tensor_per_tensor_affine_stub);
91
+ DECLARE_DISPATCH(
92
+ quantize_tensor_per_channel_affine_fn,
93
+ quantize_tensor_per_channel_affine_stub);
94
+ DECLARE_DISPATCH(
95
+ quantize_tensor_per_channel_float_qparams_fn,
96
+ quantize_tensor_per_channel_float_qparams_stub);
97
+
98
+ DECLARE_DISPATCH(
99
+ dequantize_tensor_per_tensor_affine_fn,
100
+ dequantize_tensor_per_tensor_affine_stub);
101
+ DECLARE_DISPATCH(
102
+ dequantize_tensor_per_channel_affine_fn,
103
+ dequantize_tensor_per_channel_affine_stub);
104
+ DECLARE_DISPATCH(
105
+ dequantize_tensor_per_channel_float_qparams_fn,
106
+ dequantize_tensor_per_channel_float_qparams_stub);
107
+
108
+ DECLARE_DISPATCH(
109
+ quantize_tensor_per_tensor_affine_sub_byte_fn,
110
+ quantize_tensor_per_tensor_affine_sub_byte_stub);
111
+
112
+ DECLARE_DISPATCH(
113
+ dequantize_tensor_per_tensor_affine_sub_byte_fn,
114
+ dequantize_tensor_per_tensor_affine_sub_byte_stub);
115
+
116
+ template <typename T>
117
+ TORCH_API Tensor quantize_tensor(
118
+ Tensor rtensor,
119
+ Tensor qtensor,
120
+ double scale,
121
+ int64_t zero_point);
122
+ template <typename T>
123
+ TORCH_API Tensor dequantize_tensor(
124
+ Tensor qtensor,
125
+ Tensor rtensor,
126
+ double scale,
127
+ int64_t zero_point);
128
+
129
+ } // namespace native
130
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/native/quantized/AffineQuantizerBase.h ADDED
@@ -0,0 +1,47 @@
1
+ #pragma once
2
+ #include <c10/macros/Export.h>
3
+ #include <c10/core/ScalarType.h>
4
+
5
+ namespace at {
6
+ namespace native {
7
+
8
+ // Quantize a float value into a uint value given scale and zero_point
9
+ template <typename T>
10
+ TORCH_API T quantize_val(double scale, int64_t zero_point, float value);
11
+ // TODO combine this with quantize_val once the numerics for ARM are aligned
12
+ // with it
13
+ template <typename T>
14
+ T quantize_val_arm(
15
+ const float scale,
16
+ const int32_t zero_point,
17
+ const float value);
18
+ template <typename T, int precision = 8>
19
+ void quantize_vec(
20
+ double scale,
21
+ int64_t zero_point,
22
+ const float* src,
23
+ T* dst,
24
+ size_t count = 8);
25
+ template <typename T>
26
+ TORCH_API float dequantize_val(double scale, int64_t zero_point, T value);
27
+ template <typename T>
28
+ TORCH_API float dequantize_vec(
29
+ double scale,
30
+ int64_t zero_point,
31
+ const T* src,
32
+ float* dst,
33
+ size_t count = 8);
34
+ template <typename SRC_T, typename DST_T>
35
+ TORCH_API DST_T requantize_val(double, int64_t, double, int64_t, SRC_T src);
36
+
37
+ // Given a multiplier and a zero_point, requantize int32_t computed values back
38
+ // to quantized values. See comment above
39
+ // make_per_tensor_affine_quantizer function for the usage of int64_t
40
+ template <typename DST_T>
41
+ TORCH_API DST_T
42
+ requantize_from_int(double multiplier, int64_t zero_point, int64_t src);
43
+
44
+ int quantize_val_float_qparams(float scale, float zero_point, float value, int qmin, int qmax);
45
+
46
+ } // namespace native
47
+ } // namespace at
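The declarations above only fix signatures; the arithmetic they implement elsewhere is the usual per-tensor affine mapping q = clamp(round(x / scale) + zero_point, qmin, qmax) and x ≈ scale * (q - zero_point). A minimal scalar sketch follows, assuming a signed 8-bit target range (the actual qmin/qmax depend on the quantized dtype).

// Illustrative sketch of the per-tensor affine mapping these declarations
// implement elsewhere; the clamp range below assumes a signed 8-bit type.
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>

int8_t quantize_val_sketch(double scale, int64_t zero_point, float value) {
  const int64_t qmin = -128, qmax = 127;
  int64_t q = static_cast<int64_t>(std::nearbyint(value / scale)) + zero_point;
  return static_cast<int8_t>(std::min(qmax, std::max(qmin, q)));
}

float dequantize_val_sketch(double scale, int64_t zero_point, int8_t q) {
  return static_cast<float>(scale * (q - zero_point));
}

int main() {
  const double scale = 0.05;
  const int64_t zero_point = 10;
  int8_t q = quantize_val_sketch(scale, zero_point, 1.234f);  // round(24.68) + 10 = 35
  std::cout << int(q) << " -> "
            << dequantize_val_sketch(scale, zero_point, q) << "\n";  // 35 -> 1.25
  return 0;
}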
venv/lib/python3.10/site-packages/torch/include/ATen/native/quantized/ConvUtils.h ADDED
@@ -0,0 +1,62 @@
1
+ #pragma once
2
+ #include <ATen/core/List.h>
3
+ #include <ATen/native/ConvUtils.h>
4
+
5
+ namespace at::native::quantized {
6
+ namespace {
7
+ // MakeConvOutputShape is used from both the CPU and CUDA libraries,
8
+ // and exporting the symbol from torch_cpu would probably take more storage
9
+ // than duplicating the implementation, which will likely be inlined away.
10
+ template <int kSpatialDim>
11
+ at::SmallVector<int64_t, kSpatialDim + 2> MakeConvOutputShape(
12
+ int N, // mini-batch
13
+ int M, // output channels
14
+ const std::array<int64_t, kSpatialDim>& input_image_shape,
15
+ const std::vector<int64_t>& kernel,
16
+ const torch::List<int64_t>& stride,
17
+ const torch::List<int64_t>& padding,
18
+ const torch::List<int64_t>& dilation);
19
+
20
+ #if defined(USE_CUDA) || defined(USE_PYTORCH_QNNPACK)
21
+ template <>
22
+ at::SmallVector<int64_t, 4> MakeConvOutputShape<2>(
23
+ int N, // mini-batch
24
+ int M, // output channels
25
+ const std::array<int64_t, 2>& input_image_shape,
26
+ const std::vector<int64_t>& kernel,
27
+ const at::List<int64_t>& stride,
28
+ const at::List<int64_t>& padding,
29
+ const at::List<int64_t>& dilation) {
30
+ const int H = input_image_shape[0];
31
+ const int W = input_image_shape[1];
32
+ const int64_t Y_H =
33
+ (H + 2 * padding[0] - dilation[0] * (kernel[0] - 1) - 1) / stride[0] + 1;
34
+ const int64_t Y_W =
35
+ (W + 2 * padding[1] - dilation[1] * (kernel[1] - 1) - 1) / stride[1] + 1;
36
+ return {N, M, Y_H, Y_W};
37
+ }
38
+
39
+ template <>
40
+ at::SmallVector<int64_t, 5> MakeConvOutputShape<3>(
41
+ int N, // mini-batch
42
+ int M, // output channels
43
+ const std::array<int64_t, 3>& input_image_shape,
44
+ const std::vector<int64_t>& kernel,
45
+ const at::List<int64_t>& stride,
46
+ const at::List<int64_t>& padding,
47
+ const torch::List<int64_t>& dilation) {
48
+ const int D = input_image_shape[0];
49
+ const int H = input_image_shape[1];
50
+ const int W = input_image_shape[2];
51
+ const int64_t Y_D =
52
+ (D + 2 * padding[0] - dilation[0] * (kernel[0] - 1) - 1) / stride[0] + 1;
53
+ const int64_t Y_H =
54
+ (H + 2 * padding[1] - dilation[1] * (kernel[1] - 1) - 1) / stride[1] + 1;
55
+ const int64_t Y_W =
56
+ (W + 2 * padding[2] - dilation[2] * (kernel[2] - 1) - 1) / stride[2] + 1;
57
+ return {N, M, Y_D, Y_H, Y_W};
58
+ }
59
+
60
+ #endif
61
+ } // anonymous namespace
62
+ } // namespace at::native::quantized
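The two specializations above apply the standard output-size relation (in + 2 * pad - dilation * (kernel - 1) - 1) / stride + 1 per spatial dimension. Below is a stand-alone 2-D sketch of the same computation, using plain std::array and std::vector in place of at::SmallVector and torch::List.

// Stand-alone sketch of the 2-D specialization above.
#include <array>
#include <cstdint>
#include <iostream>
#include <vector>

std::vector<int64_t> make_conv_output_shape_2d(
    int64_t N, int64_t M,
    const std::array<int64_t, 2>& input_hw,
    const std::array<int64_t, 2>& kernel,
    const std::array<int64_t, 2>& stride,
    const std::array<int64_t, 2>& padding,
    const std::array<int64_t, 2>& dilation) {
  const int64_t Y_H = (input_hw[0] + 2 * padding[0] -
                       dilation[0] * (kernel[0] - 1) - 1) / stride[0] + 1;
  const int64_t Y_W = (input_hw[1] + 2 * padding[1] -
                       dilation[1] * (kernel[1] - 1) - 1) / stride[1] + 1;
  return {N, M, Y_H, Y_W};
}

int main() {
  // 8 output channels over a 32x32 image, 3x3 kernel, stride 2, padding 1 -> 16x16.
  auto shape = make_conv_output_shape_2d(1, 8, {32, 32}, {3, 3}, {2, 2}, {1, 1}, {1, 1});
  for (auto d : shape) std::cout << d << ' ';  // prints: 1 8 16 16
  std::cout << '\n';
  return 0;
}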
venv/lib/python3.10/site-packages/torch/include/ATen/native/quantized/FakeQuantAffine.h ADDED
@@ -0,0 +1,67 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/Tensor.h>
4
+ #include <ATen/Dispatch.h>
5
+ #include <ATen/native/DispatchStub.h>
6
+
7
+ namespace at {
8
+
9
+ struct TensorIterator;
10
+
11
+ namespace native {
12
+
13
+ using fake_quant_tensor_cachemask_fn = void (*)(
14
+ Tensor& output,
15
+ Tensor& mask,
16
+ const Tensor& input,
17
+ float sc,
18
+ int64_t z_point,
19
+ int64_t quant_min,
20
+ int64_t quant_max);
21
+
22
+ using fake_quant_tensor_cachemask_tensor_qparams_fn = void (*)(
23
+ Tensor& output,
24
+ Tensor& mask,
25
+ const Tensor& input,
26
+ const Tensor& sc,
27
+ const Tensor& z_point,
28
+ const Tensor& fake_quant_enabled,
29
+ int64_t quant_min,
30
+ int64_t quant_max);
31
+
32
+ using fake_quant_learnable_grad_tensor_fn = void (*)(
33
+ TensorIterator& iter,
34
+ float scale,
35
+ float inv_scale,
36
+ int64_t zero_point,
37
+ int64_t quant_min,
38
+ int64_t quant_max,
39
+ float grad_factor);
40
+
41
+ DECLARE_DISPATCH(fake_quant_tensor_cachemask_fn, fake_quant_tensor_cachemask_stub);
42
+ DECLARE_DISPATCH(fake_quant_tensor_cachemask_tensor_qparams_fn, fake_quant_tensor_cachemask_tensor_qparams_stub);
43
+ DECLARE_DISPATCH(fake_quant_learnable_grad_tensor_fn, fake_quant_grad_learnable_tensor_stub);
44
+
45
+ using fake_quant_per_channel_fn = void (*)(
46
+ TensorIterator &iter,
47
+ int64_t quant_min,
48
+ int64_t quant_max);
49
+
50
+ using fake_quant_per_channel_cachemask_fn = void (*)(
51
+ TensorIterator &iter,
52
+ TensorIterator &iter_mask,
53
+ int64_t quant_min,
54
+ int64_t quant_max);
55
+
56
+ DECLARE_DISPATCH(fake_quant_per_channel_cachemask_fn, fake_quant_per_channel_cachemask_stub);
57
+
58
+ using fake_quant_learnable_per_channel_fn = void (*)(
59
+ TensorIterator &iter,
60
+ int64_t quant_min,
61
+ int64_t quant_max,
62
+ float grad_factor);
63
+
64
+ DECLARE_DISPATCH(fake_quant_learnable_per_channel_fn, fake_quant_grad_learnable_channel_stub);
65
+
66
+ } // namespace native
67
+ } // namespace at
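These typedefs only pin down the kernel signatures. The forward computation the cachemask stubs implement elsewhere is, per element, quantize-clamp-dequantize plus a mask recording whether the rounded value stayed inside [quant_min, quant_max]; the scalar sketch below illustrates that under the usual affine formula and is not the tensor kernel itself.

// Illustrative scalar version of fake-quantize-with-cachemask: quantize,
// clamp, dequantize, and record whether the value landed inside
// [quant_min, quant_max]. The real kernels operate on whole tensors.
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>

struct FakeQuantResult {
  float output;
  bool mask;  // true if the rounded value was inside the clamp range
};

FakeQuantResult fake_quantize_scalar(float x, float scale, int64_t zero_point,
                                     int64_t quant_min, int64_t quant_max) {
  const int64_t q = static_cast<int64_t>(std::nearbyint(x / scale)) + zero_point;
  const int64_t q_clamped = std::min(quant_max, std::max(quant_min, q));
  return {static_cast<float>((q_clamped - zero_point) * scale),
          q >= quant_min && q <= quant_max};
}

int main() {
  auto r = fake_quantize_scalar(0.73f, 0.1f, 0, -128, 127);
  std::cout << r.output << " mask=" << r.mask << "\n";  // 0.7 mask=1
  return 0;
}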
venv/lib/python3.10/site-packages/torch/include/ATen/native/quantized/IndexKernel.h ADDED
@@ -0,0 +1,14 @@
1
+ #pragma once
2
+ #include <ATen/native/TensorIterator.h>
3
+
4
+ namespace at {
5
+ namespace native {
6
+ using masked_fill_kernel_quantized_fn = void(*)(TensorIterator& iter, const Scalar& value, double scale, int zero_point);
7
+ using index_put_kernel_quantized_fn = void(*)(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride, bool accumulate, double scale, int zero_point);
8
+
9
+ DECLARE_DISPATCH(masked_fill_kernel_quantized_fn, masked_fill_kernel_quantized_stub);
10
+ DECLARE_DISPATCH(index_put_kernel_quantized_fn, index_put_kernel_quantized_stub);
11
+
12
+
13
+ } // native
14
+ } // at
venv/lib/python3.10/site-packages/torch/include/c10/util/AlignOf.h ADDED
@@ -0,0 +1,176 @@
1
+ //===--- AlignOf.h - Portable calculation of type alignment -----*- C++ -*-===//
2
+ //
3
+ // The LLVM Compiler Infrastructure
4
+ //
5
+ // This file is distributed under the University of Illinois Open Source
6
+ // License. See LICENSE.TXT for details.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+ //
10
+ // This file defines the AlignedCharArray and AlignedCharArrayUnion classes.
11
+ //
12
+ //===----------------------------------------------------------------------===//
13
+
14
+ // ATen: modified from llvm::AlignOf
15
+ // replaced LLVM_ALIGNAS with alignas
16
+
17
+ #pragma once
18
+
19
+ #include <cstddef>
20
+
21
+ namespace c10 {
22
+
23
+ /// \struct AlignedCharArray
24
+ /// \brief Helper for building an aligned character array type.
25
+ ///
26
+ /// This template is used to explicitly build up a collection of aligned
27
+ /// character array types. We have to build these up using a macro and explicit
28
+ /// specialization to cope with MSVC (at least till 2015) where only an
29
+ /// integer literal can be used to specify an alignment constraint. Once built
30
+ /// up here, we can then begin to indirect between these using normal C++
31
+ /// template parameters.
32
+
33
+ // MSVC requires special handling here.
34
+ #ifndef _MSC_VER
35
+
36
+ template <size_t Alignment, size_t Size>
37
+ struct AlignedCharArray {
38
+ // NOLINTNEXTLINE(*c-arrays)
39
+ alignas(Alignment) char buffer[Size];
40
+ };
41
+
42
+ #else // _MSC_VER
43
+
44
+ /// \brief Create a type with an aligned char buffer.
45
+ template <size_t Alignment, size_t Size>
46
+ struct AlignedCharArray;
47
+
48
+ // We provide special variations of this template for the most common
49
+ // alignments because __declspec(align(...)) doesn't actually work when it is
50
+ // a member of a by-value function argument in MSVC, even if the alignment
51
+ // request is something reasonably like 8-byte or 16-byte. Note that we can't
52
+ // even include the declspec with the union that forces the alignment because
53
+ // MSVC warns on the existence of the declspec despite the union member forcing
54
+ // proper alignment.
55
+
56
+ template <size_t Size>
57
+ struct AlignedCharArray<1, Size> {
58
+ union {
59
+ char aligned;
60
+ char buffer[Size];
61
+ };
62
+ };
63
+
64
+ template <size_t Size>
65
+ struct AlignedCharArray<2, Size> {
66
+ union {
67
+ short aligned;
68
+ char buffer[Size];
69
+ };
70
+ };
71
+
72
+ template <size_t Size>
73
+ struct AlignedCharArray<4, Size> {
74
+ union {
75
+ int aligned;
76
+ char buffer[Size];
77
+ };
78
+ };
79
+
80
+ template <size_t Size>
81
+ struct AlignedCharArray<8, Size> {
82
+ union {
83
+ double aligned;
84
+ char buffer[Size];
85
+ };
86
+ };
87
+
88
+ // The rest of these are provided with a __declspec(align(...)) and we simply
89
+ // can't pass them by-value as function arguments on MSVC.
90
+
91
+ #define AT_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(x) \
92
+ template <size_t Size> \
93
+ struct AlignedCharArray<x, Size> { \
94
+ __declspec(align(x)) char buffer[Size]; \
95
+ };
96
+
97
+ AT_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(16)
98
+ AT_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(32)
99
+ AT_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(64)
100
+ AT_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(128)
101
+
102
+ #undef AT_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT
103
+
104
+ #endif // _MSC_VER
105
+
106
+ namespace detail {
107
+ template <
108
+ typename T1,
109
+ typename T2 = char,
110
+ typename T3 = char,
111
+ typename T4 = char,
112
+ typename T5 = char,
113
+ typename T6 = char,
114
+ typename T7 = char,
115
+ typename T8 = char,
116
+ typename T9 = char,
117
+ typename T10 = char>
118
+ class AlignerImpl {
119
+ T1 t1;
120
+ T2 t2;
121
+ T3 t3;
122
+ T4 t4;
123
+ T5 t5;
124
+ T6 t6;
125
+ T7 t7;
126
+ T8 t8;
127
+ T9 t9;
128
+ T10 t10;
129
+
130
+ public:
131
+ AlignerImpl() = delete;
132
+ };
133
+
134
+ template <
135
+ typename T1,
136
+ typename T2 = char,
137
+ typename T3 = char,
138
+ typename T4 = char,
139
+ typename T5 = char,
140
+ typename T6 = char,
141
+ typename T7 = char,
142
+ typename T8 = char,
143
+ typename T9 = char,
144
+ typename T10 = char>
145
+ union SizerImpl {
146
+ // NOLINTNEXTLINE(*c-arrays)
147
+ char arr1[sizeof(T1)], arr2[sizeof(T2)], arr3[sizeof(T3)], arr4[sizeof(T4)],
148
+ arr5[sizeof(T5)], arr6[sizeof(T6)], arr7[sizeof(T7)], arr8[sizeof(T8)],
149
+ arr9[sizeof(T9)], arr10[sizeof(T10)];
150
+ };
151
+ } // end namespace detail
152
+
153
+ /// \brief This union template exposes a suitably aligned and sized character
154
+ /// array member which can hold elements of any of up to ten types.
155
+ ///
156
+ /// These types may be arrays, structs, or any other types. The goal is to
157
+ /// expose a char array buffer member which can be used as suitable storage for
158
+ /// a placement new of any of these types. Support for more than ten types can
159
+ /// be added at the cost of more boilerplate.
160
+ template <
161
+ typename T1,
162
+ typename T2 = char,
163
+ typename T3 = char,
164
+ typename T4 = char,
165
+ typename T5 = char,
166
+ typename T6 = char,
167
+ typename T7 = char,
168
+ typename T8 = char,
169
+ typename T9 = char,
170
+ typename T10 = char>
171
+ struct AlignedCharArrayUnion
172
+ : AlignedCharArray<
173
+ alignof(detail::AlignerImpl<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10>),
174
+ sizeof(::c10::detail::
175
+ SizerImpl<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10>)> {};
176
+ } // end namespace c10
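As the class comment above describes, AlignedCharArrayUnion is meant to be used as raw, suitably aligned storage for a placement new. The sketch below shows that pattern with a simplified two-type analogue; AlignedStorageFor is a hypothetical stand-in so the example compiles without the c10 headers, and the real template supports up to ten types.

// Simplified analogue of AlignedCharArrayUnion used as placement-new storage.
#include <iostream>
#include <memory>
#include <new>
#include <string>

template <typename T1, typename T2>
struct AlignedStorageFor {
  alignas(T1) alignas(T2) char buffer[sizeof(T1) > sizeof(T2) ? sizeof(T1)
                                                              : sizeof(T2)];
};

int main() {
  AlignedStorageFor<double, std::string> storage;
  // Construct a std::string inside the aligned buffer, use it, destroy it.
  auto* s = new (storage.buffer) std::string("placement new into aligned storage");
  std::cout << *s << "\n";
  std::destroy_at(s);  // C++17
  return 0;
}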
venv/lib/python3.10/site-packages/torch/include/c10/util/BFloat16-inl.h ADDED
@@ -0,0 +1,343 @@
1
+ #pragma once
2
+
3
+ #include <c10/macros/Macros.h>
4
+ #include <c10/util/bit_cast.h>
5
+
6
+ #include <limits>
7
+
8
+ C10_CLANG_DIAGNOSTIC_PUSH()
9
+ #if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion")
10
+ C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-int-float-conversion")
11
+ #endif
12
+
13
+ #if defined(SYCL_EXT_ONEAPI_BFLOAT16_MATH_FUNCTIONS)
14
+ #if defined(CL_SYCL_LANGUAGE_VERSION)
15
+ #include <CL/sycl.hpp> // for SYCL 1.2.1
16
+ #else
17
+ #include <sycl/sycl.hpp> // for SYCL 2020
18
+ #endif
19
+ #include <ext/oneapi/bfloat16.hpp>
20
+ #endif
21
+
22
+ namespace c10 {
23
+
24
+ /// Constructors
25
+ inline C10_HOST_DEVICE BFloat16::BFloat16(float value)
26
+ :
27
+ #if defined(__CUDACC__) && !defined(USE_ROCM) && defined(__CUDA_ARCH__) && \
28
+ __CUDA_ARCH__ >= 800
29
+ x(__bfloat16_as_ushort(__float2bfloat16(value)))
30
+ #elif defined(__SYCL_DEVICE_ONLY__) && \
31
+ defined(SYCL_EXT_ONEAPI_BFLOAT16_MATH_FUNCTIONS)
32
+ x(c10::bit_cast<uint16_t>(sycl::ext::oneapi::bfloat16(value)))
33
+ #else
34
+ // RNE by default
35
+ x(detail::round_to_nearest_even(value))
36
+ #endif
37
+ {
38
+ }
39
+
40
+ /// Implicit conversions
41
+ inline C10_HOST_DEVICE BFloat16::operator float() const {
42
+ #if defined(__CUDACC__) && !defined(USE_ROCM)
43
+ return __bfloat162float(*reinterpret_cast<const __nv_bfloat16*>(&x));
44
+ #elif defined(__SYCL_DEVICE_ONLY__) && \
45
+ defined(SYCL_EXT_ONEAPI_BFLOAT16_MATH_FUNCTIONS)
46
+ return float(*reinterpret_cast<const sycl::ext::oneapi::bfloat16*>(&x));
47
+ #else
48
+ return detail::f32_from_bits(x);
49
+ #endif
50
+ }
51
+
52
+ #if defined(__CUDACC__) && !defined(USE_ROCM)
53
+ inline C10_HOST_DEVICE BFloat16::BFloat16(const __nv_bfloat16& value) {
54
+ x = *reinterpret_cast<const unsigned short*>(&value);
55
+ }
56
+ inline C10_HOST_DEVICE BFloat16::operator __nv_bfloat16() const {
57
+ return *reinterpret_cast<const __nv_bfloat16*>(&x);
58
+ }
59
+ #endif
60
+
61
+ #if defined(SYCL_EXT_ONEAPI_BFLOAT16_MATH_FUNCTIONS)
62
+ inline C10_HOST_DEVICE BFloat16::BFloat16(
63
+ const sycl::ext::oneapi::bfloat16& value) {
64
+ x = *reinterpret_cast<const unsigned short*>(&value);
65
+ }
66
+ inline C10_HOST_DEVICE BFloat16::operator sycl::ext::oneapi::bfloat16() const {
67
+ return *reinterpret_cast<const sycl::ext::oneapi::bfloat16*>(&x);
68
+ }
69
+ #endif
70
+
71
+ // CUDA intrinsics
72
+
73
+ #if defined(__CUDACC__) || defined(__HIPCC__)
74
+ inline C10_DEVICE BFloat16 __ldg(const BFloat16* ptr) {
75
+ #if !defined(USE_ROCM) && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800
76
+ return __ldg(reinterpret_cast<const __nv_bfloat16*>(ptr));
77
+ #else
78
+ return *ptr;
79
+ #endif
80
+ }
81
+ #endif
82
+
83
+ /// Arithmetic
84
+
85
+ inline C10_HOST_DEVICE BFloat16
86
+ operator+(const BFloat16& a, const BFloat16& b) {
87
+ return static_cast<float>(a) + static_cast<float>(b);
88
+ }
89
+
90
+ inline C10_HOST_DEVICE BFloat16
91
+ operator-(const BFloat16& a, const BFloat16& b) {
92
+ return static_cast<float>(a) - static_cast<float>(b);
93
+ }
94
+
95
+ inline C10_HOST_DEVICE BFloat16
96
+ operator*(const BFloat16& a, const BFloat16& b) {
97
+ return static_cast<float>(a) * static_cast<float>(b);
98
+ }
99
+
100
+ inline C10_HOST_DEVICE BFloat16 operator/(const BFloat16& a, const BFloat16& b)
101
+ __ubsan_ignore_float_divide_by_zero__ {
102
+ return static_cast<float>(a) / static_cast<float>(b);
103
+ }
104
+
105
+ inline C10_HOST_DEVICE BFloat16 operator-(const BFloat16& a) {
106
+ return -static_cast<float>(a);
107
+ }
108
+
109
+ inline C10_HOST_DEVICE BFloat16& operator+=(BFloat16& a, const BFloat16& b) {
110
+ a = a + b;
111
+ return a;
112
+ }
113
+
114
+ inline C10_HOST_DEVICE BFloat16& operator-=(BFloat16& a, const BFloat16& b) {
115
+ a = a - b;
116
+ return a;
117
+ }
118
+
119
+ inline C10_HOST_DEVICE BFloat16& operator*=(BFloat16& a, const BFloat16& b) {
120
+ a = a * b;
121
+ return a;
122
+ }
123
+
124
+ inline C10_HOST_DEVICE BFloat16& operator/=(BFloat16& a, const BFloat16& b) {
125
+ a = a / b;
126
+ return a;
127
+ }
128
+
129
+ inline C10_HOST_DEVICE BFloat16& operator|(BFloat16& a, const BFloat16& b) {
130
+ a.x = a.x | b.x;
131
+ return a;
132
+ }
133
+
134
+ inline C10_HOST_DEVICE BFloat16& operator^(BFloat16& a, const BFloat16& b) {
135
+ a.x = a.x ^ b.x;
136
+ return a;
137
+ }
138
+
139
+ inline C10_HOST_DEVICE BFloat16& operator&(BFloat16& a, const BFloat16& b) {
140
+ a.x = a.x & b.x;
141
+ return a;
142
+ }
143
+
144
+ /// Arithmetic with floats
145
+
146
+ inline C10_HOST_DEVICE float operator+(BFloat16 a, float b) {
147
+ return static_cast<float>(a) + b;
148
+ }
149
+ inline C10_HOST_DEVICE float operator-(BFloat16 a, float b) {
150
+ return static_cast<float>(a) - b;
151
+ }
152
+ inline C10_HOST_DEVICE float operator*(BFloat16 a, float b) {
153
+ return static_cast<float>(a) * b;
154
+ }
155
+ inline C10_HOST_DEVICE float operator/(BFloat16 a, float b) {
156
+ return static_cast<float>(a) / b;
157
+ }
158
+
159
+ inline C10_HOST_DEVICE float operator+(float a, BFloat16 b) {
160
+ return a + static_cast<float>(b);
161
+ }
162
+ inline C10_HOST_DEVICE float operator-(float a, BFloat16 b) {
163
+ return a - static_cast<float>(b);
164
+ }
165
+ inline C10_HOST_DEVICE float operator*(float a, BFloat16 b) {
166
+ return a * static_cast<float>(b);
167
+ }
168
+ inline C10_HOST_DEVICE float operator/(float a, BFloat16 b) {
169
+ return a / static_cast<float>(b);
170
+ }
171
+
172
+ inline C10_HOST_DEVICE float& operator+=(float& a, const BFloat16& b) {
173
+ return a += static_cast<float>(b);
174
+ }
175
+ inline C10_HOST_DEVICE float& operator-=(float& a, const BFloat16& b) {
176
+ return a -= static_cast<float>(b);
177
+ }
178
+ inline C10_HOST_DEVICE float& operator*=(float& a, const BFloat16& b) {
179
+ return a *= static_cast<float>(b);
180
+ }
181
+ inline C10_HOST_DEVICE float& operator/=(float& a, const BFloat16& b) {
182
+ return a /= static_cast<float>(b);
183
+ }
184
+
185
+ /// Arithmetic with doubles
186
+
187
+ inline C10_HOST_DEVICE double operator+(BFloat16 a, double b) {
188
+ return static_cast<double>(a) + b;
189
+ }
190
+ inline C10_HOST_DEVICE double operator-(BFloat16 a, double b) {
191
+ return static_cast<double>(a) - b;
192
+ }
193
+ inline C10_HOST_DEVICE double operator*(BFloat16 a, double b) {
194
+ return static_cast<double>(a) * b;
195
+ }
196
+ inline C10_HOST_DEVICE double operator/(BFloat16 a, double b) {
197
+ return static_cast<double>(a) / b;
198
+ }
199
+
200
+ inline C10_HOST_DEVICE double operator+(double a, BFloat16 b) {
201
+ return a + static_cast<double>(b);
202
+ }
203
+ inline C10_HOST_DEVICE double operator-(double a, BFloat16 b) {
204
+ return a - static_cast<double>(b);
205
+ }
206
+ inline C10_HOST_DEVICE double operator*(double a, BFloat16 b) {
207
+ return a * static_cast<double>(b);
208
+ }
209
+ inline C10_HOST_DEVICE double operator/(double a, BFloat16 b) {
210
+ return a / static_cast<double>(b);
211
+ }
212
+
213
+ /// Arithmetic with ints
214
+
215
+ inline C10_HOST_DEVICE BFloat16 operator+(BFloat16 a, int b) {
216
+ return a + static_cast<BFloat16>(b);
217
+ }
218
+ inline C10_HOST_DEVICE BFloat16 operator-(BFloat16 a, int b) {
219
+ return a - static_cast<BFloat16>(b);
220
+ }
221
+ inline C10_HOST_DEVICE BFloat16 operator*(BFloat16 a, int b) {
222
+ return a * static_cast<BFloat16>(b);
223
+ }
224
+ inline C10_HOST_DEVICE BFloat16 operator/(BFloat16 a, int b) {
225
+ return a / static_cast<BFloat16>(b);
226
+ }
227
+
228
+ inline C10_HOST_DEVICE BFloat16 operator+(int a, BFloat16 b) {
229
+ return static_cast<BFloat16>(a) + b;
230
+ }
231
+ inline C10_HOST_DEVICE BFloat16 operator-(int a, BFloat16 b) {
232
+ return static_cast<BFloat16>(a) - b;
233
+ }
234
+ inline C10_HOST_DEVICE BFloat16 operator*(int a, BFloat16 b) {
235
+ return static_cast<BFloat16>(a) * b;
236
+ }
237
+ inline C10_HOST_DEVICE BFloat16 operator/(int a, BFloat16 b) {
238
+ return static_cast<BFloat16>(a) / b;
239
+ }
240
+
241
+ //// Arithmetic with int64_t
242
+
243
+ inline C10_HOST_DEVICE BFloat16 operator+(BFloat16 a, int64_t b) {
244
+ return a + static_cast<BFloat16>(b);
245
+ }
246
+ inline C10_HOST_DEVICE BFloat16 operator-(BFloat16 a, int64_t b) {
247
+ return a - static_cast<BFloat16>(b);
248
+ }
249
+ inline C10_HOST_DEVICE BFloat16 operator*(BFloat16 a, int64_t b) {
250
+ return a * static_cast<BFloat16>(b);
251
+ }
252
+ inline C10_HOST_DEVICE BFloat16 operator/(BFloat16 a, int64_t b) {
253
+ return a / static_cast<BFloat16>(b);
254
+ }
255
+
256
+ inline C10_HOST_DEVICE BFloat16 operator+(int64_t a, BFloat16 b) {
257
+ return static_cast<BFloat16>(a) + b;
258
+ }
259
+ inline C10_HOST_DEVICE BFloat16 operator-(int64_t a, BFloat16 b) {
260
+ return static_cast<BFloat16>(a) - b;
261
+ }
262
+ inline C10_HOST_DEVICE BFloat16 operator*(int64_t a, BFloat16 b) {
263
+ return static_cast<BFloat16>(a) * b;
264
+ }
265
+ inline C10_HOST_DEVICE BFloat16 operator/(int64_t a, BFloat16 b) {
266
+ return static_cast<BFloat16>(a) / b;
267
+ }
268
+
269
+ // Overloading < and > operators, because std::max and std::min use them.
270
+
271
+ inline C10_HOST_DEVICE bool operator>(BFloat16& lhs, BFloat16& rhs) {
272
+ return float(lhs) > float(rhs);
273
+ }
274
+
275
+ inline C10_HOST_DEVICE bool operator<(BFloat16& lhs, BFloat16& rhs) {
276
+ return float(lhs) < float(rhs);
277
+ }
278
+
279
+ } // namespace c10
280
+
281
+ namespace std {
282
+
283
+ template <>
284
+ class numeric_limits<c10::BFloat16> {
285
+ public:
286
+ static constexpr bool is_signed = true;
287
+ static constexpr bool is_specialized = true;
288
+ static constexpr bool is_integer = false;
289
+ static constexpr bool is_exact = false;
290
+ static constexpr bool has_infinity = true;
291
+ static constexpr bool has_quiet_NaN = true;
292
+ static constexpr bool has_signaling_NaN = true;
293
+ static constexpr auto has_denorm = numeric_limits<float>::has_denorm;
294
+ static constexpr auto has_denorm_loss =
295
+ numeric_limits<float>::has_denorm_loss;
296
+ static constexpr auto round_style = numeric_limits<float>::round_style;
297
+ static constexpr bool is_iec559 = false;
298
+ static constexpr bool is_bounded = true;
299
+ static constexpr bool is_modulo = false;
300
+ static constexpr int digits = 8;
301
+ static constexpr int digits10 = 2;
302
+ static constexpr int max_digits10 = 4;
303
+ static constexpr int radix = 2;
304
+ static constexpr int min_exponent = -125;
305
+ static constexpr int min_exponent10 = -37;
306
+ static constexpr int max_exponent = 128;
307
+ static constexpr int max_exponent10 = 38;
308
+ static constexpr auto traps = numeric_limits<float>::traps;
309
+ static constexpr auto tinyness_before =
310
+ numeric_limits<float>::tinyness_before;
311
+
312
+ static constexpr c10::BFloat16 min() {
313
+ return c10::BFloat16(0x0080, c10::BFloat16::from_bits());
314
+ }
315
+ static constexpr c10::BFloat16 lowest() {
316
+ return c10::BFloat16(0xFF7F, c10::BFloat16::from_bits());
317
+ }
318
+ static constexpr c10::BFloat16 max() {
319
+ return c10::BFloat16(0x7F7F, c10::BFloat16::from_bits());
320
+ }
321
+ static constexpr c10::BFloat16 epsilon() {
322
+ return c10::BFloat16(0x3C00, c10::BFloat16::from_bits());
323
+ }
324
+ static constexpr c10::BFloat16 round_error() {
325
+ return c10::BFloat16(0x3F00, c10::BFloat16::from_bits());
326
+ }
327
+ static constexpr c10::BFloat16 infinity() {
328
+ return c10::BFloat16(0x7F80, c10::BFloat16::from_bits());
329
+ }
330
+ static constexpr c10::BFloat16 quiet_NaN() {
331
+ return c10::BFloat16(0x7FC0, c10::BFloat16::from_bits());
332
+ }
333
+ static constexpr c10::BFloat16 signaling_NaN() {
334
+ return c10::BFloat16(0x7F80, c10::BFloat16::from_bits());
335
+ }
336
+ static constexpr c10::BFloat16 denorm_min() {
337
+ return c10::BFloat16(0x0001, c10::BFloat16::from_bits());
338
+ }
339
+ };
340
+
341
+ } // namespace std
342
+
343
+ C10_CLANG_DIAGNOSTIC_POP()
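On the non-CUDA path, the BFloat16(float) constructor above defers to detail::round_to_nearest_even, and operator float() to detail::f32_from_bits; neither is shown in this header. The sketch below reproduces the standard round-to-nearest-even truncation of a float to its top 16 bits under that assumption (NaN handling omitted for brevity).

// Minimal sketch of the round-to-nearest-even float -> bfloat16 truncation
// the non-CUDA constructor path relies on; NaN handling is omitted.
#include <cstdint>
#include <cstring>
#include <iostream>

uint16_t float_to_bf16_rne(float f) {
  uint32_t bits;
  std::memcpy(&bits, &f, sizeof(bits));  // bit_cast the float
  const uint32_t rounding_bias = 0x7FFF + ((bits >> 16) & 1);
  return static_cast<uint16_t>((bits + rounding_bias) >> 16);
}

float bf16_to_float(uint16_t x) {
  const uint32_t bits = static_cast<uint32_t>(x) << 16;  // zero-pad the mantissa
  float f;
  std::memcpy(&f, &bits, sizeof(f));
  return f;
}

int main() {
  const float v = 1.2345f;
  const uint16_t b = float_to_bf16_rne(v);
  std::cout << v << " -> bf16 bits 0x" << std::hex << b << std::dec
            << " -> " << bf16_to_float(b) << "\n";  // ~1.234375
  return 0;
}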
venv/lib/python3.10/site-packages/torch/include/c10/util/BFloat16-math.h ADDED
@@ -0,0 +1,287 @@
1
+ #pragma once
2
+
3
+ #include <c10/util/BFloat16.h>
4
+ #include <c10/util/Half.h>
5
+
6
+ C10_CLANG_DIAGNOSTIC_PUSH()
7
+ #if C10_CLANG_HAS_WARNING("-Wimplicit-float-conversion")
8
+ C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-float-conversion")
9
+ #endif
10
+
11
+ namespace std {
12
+
13
+ template <typename T>
14
+ struct is_reduced_floating_point
15
+ : std::integral_constant<
16
+ bool,
17
+ std::is_same_v<T, c10::Half> || std::is_same_v<T, c10::BFloat16>> {};
18
+
19
+ template <typename T>
20
+ constexpr bool is_reduced_floating_point_v =
21
+ is_reduced_floating_point<T>::value;
22
+
23
+ template <
24
+ typename T,
25
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
26
+ inline T acos(T a) {
27
+ return std::acos(float(a));
28
+ }
29
+ template <
30
+ typename T,
31
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
32
+ inline T asin(T a) {
33
+ return std::asin(float(a));
34
+ }
35
+ template <
36
+ typename T,
37
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
38
+ inline T atan(T a) {
39
+ return std::atan(float(a));
40
+ }
41
+ template <
42
+ typename T,
43
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
44
+ inline T atanh(T a) {
45
+ return std::atanh(float(a));
46
+ }
47
+ template <
48
+ typename T,
49
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
50
+ inline T erf(T a) {
51
+ return std::erf(float(a));
52
+ }
53
+ template <
54
+ typename T,
55
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
56
+ inline T erfc(T a) {
57
+ return std::erfc(float(a));
58
+ }
59
+ template <
60
+ typename T,
61
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
62
+ inline T exp(T a) {
63
+ return std::exp(float(a));
64
+ }
65
+ template <
66
+ typename T,
67
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
68
+ inline T expm1(T a) {
69
+ return std::expm1(float(a));
70
+ }
71
+ template <
72
+ typename T,
73
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
74
+ inline T log(T a) {
75
+ return std::log(float(a));
76
+ }
77
+ template <
78
+ typename T,
79
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
80
+ inline T log10(T a) {
81
+ return std::log10(float(a));
82
+ }
83
+ template <
84
+ typename T,
85
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
86
+ inline T log1p(T a) {
87
+ return std::log1p(float(a));
88
+ }
89
+ template <
90
+ typename T,
91
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
92
+ inline T log2(T a) {
93
+ return std::log2(float(a));
94
+ }
95
+ template <
96
+ typename T,
97
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
98
+ inline T ceil(T a) {
99
+ return std::ceil(float(a));
100
+ }
101
+ template <
102
+ typename T,
103
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
104
+ inline T cos(T a) {
105
+ return std::cos(float(a));
106
+ }
107
+ template <
108
+ typename T,
109
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
110
+ inline T floor(T a) {
111
+ return std::floor(float(a));
112
+ }
113
+ template <
114
+ typename T,
115
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
116
+ inline T nearbyint(T a) {
117
+ return std::nearbyint(float(a));
118
+ }
119
+ template <
120
+ typename T,
121
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
122
+ inline T sin(T a) {
123
+ return std::sin(float(a));
124
+ }
125
+ template <
126
+ typename T,
127
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
128
+ inline T tan(T a) {
129
+ return std::tan(float(a));
130
+ }
131
+ template <
132
+ typename T,
133
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
134
+ inline T sinh(T a) {
135
+ return std::sinh(float(a));
136
+ }
137
+ template <
138
+ typename T,
139
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
140
+ inline T cosh(T a) {
141
+ return std::cosh(float(a));
142
+ }
143
+ template <
144
+ typename T,
145
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
146
+ inline T tanh(T a) {
147
+ return std::tanh(float(a));
148
+ }
149
+ template <
150
+ typename T,
151
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
152
+ inline T trunc(T a) {
153
+ return std::trunc(float(a));
154
+ }
155
+ template <
156
+ typename T,
157
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
158
+ inline T lgamma(T a) {
159
+ return std::lgamma(float(a));
160
+ }
161
+ template <
162
+ typename T,
163
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
164
+ inline T sqrt(T a) {
165
+ return std::sqrt(float(a));
166
+ }
167
+ template <
168
+ typename T,
169
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
170
+ inline T rsqrt(T a) {
171
+ return 1.0 / std::sqrt(float(a));
172
+ }
173
+ template <
174
+ typename T,
175
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
176
+ inline T abs(T a) {
177
+ return std::abs(float(a));
178
+ }
179
+ #if defined(_MSC_VER) && defined(__CUDACC__)
180
+ template <
181
+ typename T,
182
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
183
+ inline T pow(T a, double b) {
184
+ return std::pow(float(a), float(b));
185
+ }
186
+ #else
187
+ template <
188
+ typename T,
189
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
190
+ inline T pow(T a, double b) {
191
+ return std::pow(float(a), b);
192
+ }
193
+ #endif
194
+ template <
195
+ typename T,
196
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
197
+ inline T pow(T a, T b) {
198
+ return std::pow(float(a), float(b));
199
+ }
200
+ template <
201
+ typename T,
202
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
203
+ inline T fmod(T a, T b) {
204
+ return std::fmod(float(a), float(b));
205
+ }
206
+
207
+ /*
208
+ The following function is inspired from the implementation in `musl`
209
+ Link to License: https://git.musl-libc.org/cgit/musl/tree/COPYRIGHT
210
+ ----------------------------------------------------------------------
211
+ Copyright © 2005-2020 Rich Felker, et al.
212
+
213
+ Permission is hereby granted, free of charge, to any person obtaining
214
+ a copy of this software and associated documentation files (the
215
+ "Software"), to deal in the Software without restriction, including
216
+ without limitation the rights to use, copy, modify, merge, publish,
217
+ distribute, sublicense, and/or sell copies of the Software, and to
218
+ permit persons to whom the Software is furnished to do so, subject to
219
+ the following conditions:
220
+
221
+ The above copyright notice and this permission notice shall be
222
+ included in all copies or substantial portions of the Software.
223
+
224
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
225
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
226
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
227
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
228
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
229
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
230
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
231
+ ----------------------------------------------------------------------
232
+ */
233
+ template <
234
+ typename T,
235
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
236
+ C10_HOST_DEVICE inline T nextafter(T from, T to) {
237
+ // Reference:
238
+ // https://git.musl-libc.org/cgit/musl/tree/src/math/nextafter.c
239
+ using int_repr_t = uint16_t;
240
+ using float_t = T;
241
+ constexpr uint8_t bits = 16;
242
+ union {
243
+ float_t f;
244
+ int_repr_t i;
245
+ } ufrom = {from}, uto = {to};
246
+
247
+ // get a mask to get the sign bit i.e. MSB
248
+ int_repr_t sign_mask = int_repr_t{1} << (bits - 1);
249
+
250
+ // short-circuit: if either is NaN, return NaN
251
+ if (from != from || to != to) {
252
+ return from + to;
253
+ }
254
+
255
+ // short-circuit: if they are exactly the same.
256
+ if (ufrom.i == uto.i) {
257
+ return from;
258
+ }
259
+
260
+ // mask the sign-bit to zero i.e. positive
261
+ // equivalent to abs(x)
262
+ int_repr_t abs_from = ufrom.i & ~sign_mask;
263
+ int_repr_t abs_to = uto.i & ~sign_mask;
264
+ if (abs_from == 0) {
265
+ // if both are zero but with different sign,
266
+ // preserve the sign of `to`.
267
+ if (abs_to == 0) {
268
+ return to;
269
+ }
270
+ // smallest subnormal with sign of `to`.
271
+ ufrom.i = (uto.i & sign_mask) | int_repr_t{1};
272
+ return ufrom.f;
273
+ }
274
+
275
+ // if abs(from) > abs(to) or sign(from) != sign(to)
276
+ if (abs_from > abs_to || ((ufrom.i ^ uto.i) & sign_mask)) {
277
+ ufrom.i--;
278
+ } else {
279
+ ufrom.i++;
280
+ }
281
+
282
+ return ufrom.f;
283
+ }
284
+
285
+ } // namespace std
286
+
287
+ C10_CLANG_DIAGNOSTIC_POP()
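A short sketch of how the overloads above are used: <cmath>-style calls compile directly for c10::BFloat16 (and c10::Half) because the std:: templates promote to float internally:

#include <c10/util/BFloat16-math.h>
#include <iostream>

int main() {
  c10::BFloat16 x(2.0f);
  c10::BFloat16 r = std::sqrt(x);  // evaluated as std::sqrt(float(x))
  // nextafter works on the raw 16-bit representation, as implemented above
  c10::BFloat16 up = std::nextafter(x, c10::BFloat16(3.0f));
  std::cout << float(r) << " " << float(up) << "\n";  // ~1.414, 2.015625
  return 0;
}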
venv/lib/python3.10/site-packages/torch/include/c10/util/BFloat16.h ADDED
@@ -0,0 +1,117 @@
1
+ #pragma once
2
+
3
+ // Defines the bfloat16 type (brain floating-point). This representation uses
4
+ // 1 bit for the sign, 8 bits for the exponent and 7 bits for the mantissa.
5
+
6
+ #include <c10/macros/Macros.h>
7
+ #include <cmath>
8
+ #include <cstdint>
9
+ #include <cstring>
10
+
11
+ #if defined(__CUDACC__) && !defined(USE_ROCM)
12
+ #include <cuda_bf16.h>
13
+ #endif
14
+
15
+ #if defined(SYCL_EXT_ONEAPI_BFLOAT16_MATH_FUNCTIONS)
16
+ #if defined(CL_SYCL_LANGUAGE_VERSION)
17
+ #include <CL/sycl.hpp> // for SYCL 1.2.1
18
+ #else
19
+ #include <sycl/sycl.hpp> // for SYCL 2020
20
+ #endif
21
+ #include <ext/oneapi/bfloat16.hpp>
22
+ #endif
23
+
24
+ namespace c10 {
25
+
26
+ namespace detail {
27
+ inline C10_HOST_DEVICE float f32_from_bits(uint16_t src) {
28
+ float res = 0;
29
+ uint32_t tmp = src;
30
+ tmp <<= 16;
31
+
32
+ #if defined(USE_ROCM)
33
+ float* tempRes;
34
+
35
+ // We should be using memcpy in order to respect the strict aliasing rule
36
+ // but it fails in the HIP environment.
37
+ tempRes = reinterpret_cast<float*>(&tmp);
38
+ res = *tempRes;
39
+ #else
40
+ std::memcpy(&res, &tmp, sizeof(tmp));
41
+ #endif
42
+
43
+ return res;
44
+ }
45
+
46
+ inline C10_HOST_DEVICE uint16_t bits_from_f32(float src) {
47
+ uint32_t res = 0;
48
+
49
+ #if defined(USE_ROCM)
50
+ // We should be using memcpy in order to respect the strict aliasing rule
51
+ // but it fails in the HIP environment.
52
+ uint32_t* tempRes = reinterpret_cast<uint32_t*>(&src);
53
+ res = *tempRes;
54
+ #else
55
+ std::memcpy(&res, &src, sizeof(res));
56
+ #endif
57
+
58
+ return res >> 16;
59
+ }
60
+
61
+ inline C10_HOST_DEVICE uint16_t round_to_nearest_even(float src) {
62
+ #if defined(USE_ROCM)
63
+ if (src != src) {
64
+ #elif defined(_MSC_VER)
65
+ if (isnan(src)) {
66
+ #else
67
+ if (std::isnan(src)) {
68
+ #endif
69
+ return UINT16_C(0x7FC0);
70
+ } else {
71
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
72
+ union {
73
+ uint32_t U32;
74
+ float F32;
75
+ };
76
+
77
+ F32 = src;
78
+ uint32_t rounding_bias = ((U32 >> 16) & 1) + UINT32_C(0x7FFF);
79
+ return static_cast<uint16_t>((U32 + rounding_bias) >> 16);
80
+ }
81
+ }
82
+ } // namespace detail
83
+
84
+ struct alignas(2) BFloat16 {
85
+ uint16_t x;
86
+
87
+ // HIP wants __host__ __device__ tag, CUDA does not
88
+ #if defined(USE_ROCM)
89
+ C10_HOST_DEVICE BFloat16() = default;
90
+ #else
91
+ BFloat16() = default;
92
+ #endif
93
+
94
+ struct from_bits_t {};
95
+ static constexpr C10_HOST_DEVICE from_bits_t from_bits() {
96
+ return from_bits_t();
97
+ }
98
+
99
+ constexpr C10_HOST_DEVICE BFloat16(unsigned short bits, from_bits_t)
100
+ : x(bits){};
101
+ inline C10_HOST_DEVICE BFloat16(float value);
102
+ inline C10_HOST_DEVICE operator float() const;
103
+
104
+ #if defined(__CUDACC__) && !defined(USE_ROCM)
105
+ inline C10_HOST_DEVICE BFloat16(const __nv_bfloat16& value);
106
+ explicit inline C10_HOST_DEVICE operator __nv_bfloat16() const;
107
+ #endif
108
+
109
+ #if defined(SYCL_EXT_ONEAPI_BFLOAT16_MATH_FUNCTIONS)
110
+ inline C10_HOST_DEVICE BFloat16(const sycl::ext::oneapi::bfloat16& value);
111
+ explicit inline C10_HOST_DEVICE operator sycl::ext::oneapi::bfloat16() const;
112
+ #endif
113
+ };
114
+
115
+ } // namespace c10
116
+
117
+ #include <c10/util/BFloat16-inl.h> // IWYU pragma: keep
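A minimal sketch of the conversion path defined above: constructing a BFloat16 from a float rounds the top 16 bits to nearest even, and converting back zero-extends the mantissa:

#include <c10/util/BFloat16.h>
#include <cstdio>

int main() {
  float value = 3.14159265f;
  c10::BFloat16 b(value);              // converts via detail::round_to_nearest_even
  float back = static_cast<float>(b);  // converts via detail::f32_from_bits
  std::printf("bits=0x%04x back=%f\n", static_cast<unsigned>(b.x), back);
  // prints bits=0x4049 back=3.140625 (7 mantissa bits keep ~2-3 decimal digits)
  return 0;
}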
venv/lib/python3.10/site-packages/torch/include/c10/util/CallOnce.h ADDED
@@ -0,0 +1,67 @@
1
+ #pragma once
2
+
3
+ #include <atomic>
4
+ #include <mutex>
5
+ #include <utility>
6
+
7
+ #include <c10/macros/Macros.h>
8
+ #include <c10/util/C++17.h>
9
+
10
+ namespace c10 {
11
+
12
+ // custom c10 call_once implementation to avoid the deadlock in std::call_once.
13
+ // The implementation here is a simplified version from folly and likely has a
14
+ // much higher memory footprint.
15
+ template <typename Flag, typename F, typename... Args>
16
+ inline void call_once(Flag& flag, F&& f, Args&&... args) {
17
+ if (C10_LIKELY(flag.test_once())) {
18
+ return;
19
+ }
20
+ flag.call_once_slow(std::forward<F>(f), std::forward<Args>(args)...);
21
+ }
22
+
23
+ class once_flag {
24
+ public:
25
+ #ifndef _WIN32
26
+ // running into build error on MSVC. Can't seem to get a repro locally so I'm
27
+ // just avoiding constexpr
28
+ //
29
+ // C:/actions-runner/_work/pytorch/pytorch\c10/util/CallOnce.h(26): error:
30
+ // defaulted default constructor cannot be constexpr because the
31
+ // corresponding implicitly declared default constructor would not be
32
+ // constexpr 1 error detected in the compilation of
33
+ // "C:/actions-runner/_work/pytorch/pytorch/aten/src/ATen/cuda/cub.cu".
34
+ constexpr
35
+ #endif
36
+ once_flag() noexcept = default;
37
+ once_flag(const once_flag&) = delete;
38
+ once_flag& operator=(const once_flag&) = delete;
39
+
40
+ private:
41
+ template <typename Flag, typename F, typename... Args>
42
+ friend void call_once(Flag& flag, F&& f, Args&&... args);
43
+
44
+ template <typename F, typename... Args>
45
+ void call_once_slow(F&& f, Args&&... args) {
46
+ std::lock_guard<std::mutex> guard(mutex_);
47
+ if (init_.load(std::memory_order_relaxed)) {
48
+ return;
49
+ }
50
+ c10::guts::invoke(std::forward<F>(f), std::forward<Args>(args)...);
51
+ init_.store(true, std::memory_order_release);
52
+ }
53
+
54
+ bool test_once() {
55
+ return init_.load(std::memory_order_acquire);
56
+ }
57
+
58
+ void reset_once() {
59
+ init_.store(false, std::memory_order_release);
60
+ }
61
+
62
+ private:
63
+ std::mutex mutex_;
64
+ std::atomic<bool> init_{false};
65
+ };
66
+
67
+ } // namespace c10
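A minimal usage sketch for c10::call_once / c10::once_flag (init_backend is a hypothetical function); the API mirrors std::call_once, with the fast path being a single atomic acquire load:

#include <c10/util/CallOnce.h>
#include <iostream>

void init_backend() {
  std::cout << "initialized once\n";
}

void ensure_initialized() {
  static c10::once_flag flag;
  c10::call_once(flag, init_backend);  // slow path takes the mutex only once
}

int main() {
  ensure_initialized();
  ensure_initialized();  // no-op: test_once() already returns true
  return 0;
}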
venv/lib/python3.10/site-packages/torch/include/c10/util/ConstexprCrc.h ADDED
@@ -0,0 +1,130 @@
1
+ #pragma once
2
+
3
+ #include <c10/util/IdWrapper.h>
4
+ #include <c10/util/string_view.h>
5
+ #include <cstddef>
6
+ #include <cstdint>
7
+
8
+ namespace c10::util {
9
+
10
+ namespace detail {
11
+ // NOLINTNEXTLINE(*c-arrays*)
12
+ constexpr uint64_t crc64_table[] = {
13
+ 0x0000000000000000, 0x7ad870c830358979, 0xf5b0e190606b12f2,
14
+ 0x8f689158505e9b8b, 0xc038e5739841b68f, 0xbae095bba8743ff6,
15
+ 0x358804e3f82aa47d, 0x4f50742bc81f2d04, 0xab28ecb46814fe75,
16
+ 0xd1f09c7c5821770c, 0x5e980d24087fec87, 0x24407dec384a65fe,
17
+ 0x6b1009c7f05548fa, 0x11c8790fc060c183, 0x9ea0e857903e5a08,
18
+ 0xe478989fa00bd371, 0x7d08ff3b88be6f81, 0x07d08ff3b88be6f8,
19
+ 0x88b81eabe8d57d73, 0xf2606e63d8e0f40a, 0xbd301a4810ffd90e,
20
+ 0xc7e86a8020ca5077, 0x4880fbd87094cbfc, 0x32588b1040a14285,
21
+ 0xd620138fe0aa91f4, 0xacf86347d09f188d, 0x2390f21f80c18306,
22
+ 0x594882d7b0f40a7f, 0x1618f6fc78eb277b, 0x6cc0863448deae02,
23
+ 0xe3a8176c18803589, 0x997067a428b5bcf0, 0xfa11fe77117cdf02,
24
+ 0x80c98ebf2149567b, 0x0fa11fe77117cdf0, 0x75796f2f41224489,
25
+ 0x3a291b04893d698d, 0x40f16bccb908e0f4, 0xcf99fa94e9567b7f,
26
+ 0xb5418a5cd963f206, 0x513912c379682177, 0x2be1620b495da80e,
27
+ 0xa489f35319033385, 0xde51839b2936bafc, 0x9101f7b0e12997f8,
28
+ 0xebd98778d11c1e81, 0x64b116208142850a, 0x1e6966e8b1770c73,
29
+ 0x8719014c99c2b083, 0xfdc17184a9f739fa, 0x72a9e0dcf9a9a271,
30
+ 0x08719014c99c2b08, 0x4721e43f0183060c, 0x3df994f731b68f75,
31
+ 0xb29105af61e814fe, 0xc849756751dd9d87, 0x2c31edf8f1d64ef6,
32
+ 0x56e99d30c1e3c78f, 0xd9810c6891bd5c04, 0xa3597ca0a188d57d,
33
+ 0xec09088b6997f879, 0x96d1784359a27100, 0x19b9e91b09fcea8b,
34
+ 0x636199d339c963f2, 0xdf7adabd7a6e2d6f, 0xa5a2aa754a5ba416,
35
+ 0x2aca3b2d1a053f9d, 0x50124be52a30b6e4, 0x1f423fcee22f9be0,
36
+ 0x659a4f06d21a1299, 0xeaf2de5e82448912, 0x902aae96b271006b,
37
+ 0x74523609127ad31a, 0x0e8a46c1224f5a63, 0x81e2d7997211c1e8,
38
+ 0xfb3aa75142244891, 0xb46ad37a8a3b6595, 0xceb2a3b2ba0eecec,
39
+ 0x41da32eaea507767, 0x3b024222da65fe1e, 0xa2722586f2d042ee,
40
+ 0xd8aa554ec2e5cb97, 0x57c2c41692bb501c, 0x2d1ab4dea28ed965,
41
+ 0x624ac0f56a91f461, 0x1892b03d5aa47d18, 0x97fa21650afae693,
42
+ 0xed2251ad3acf6fea, 0x095ac9329ac4bc9b, 0x7382b9faaaf135e2,
43
+ 0xfcea28a2faafae69, 0x8632586aca9a2710, 0xc9622c4102850a14,
44
+ 0xb3ba5c8932b0836d, 0x3cd2cdd162ee18e6, 0x460abd1952db919f,
45
+ 0x256b24ca6b12f26d, 0x5fb354025b277b14, 0xd0dbc55a0b79e09f,
46
+ 0xaa03b5923b4c69e6, 0xe553c1b9f35344e2, 0x9f8bb171c366cd9b,
47
+ 0x10e3202993385610, 0x6a3b50e1a30ddf69, 0x8e43c87e03060c18,
48
+ 0xf49bb8b633338561, 0x7bf329ee636d1eea, 0x012b592653589793,
49
+ 0x4e7b2d0d9b47ba97, 0x34a35dc5ab7233ee, 0xbbcbcc9dfb2ca865,
50
+ 0xc113bc55cb19211c, 0x5863dbf1e3ac9dec, 0x22bbab39d3991495,
51
+ 0xadd33a6183c78f1e, 0xd70b4aa9b3f20667, 0x985b3e827bed2b63,
52
+ 0xe2834e4a4bd8a21a, 0x6debdf121b863991, 0x1733afda2bb3b0e8,
53
+ 0xf34b37458bb86399, 0x8993478dbb8deae0, 0x06fbd6d5ebd3716b,
54
+ 0x7c23a61ddbe6f812, 0x3373d23613f9d516, 0x49aba2fe23cc5c6f,
55
+ 0xc6c333a67392c7e4, 0xbc1b436e43a74e9d, 0x95ac9329ac4bc9b5,
56
+ 0xef74e3e19c7e40cc, 0x601c72b9cc20db47, 0x1ac40271fc15523e,
57
+ 0x5594765a340a7f3a, 0x2f4c0692043ff643, 0xa02497ca54616dc8,
58
+ 0xdafce7026454e4b1, 0x3e847f9dc45f37c0, 0x445c0f55f46abeb9,
59
+ 0xcb349e0da4342532, 0xb1eceec59401ac4b, 0xfebc9aee5c1e814f,
60
+ 0x8464ea266c2b0836, 0x0b0c7b7e3c7593bd, 0x71d40bb60c401ac4,
61
+ 0xe8a46c1224f5a634, 0x927c1cda14c02f4d, 0x1d148d82449eb4c6,
62
+ 0x67ccfd4a74ab3dbf, 0x289c8961bcb410bb, 0x5244f9a98c8199c2,
63
+ 0xdd2c68f1dcdf0249, 0xa7f41839ecea8b30, 0x438c80a64ce15841,
64
+ 0x3954f06e7cd4d138, 0xb63c61362c8a4ab3, 0xcce411fe1cbfc3ca,
65
+ 0x83b465d5d4a0eece, 0xf96c151de49567b7, 0x76048445b4cbfc3c,
66
+ 0x0cdcf48d84fe7545, 0x6fbd6d5ebd3716b7, 0x15651d968d029fce,
67
+ 0x9a0d8ccedd5c0445, 0xe0d5fc06ed698d3c, 0xaf85882d2576a038,
68
+ 0xd55df8e515432941, 0x5a3569bd451db2ca, 0x20ed197575283bb3,
69
+ 0xc49581ead523e8c2, 0xbe4df122e51661bb, 0x3125607ab548fa30,
70
+ 0x4bfd10b2857d7349, 0x04ad64994d625e4d, 0x7e7514517d57d734,
71
+ 0xf11d85092d094cbf, 0x8bc5f5c11d3cc5c6, 0x12b5926535897936,
72
+ 0x686de2ad05bcf04f, 0xe70573f555e26bc4, 0x9ddd033d65d7e2bd,
73
+ 0xd28d7716adc8cfb9, 0xa85507de9dfd46c0, 0x273d9686cda3dd4b,
74
+ 0x5de5e64efd965432, 0xb99d7ed15d9d8743, 0xc3450e196da80e3a,
75
+ 0x4c2d9f413df695b1, 0x36f5ef890dc31cc8, 0x79a59ba2c5dc31cc,
76
+ 0x037deb6af5e9b8b5, 0x8c157a32a5b7233e, 0xf6cd0afa9582aa47,
77
+ 0x4ad64994d625e4da, 0x300e395ce6106da3, 0xbf66a804b64ef628,
78
+ 0xc5bed8cc867b7f51, 0x8aeeace74e645255, 0xf036dc2f7e51db2c,
79
+ 0x7f5e4d772e0f40a7, 0x05863dbf1e3ac9de, 0xe1fea520be311aaf,
80
+ 0x9b26d5e88e0493d6, 0x144e44b0de5a085d, 0x6e963478ee6f8124,
81
+ 0x21c640532670ac20, 0x5b1e309b16452559, 0xd476a1c3461bbed2,
82
+ 0xaeaed10b762e37ab, 0x37deb6af5e9b8b5b, 0x4d06c6676eae0222,
83
+ 0xc26e573f3ef099a9, 0xb8b627f70ec510d0, 0xf7e653dcc6da3dd4,
84
+ 0x8d3e2314f6efb4ad, 0x0256b24ca6b12f26, 0x788ec2849684a65f,
85
+ 0x9cf65a1b368f752e, 0xe62e2ad306bafc57, 0x6946bb8b56e467dc,
86
+ 0x139ecb4366d1eea5, 0x5ccebf68aecec3a1, 0x2616cfa09efb4ad8,
87
+ 0xa97e5ef8cea5d153, 0xd3a62e30fe90582a, 0xb0c7b7e3c7593bd8,
88
+ 0xca1fc72bf76cb2a1, 0x45775673a732292a, 0x3faf26bb9707a053,
89
+ 0x70ff52905f188d57, 0x0a2722586f2d042e, 0x854fb3003f739fa5,
90
+ 0xff97c3c80f4616dc, 0x1bef5b57af4dc5ad, 0x61372b9f9f784cd4,
91
+ 0xee5fbac7cf26d75f, 0x9487ca0fff135e26, 0xdbd7be24370c7322,
92
+ 0xa10fceec0739fa5b, 0x2e675fb4576761d0, 0x54bf2f7c6752e8a9,
93
+ 0xcdcf48d84fe75459, 0xb71738107fd2dd20, 0x387fa9482f8c46ab,
94
+ 0x42a7d9801fb9cfd2, 0x0df7adabd7a6e2d6, 0x772fdd63e7936baf,
95
+ 0xf8474c3bb7cdf024, 0x829f3cf387f8795d, 0x66e7a46c27f3aa2c,
96
+ 0x1c3fd4a417c62355, 0x935745fc4798b8de, 0xe98f353477ad31a7,
97
+ 0xa6df411fbfb21ca3, 0xdc0731d78f8795da, 0x536fa08fdfd90e51,
98
+ 0x29b7d047efec8728,
99
+ };
100
+
101
+ inline C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA uint64_t
102
+ crc64impl(uint64_t accumulator, const char* data, size_t size) {
103
+ for (size_t i = 0; i < size; ++i) {
104
+ accumulator =
105
+ crc64_table[(accumulator ^ data[i]) & 0xFF] ^ (accumulator >> 8);
106
+ }
107
+ return accumulator;
108
+ }
109
+ } // namespace detail
110
+
111
+ struct crc64_t final : IdWrapper<crc64_t, uint64_t> {
112
+ constexpr crc64_t(uint64_t checksum) : IdWrapper(checksum) {}
113
+ constexpr uint64_t checksum() const {
114
+ return this->underlyingId();
115
+ }
116
+ };
117
+
118
+ // CRC64 with Jones coefficients and an init value of 0.
119
+ inline C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA crc64_t
120
+ crc64(const char* str, size_t size) {
121
+ return crc64_t{detail::crc64impl(0, str, size)};
122
+ }
123
+
124
+ inline C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA crc64_t crc64(c10::string_view str) {
125
+ return crc64(str.data(), str.size());
126
+ }
127
+ } // namespace c10::util
128
+
129
+ // Allow usage of crc64_t in std::unordered_set
130
+ C10_DEFINE_HASH_FOR_IDWRAPPER(c10::util::crc64_t);
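A minimal sketch of computing a checksum with the table-driven CRC64 above; on toolchains where C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA expands to constexpr the call can also be evaluated at compile time:

#include <c10/util/ConstexprCrc.h>
#include <iostream>

int main() {
  // String literal binds to the c10::string_view overload of crc64().
  c10::util::crc64_t checksum = c10::util::crc64("c10::BFloat16");
  std::cout << std::hex << checksum.checksum() << "\n";
  return 0;
}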
venv/lib/python3.10/site-packages/torch/include/c10/util/Deprecated.h ADDED
@@ -0,0 +1,102 @@
1
+ #pragma once
2
+
3
+ /**
4
+ * This file provides portable macros for marking declarations
5
+ * as deprecated. You should generally use C10_DEPRECATED,
6
+ * except when marking 'using' declarations as deprecated,
7
+ * in which case you should use C10_DEFINE_DEPRECATED_USING
8
+ * (due to portability concerns).
9
+ */
10
+
11
+ // Sample usage:
12
+ //
13
+ // C10_DEPRECATED void bad_func();
14
+ // struct C10_DEPRECATED BadStruct {
15
+ // ...
16
+ // };
17
+
18
+ // NB: __cplusplus doesn't work for MSVC, so for now MSVC always uses
19
+ // the "__declspec(deprecated)" implementation and not the C++14
20
+ // "[[deprecated]]" attribute. We tried enabling "[[deprecated]]" for C++14 on
21
+ // MSVC, but ran into issues with some older MSVC versions.
22
+ #if (defined(__cplusplus) && __cplusplus >= 201402L)
23
+ #define C10_DEPRECATED [[deprecated]]
24
+ #define C10_DEPRECATED_MESSAGE(message) [[deprecated(message)]]
25
+ #elif defined(__GNUC__)
26
+ #define C10_DEPRECATED __attribute__((deprecated))
27
+ // TODO Is there some way to implement this?
28
+ #define C10_DEPRECATED_MESSAGE(message) __attribute__((deprecated))
29
+
30
+ #elif defined(_MSC_VER)
31
+ #define C10_DEPRECATED __declspec(deprecated)
32
+ #define C10_DEPRECATED_MESSAGE(message) __declspec(deprecated(message))
33
+ #else
34
+ #warning "You need to implement C10_DEPRECATED for this compiler"
35
+ #define C10_DEPRECATED
36
+ #endif
37
+
38
+ // Sample usage:
39
+ //
40
+ // C10_DEFINE_DEPRECATED_USING(BadType, int)
41
+ //
42
+ // which is the portable version of
43
+ //
44
+ // using BadType [[deprecated]] = int;
45
+
46
+ // technically [[deprecated]] syntax is from c++14 standard, but it works in
47
+ // many compilers.
48
+ #if defined(__has_cpp_attribute)
49
+ #if __has_cpp_attribute(deprecated) && !defined(__CUDACC__)
50
+ #define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) \
51
+ using TypeName [[deprecated]] = TypeThingy;
52
+ #endif
53
+ #endif
54
+
55
+ #if defined(_MSC_VER)
56
+ #if defined(__CUDACC__)
57
+ // neither [[deprecated]] nor __declspec(deprecated) work on nvcc on Windows;
58
+ // you get the error:
59
+ //
60
+ // error: attribute does not apply to any entity
61
+ //
62
+ // So we just turn the macro off in this case.
63
+ #if defined(C10_DEFINE_DEPRECATED_USING)
64
+ #undef C10_DEFINE_DEPRECATED_USING
65
+ #endif
66
+ #define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) \
67
+ using TypeName = TypeThingy;
68
+ #else
69
+ // [[deprecated]] does work in windows without nvcc, though msc doesn't support
70
+ // `__has_cpp_attribute` when c++14 is supported, otherwise
71
+ // __declspec(deprecated) is used as the alternative.
72
+ #ifndef C10_DEFINE_DEPRECATED_USING
73
+ #if defined(_MSVC_LANG) && _MSVC_LANG >= 201402L
74
+ #define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) \
75
+ using TypeName [[deprecated]] = TypeThingy;
76
+ #else
77
+ #define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) \
78
+ using TypeName = __declspec(deprecated) TypeThingy;
79
+ #endif
80
+ #endif
81
+ #endif
82
+ #endif
83
+
84
+ #if !defined(C10_DEFINE_DEPRECATED_USING) && defined(__GNUC__)
85
+ // nvcc has a bug where it doesn't understand __attribute__((deprecated))
86
+ // declarations even when the host compiler supports it. We'll only use this gcc
87
+ // attribute when not cuda, and when using a GCC compiler that doesn't support
88
+ // the c++14 syntax we checked for above (available in __GNUC__ >= 5)
89
+ #if !defined(__CUDACC__)
90
+ #define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) \
91
+ using TypeName __attribute__((deprecated)) = TypeThingy;
92
+ #else
93
+ // using cuda + gcc < 5, neither deprecated syntax is available so turning off.
94
+ #define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) \
95
+ using TypeName = TypeThingy;
96
+ #endif
97
+ #endif
98
+
99
+ #if !defined(C10_DEFINE_DEPRECATED_USING)
100
+ #warning "You need to implement C10_DEFINE_DEPRECATED_USING for this compiler"
101
+ #define C10_DEFINE_DEPRECATED_USING
102
+ #endif
venv/lib/python3.10/site-packages/torch/include/c10/util/Exception.h ADDED
@@ -0,0 +1,711 @@
1
+ #ifndef C10_UTIL_EXCEPTION_H_
2
+ #define C10_UTIL_EXCEPTION_H_
3
+
4
+ #include <c10/macros/Export.h>
5
+ #include <c10/macros/Macros.h>
6
+ #include <c10/util/StringUtil.h>
7
+
8
+ #include <cstdint>
9
+ #include <exception>
10
+ #include <string>
11
+ #include <variant>
12
+ #include <vector>
13
+
14
+ #if defined(_MSC_VER) && _MSC_VER <= 1900
15
+ #define __func__ __FUNCTION__
16
+ #endif
17
+
18
+ namespace c10 {
19
+
20
+ /// The primary ATen error class.
21
+ /// Provides a complete error message with source location information via
22
+ /// `what()`, and a more concise message via `what_without_backtrace()`.
23
+ /// Don't throw this directly; use TORCH_CHECK/TORCH_INTERNAL_ASSERT instead.
24
+ ///
25
+ /// NB: c10::Error is handled specially by the default torch to suppress the
26
+ /// backtrace, see torch/csrc/Exceptions.h
27
+ class C10_API Error : public std::exception {
28
+ // The actual error message.
29
+ std::string msg_;
30
+
31
+ // Context for the message (in order of decreasing specificity). Context will
32
+ // be automatically formatted appropriately, so it is not necessary to add
33
+ // extra leading/trailing newlines to strings inside this vector
34
+ std::vector<std::string> context_;
35
+
36
+ // The C++ backtrace at the point when this exception was raised. This
37
+ // may be empty if there is no valid backtrace. (We don't use optional
38
+ // here to reduce the dependencies this file has.)
39
+ std::string backtrace_;
40
+
41
+ // These two are derived fields from msg_stack_ and backtrace_, but we need
42
+ // fields for the strings so that we can return a const char* (as the
43
+ // signature of std::exception requires). Currently, the invariant
44
+ // is that these fields are ALWAYS populated consistently with respect
45
+ // to msg_stack_ and backtrace_.
46
+ std::string what_;
47
+ std::string what_without_backtrace_;
48
+
49
+ // This is a little debugging trick: you can stash a relevant pointer
50
+ // in caller, and then when you catch the exception, you can compare
51
+ // against pointers you have on hand to get more information about
52
+ // where the exception came from. In Caffe2, this is used to figure
53
+ // out which operator raised an exception.
54
+ const void* caller_;
55
+
56
+ public:
57
+ // PyTorch-style Error constructor. NB: the implementation of this
58
+ // is actually in Logging.cpp
59
+ Error(SourceLocation source_location, std::string msg);
60
+
61
+ // Caffe2-style error message
62
+ Error(
63
+ const char* file,
64
+ const uint32_t line,
65
+ const char* condition,
66
+ const std::string& msg,
67
+ const std::string& backtrace,
68
+ const void* caller = nullptr);
69
+
70
+ // Base constructor
71
+ Error(std::string msg, std::string backtrace, const void* caller = nullptr);
72
+
73
+ // Add some new context to the message stack. The last added context
74
+ // will be formatted at the end of the context list upon printing.
75
+ // WARNING: This method is O(n) in the size of the stack, so don't go
76
+ // wild adding a ridiculous amount of context to error messages.
77
+ void add_context(std::string msg);
78
+
79
+ const std::string& msg() const {
80
+ return msg_;
81
+ }
82
+
83
+ const std::vector<std::string>& context() const {
84
+ return context_;
85
+ }
86
+
87
+ const std::string& backtrace() const {
88
+ return backtrace_;
89
+ }
90
+
91
+ /// Returns the complete error message, including the source location.
92
+ /// The returned pointer is invalidated if you call add_context() on
93
+ /// this object.
94
+ const char* what() const noexcept override {
95
+ return what_.c_str();
96
+ }
97
+
98
+ const void* caller() const noexcept {
99
+ return caller_;
100
+ }
101
+
102
+ /// Returns only the error message string, without source location.
103
+ /// The returned pointer is invalidated if you call add_context() on
104
+ /// this object.
105
+ virtual const char* what_without_backtrace() const noexcept {
106
+ return what_without_backtrace_.c_str();
107
+ }
108
+
109
+ private:
110
+ void refresh_what();
111
+ std::string compute_what(bool include_backtrace) const;
112
+ };
113
+
114
+ class C10_API Warning {
115
+ public:
116
+ class C10_API UserWarning {};
117
+ class C10_API DeprecationWarning {};
118
+
119
+ using warning_variant_t = std::variant<UserWarning, DeprecationWarning>;
120
+
121
+ Warning(
122
+ warning_variant_t type,
123
+ const SourceLocation& source_location,
124
+ std::string msg,
125
+ bool verbatim);
126
+
127
+ Warning(
128
+ warning_variant_t type,
129
+ SourceLocation source_location,
130
+ const char* msg,
131
+ bool verbatim);
132
+
133
+ Warning(
134
+ warning_variant_t type,
135
+ SourceLocation source_location,
136
+ ::c10::detail::CompileTimeEmptyString msg,
137
+ bool verbatim);
138
+
139
+ // Getters for members
140
+ warning_variant_t type() const;
141
+ const SourceLocation& source_location() const;
142
+ const std::string& msg() const;
143
+ bool verbatim() const;
144
+
145
+ private:
146
+ // The type of warning
147
+ warning_variant_t type_;
148
+
149
+ // Where the warning happened.
150
+ SourceLocation source_location_;
151
+
152
+ // The actual warning message.
153
+ std::string msg_;
154
+
155
+ // See note: [Verbatim Warnings]
156
+ bool verbatim_;
157
+ };
158
+
159
+ using UserWarning = Warning::UserWarning;
160
+ using DeprecationWarning = Warning::DeprecationWarning;
161
+
162
+ // Issue a warning with a given message. Dispatched to the current
163
+ // warning handler.
164
+ void C10_API warn(const Warning& warning);
165
+
166
+ class C10_API WarningHandler {
167
+ public:
168
+ virtual ~WarningHandler() = default;
169
+ /// The default warning handler. Prints the message to stderr.
170
+ virtual void process(const Warning& warning);
171
+ };
172
+
173
+ namespace WarningUtils {
174
+
175
+ // Note: [Verbatim Warnings]
176
+ // Warnings originating in C++ code can appear out-of-place to Python users:
177
+ // a user runs a line in Python, but the warning references a line in C++.
178
+ // Some parts of PyTorch, like the JIT, are cognizant of this mismatch
179
+ // and take care to map warnings back to the user's program, but most
180
+ // of PyTorch simply throws a context-free warning. To allow warning
181
+ // handlers to add context where appropriate, warn takes the
182
+ // "verbatim" flag. When this is false a warning handler might append
183
+ // the C++ warning to a Python warning message that relates the warning
184
+ // back to the user's program. Callers who have already accounted for
185
+ // context in their warnings should set verbatim to true so their warnings
186
+ // appear without modification.
187
+
188
+ /// Sets the global warning handler. This is not thread-safe, so it should
189
+ /// generally be called once during initialization or while holding the GIL
190
+ /// for programs that use python.
191
+ /// User is responsible for keeping the WarningHandler alive until
192
+ /// it is not needed.
193
+ C10_API void set_warning_handler(WarningHandler* handler) noexcept(true);
194
+ /// Gets the global warning handler.
195
+ C10_API WarningHandler* get_warning_handler() noexcept(true);
196
+
197
+ class C10_API WarningHandlerGuard {
198
+ WarningHandler* prev_handler_;
199
+
200
+ public:
201
+ WarningHandlerGuard(WarningHandler* new_handler)
202
+ : prev_handler_(c10::WarningUtils::get_warning_handler()) {
203
+ c10::WarningUtils::set_warning_handler(new_handler);
204
+ }
205
+ ~WarningHandlerGuard() {
206
+ c10::WarningUtils::set_warning_handler(prev_handler_);
207
+ }
208
+ };
209
+
210
+ /// The TORCH_WARN_ONCE macro is difficult to test for. Use
211
+ /// setWarnAlways(true) to turn it into TORCH_WARN, which can be
212
+ /// tested for more easily.
213
+ C10_API void set_warnAlways(bool) noexcept(true);
214
+ C10_API bool get_warnAlways() noexcept(true);
215
+
216
+ // A RAII guard that sets warn_always (not thread-local) on
217
+ // construction, and sets it back to the original value upon destruction.
218
+ struct C10_API WarnAlways {
219
+ public:
220
+ explicit WarnAlways(bool setting = true);
221
+ ~WarnAlways();
222
+
223
+ private:
224
+ bool prev_setting;
225
+ };
226
+
227
+ } // namespace WarningUtils
228
+
229
+ // Like Error, but we always report the C++ backtrace, instead of only
230
+ // reporting when TORCH_SHOW_CPP_STACKTRACES
231
+ class C10_API ErrorAlwaysShowCppStacktrace : public Error {
232
+ using Error::Error;
233
+ const char* what_without_backtrace() const noexcept override {
234
+ return what();
235
+ }
236
+ };
237
+
238
+ // Used in ATen for out-of-bound indices that can reasonably only be detected
239
+ // lazily inside a kernel (See: advanced indexing). These turn into
240
+ // IndexError when they cross to Python.
241
+ class C10_API IndexError : public Error {
242
+ using Error::Error;
243
+ };
244
+
245
+ // Used in ATen for invalid values. These turn into
246
+ // ValueError when they cross to Python.
247
+ class C10_API ValueError : public Error {
248
+ using Error::Error;
249
+ };
250
+
251
+ // Used in ATen for invalid types. These turn into
252
+ // TypeError when they cross to Python.
253
+ class C10_API TypeError : public Error {
254
+ using Error::Error;
255
+ };
256
+
257
+ // Used in ATen for functionality that is not implemented. These turn into
258
+ // NotImplementedError when they cross to Python.
259
+ class C10_API NotImplementedError : public Error {
260
+ using Error::Error;
261
+ };
262
+
263
+ // Used in ATen for non-finite indices. These turn into
264
+ // ExitException when they cross to Python.
265
+ class C10_API EnforceFiniteError : public Error {
266
+ using Error::Error;
267
+ };
268
+
269
+ // Used in Onnxifi backend lowering. These turn into
270
+ // ExitException when they cross to Python.
271
+ class C10_API OnnxfiBackendSystemError : public Error {
272
+ using Error::Error;
273
+ };
274
+
275
+ // Used for numerical errors from the linalg module. These
276
+ // turn into LinAlgError when they cross into Python.
277
+ class C10_API LinAlgError : public Error {
278
+ using Error::Error;
279
+ };
280
+
281
+ class C10_API OutOfMemoryError : public Error {
282
+ using Error::Error;
283
+ };
284
+
285
+ // Base error type for all distributed errors.
286
+ // These turn into DistError when they cross into Python.
287
+ class C10_API DistError : public Error {
288
+ using Error::Error;
289
+ };
290
+
291
+ // Used for collective communication library errors from the distributed module.
292
+ // These turn into DistBackendError when they cross into Python.
293
+ class C10_API DistBackendError : public DistError {
294
+ using DistError::DistError;
295
+ };
296
+
297
+ // Used for errors originating from the store.
298
+ // These turn into DistStoreError when they cross into Python.
299
+ class C10_API DistStoreError : public DistError {
300
+ using DistError::DistError;
301
+ };
302
+
303
+ // Used for errors originating from the TCP/IP stack and not from collective
304
+ // libraries. These turn into DistNetworkError when they cross into Python.
305
+ class C10_API DistNetworkError : public DistError {
306
+ using DistError::DistError;
307
+ };
308
+
309
+ // A utility function to return an exception std::string by prepending its
310
+ // exception type before its what() content
311
+ C10_API std::string GetExceptionString(const std::exception& e);
312
+
313
+ } // namespace c10
314
+
315
+ // Private helper macro for implementing TORCH_INTERNAL_ASSERT and TORCH_CHECK
316
+ //
317
+ // Note: In the debug build With MSVC, __LINE__ might be of long type (a.k.a
318
+ // int32_t), which is different from the definition of `SourceLocation` that
319
+ // requires unsigned int (a.k.a uint32_t) and may cause a compile error with the
320
+ // message: error C2397: conversion from 'long' to 'uint32_t' requires a
321
+ // narrowing conversion. Here the static cast is used to pass the build. If this
322
+ // is used inside a lambda the __func__ macro expands to operator(), which isn't
323
+ // very useful, but hard to fix in a macro so suppressing the warning.
324
+ #define C10_THROW_ERROR(err_type, msg) \
325
+ throw ::c10::err_type( \
326
+ {__func__, __FILE__, static_cast<uint32_t>(__LINE__)}, msg)
327
+
328
+ #define C10_BUILD_ERROR(err_type, msg) \
329
+ ::c10::err_type({__func__, __FILE__, static_cast<uint32_t>(__LINE__)}, msg)
330
+
331
+ // Private helper macro for workaround MSVC misexpansion of nested macro
332
+ // invocations involving __VA_ARGS__. See
333
+ // https://stackoverflow.com/questions/5134523/msvc-doesnt-expand-va-args-correctly
334
+ #define C10_EXPAND_MSVC_WORKAROUND(x) x
335
+
336
+ // On nvcc, C10_UNLIKELY thwarts missing return statement analysis. In cases
337
+ // where the unlikely expression may be a constant, use this macro to ensure
338
+ // return statement analysis keeps working (at the cost of not getting the
339
+ // likely/unlikely annotation on nvcc).
340
+ // https://github.com/pytorch/pytorch/issues/21418
341
+ //
342
+ // Currently, this is only used in the error reporting macros below. If you
343
+ // want to use it more generally, move me to Macros.h
344
+ //
345
+ // TODO: Brian Vaughan observed that we might be able to get this to work on
346
+ // nvcc by writing some sort of C++ overload that distinguishes constexpr inputs
347
+ // from non-constexpr. Since there isn't any evidence that losing C10_UNLIKELY
348
+ // in nvcc is causing us perf problems, this is not yet implemented, but this
349
+ // might be an interesting piece of C++ code for an intrepid bootcamper to
350
+ // write.
351
+ #if defined(__CUDACC__)
352
+ #define C10_UNLIKELY_OR_CONST(e) e
353
+ #else
354
+ #define C10_UNLIKELY_OR_CONST(e) C10_UNLIKELY(e)
355
+ #endif
356
+
357
+ // ----------------------------------------------------------------------------
358
+ // Error reporting macros
359
+ // ----------------------------------------------------------------------------
360
+
361
+ #ifdef STRIP_ERROR_MESSAGES
362
+ #define TORCH_RETHROW(e, ...) throw
363
+ #else
364
+ #define TORCH_RETHROW(e, ...) \
365
+ do { \
366
+ e.add_context(::c10::str(__VA_ARGS__)); \
367
+ throw; \
368
+ } while (false)
369
+ #endif
370
+
371
+ // A utility macro to provide assert()-like functionality; that is, enforcement
372
+ // of internal invariants in code. It supports an arbitrary number of extra
373
+ // arguments (evaluated only on failure), which will be printed in the assert
374
+ // failure message using operator<< (this is useful to print some variables
375
+ // which may be useful for debugging.)
376
+ //
377
+ // Usage:
378
+ // TORCH_INTERNAL_ASSERT(should_be_true);
379
+ // TORCH_INTERNAL_ASSERT(x == 0, "x = ", x);
380
+ //
381
+ // Assuming no bugs in PyTorch, the conditions tested by this macro should
382
+ // always be true; e.g., it should be possible to disable all of these
383
+ // conditions without changing observable user behavior. If you would like to
384
+ // do error reporting for user input, please use TORCH_CHECK instead.
385
+ //
386
+ // NOTE: It is SAFE to use this macro in production code; on failure, this
387
+ // simply raises an exception, it does NOT unceremoniously quit the process
388
+ // (unlike assert()).
389
+ //
390
+ #ifdef STRIP_ERROR_MESSAGES
391
+ #define TORCH_INTERNAL_ASSERT(cond, ...) \
392
+ if (C10_UNLIKELY_OR_CONST(!(cond))) { \
393
+ ::c10::detail::torchCheckFail( \
394
+ __func__, \
395
+ __FILE__, \
396
+ static_cast<uint32_t>(__LINE__), \
397
+ #cond " INTERNAL ASSERT FAILED at " C10_STRINGIZE(__FILE__)); \
398
+ }
399
+ #else
400
+ // It would be nice if we could build a combined string literal out of
401
+ // the TORCH_INTERNAL_ASSERT prefix and a user-provided string literal
402
+ // as the first argument, but there doesn't seem to be any good way to
403
+ // do that while still supporting having a first argument that isn't a
404
+ // string literal.
405
+ #define TORCH_INTERNAL_ASSERT(cond, ...) \
406
+ if (C10_UNLIKELY_OR_CONST(!(cond))) { \
407
+ ::c10::detail::torchInternalAssertFail( \
408
+ __func__, \
409
+ __FILE__, \
410
+ static_cast<uint32_t>(__LINE__), \
411
+ #cond \
412
+ " INTERNAL ASSERT FAILED at " C10_STRINGIZE(__FILE__) ":" C10_STRINGIZE( \
413
+ __LINE__) ", please report a bug to PyTorch. ", \
414
+ c10::str(__VA_ARGS__)); \
415
+ }
416
+ #endif
417
+
418
+ // A utility macro to make it easier to test for error conditions from user
419
+ // input. Like TORCH_INTERNAL_ASSERT, it supports an arbitrary number of extra
420
+ // arguments (evaluated only on failure), which will be printed in the error
421
+ // message using operator<< (e.g., you can pass any object which has
422
+ // operator<< defined. Most objects in PyTorch have these definitions!)
423
+ //
424
+ // Usage:
425
+ // TORCH_CHECK(should_be_true); // A default error message will be provided
426
+ // // in this case; but we recommend writing an
427
+ // // explicit error message, as it is more
428
+ // // user friendly.
429
+ // TORCH_CHECK(x == 0, "Expected x to be 0, but got ", x);
430
+ //
431
+ // On failure, this macro will raise an exception. If this exception propagates
432
+ // to Python, it will convert into a Python RuntimeError.
433
+ //
434
+ // NOTE: It is SAFE to use this macro in production code; on failure, this
435
+ // simply raises an exception, it does NOT unceremoniously quit the process
436
+ // (unlike CHECK() from glog.)
437
+ //
438
+ #define TORCH_CHECK_WITH(error_t, cond, ...) \
439
+ TORCH_CHECK_WITH_MSG(error_t, cond, "", __VA_ARGS__)
440
+
441
+ #ifdef STRIP_ERROR_MESSAGES
442
+ #define TORCH_CHECK_MSG(cond, type, ...) \
443
+ (#cond #type " CHECK FAILED at " C10_STRINGIZE(__FILE__))
444
+ #define TORCH_CHECK_WITH_MSG(error_t, cond, type, ...) \
445
+ if (C10_UNLIKELY_OR_CONST(!(cond))) { \
446
+ C10_THROW_ERROR(Error, TORCH_CHECK_MSG(cond, type, __VA_ARGS__)); \
447
+ }
448
+ #else
449
+
450
+ namespace c10::detail {
451
+ template <typename... Args>
452
+ decltype(auto) torchCheckMsgImpl(const char* /*msg*/, const Args&... args) {
453
+ return ::c10::str(args...);
454
+ }
455
+ inline C10_API const char* torchCheckMsgImpl(const char* msg) {
456
+ return msg;
457
+ }
458
+ // If there is just 1 user-provided C-string argument, use it.
459
+ inline C10_API const char* torchCheckMsgImpl(
460
+ const char* /*msg*/,
461
+ const char* args) {
462
+ return args;
463
+ }
464
+ } // namespace c10::detail
465
+
466
+ #define TORCH_CHECK_MSG(cond, type, ...) \
467
+ (::c10::detail::torchCheckMsgImpl( \
468
+ "Expected " #cond \
469
+ " to be true, but got false. " \
470
+ "(Could this error message be improved? If so, " \
471
+ "please report an enhancement request to PyTorch.)", \
472
+ ##__VA_ARGS__))
473
+ #define TORCH_CHECK_WITH_MSG(error_t, cond, type, ...) \
474
+ if (C10_UNLIKELY_OR_CONST(!(cond))) { \
475
+ C10_THROW_ERROR(error_t, TORCH_CHECK_MSG(cond, type, __VA_ARGS__)); \
476
+ }
477
+ #endif
478
+
479
+ namespace c10::detail {
480
+
481
+ [[noreturn]] C10_API void torchCheckFail(
482
+ const char* func,
483
+ const char* file,
484
+ uint32_t line,
485
+ const std::string& msg);
486
+ [[noreturn]] C10_API void torchCheckFail(
487
+ const char* func,
488
+ const char* file,
489
+ uint32_t line,
490
+ const char* msg);
491
+
492
+ // The c10::str() call that creates userMsg can have 1 of 3 return
493
+ // types depending on the number and types of arguments passed to
494
+ // TORCH_INTERNAL_ASSERT. 0 arguments will get a
495
+ // CompileTimeEmptyString, 1 const char * will be passed straight
496
+ // through, and anything else will get converted to std::string.
497
+ [[noreturn]] C10_API void torchInternalAssertFail(
498
+ const char* func,
499
+ const char* file,
500
+ uint32_t line,
501
+ const char* condMsg,
502
+ const char* userMsg);
503
+ [[noreturn]] inline C10_API void torchInternalAssertFail(
504
+ const char* func,
505
+ const char* file,
506
+ uint32_t line,
507
+ const char* condMsg,
508
+ ::c10::detail::CompileTimeEmptyString /*userMsg*/) {
509
+ torchCheckFail(func, file, line, condMsg);
510
+ }
511
+ [[noreturn]] C10_API void torchInternalAssertFail(
512
+ const char* func,
513
+ const char* file,
514
+ uint32_t line,
515
+ const char* condMsg,
516
+ const std::string& userMsg);
517
+
518
+ } // namespace c10::detail
519
+
520
+ #ifdef STRIP_ERROR_MESSAGES
521
+ #define TORCH_CHECK(cond, ...) \
522
+ if (C10_UNLIKELY_OR_CONST(!(cond))) { \
523
+ ::c10::detail::torchCheckFail( \
524
+ __func__, \
525
+ __FILE__, \
526
+ static_cast<uint32_t>(__LINE__), \
527
+ TORCH_CHECK_MSG(cond, "", __VA_ARGS__)); \
528
+ }
529
+ #else
530
+ #define TORCH_CHECK(cond, ...) \
531
+ if (C10_UNLIKELY_OR_CONST(!(cond))) { \
532
+ ::c10::detail::torchCheckFail( \
533
+ __func__, \
534
+ __FILE__, \
535
+ static_cast<uint32_t>(__LINE__), \
536
+ TORCH_CHECK_MSG(cond, "", ##__VA_ARGS__)); \
537
+ }
538
+ #endif
539
+
540
+ // A utility macro that does what `TORCH_CHECK` does if compiled in the host
541
+ // code, otherwise does nothing. Supposed to be used in the code shared between
542
+ // host and device code as an alternative for `TORCH_CHECK`.
543
+ #if defined(__CUDACC__) || defined(__HIPCC__)
544
+ #define TORCH_CHECK_IF_NOT_ON_CUDA(cond, ...)
545
+ #else
546
+ #define TORCH_CHECK_IF_NOT_ON_CUDA(cond, ...) TORCH_CHECK(cond, ##__VA_ARGS__)
547
+ #endif
548
+
549
+ // Debug only version of TORCH_INTERNAL_ASSERT. This macro only checks in debug
550
+ // build, and does nothing in release build. It is appropriate to use
551
+ // in situations where you want to add an assert to a hotpath, but it is
552
+ // too expensive to run this assert on production builds.
553
+ #ifdef NDEBUG
554
+ // Optimized version - generates no code.
555
+ #define TORCH_INTERNAL_ASSERT_DEBUG_ONLY(...) \
556
+ while (false) \
557
+ C10_EXPAND_MSVC_WORKAROUND(TORCH_INTERNAL_ASSERT(__VA_ARGS__))
558
+ #else
559
+ #define TORCH_INTERNAL_ASSERT_DEBUG_ONLY(...) \
560
+ C10_EXPAND_MSVC_WORKAROUND(TORCH_INTERNAL_ASSERT(__VA_ARGS__))
561
+ #endif
562
+
563
+ // TODO: We're going to get a lot of similar looking string literals
564
+ // this way; check if this actually affects binary size.
565
+
566
+ // Like TORCH_CHECK, but raises LinAlgError instead of Error.
567
+ #define TORCH_CHECK_LINALG(cond, ...) \
568
+ TORCH_CHECK_WITH_MSG(LinAlgError, cond, "LINALG", __VA_ARGS__)
569
+
570
+ // Like TORCH_CHECK, but raises IndexErrors instead of Errors.
571
+ #define TORCH_CHECK_INDEX(cond, ...) \
572
+ TORCH_CHECK_WITH_MSG(IndexError, cond, "INDEX", __VA_ARGS__)
573
+
574
+ // Like TORCH_CHECK, but raises ValueErrors instead of Errors.
575
+ #define TORCH_CHECK_VALUE(cond, ...) \
576
+ TORCH_CHECK_WITH_MSG(ValueError, cond, "VALUE", __VA_ARGS__)
577
+
578
+ // Like TORCH_CHECK, but raises TypeErrors instead of Errors.
579
+ #define TORCH_CHECK_TYPE(cond, ...) \
580
+ TORCH_CHECK_WITH_MSG(TypeError, cond, "TYPE", __VA_ARGS__)
581
+
582
+ // Like TORCH_CHECK, but raises NotImplementedErrors instead of Errors.
583
+ #define TORCH_CHECK_NOT_IMPLEMENTED(cond, ...) \
584
+ TORCH_CHECK_WITH_MSG(NotImplementedError, cond, "TYPE", __VA_ARGS__)
585
+
586
+ #define TORCH_CHECK_ALWAYS_SHOW_CPP_STACKTRACE(cond, ...) \
587
+ TORCH_CHECK_WITH_MSG( \
588
+ ErrorAlwaysShowCppStacktrace, cond, "TYPE", ##__VA_ARGS__)
589
+
590
+ #ifdef STRIP_ERROR_MESSAGES
591
+ #define WARNING_MESSAGE_STRING(...) \
592
+ ::c10::detail::CompileTimeEmptyString {}
593
+ #else
594
+ #define WARNING_MESSAGE_STRING(...) ::c10::str(__VA_ARGS__)
595
+ #endif
596
+
597
+ // Report a warning to the user. Accepts an arbitrary number of extra
598
+ // arguments which are concatenated into the warning message using operator<<
599
+ //
600
+ #ifdef DISABLE_WARN
601
+ #define _TORCH_WARN_WITH(...) ((void)0);
602
+ #else
603
+ #define _TORCH_WARN_WITH(warning_t, ...) \
604
+ ::c10::warn(::c10::Warning( \
605
+ warning_t(), \
606
+ {__func__, __FILE__, static_cast<uint32_t>(__LINE__)}, \
607
+ WARNING_MESSAGE_STRING(__VA_ARGS__), \
608
+ false));
609
+ #endif
610
+
611
+ #define TORCH_WARN(...) _TORCH_WARN_WITH(::c10::UserWarning, __VA_ARGS__);
612
+
613
+ #define TORCH_WARN_DEPRECATION(...) \
614
+ _TORCH_WARN_WITH(::c10::DeprecationWarning, __VA_ARGS__);
615
+
616
+ // Report a warning to the user only once. Accepts an arbitrary number of extra
617
+ // arguments which are concatenated into the warning message using operator<<
618
+ //
619
+ #define _TORCH_WARN_ONCE(...) \
620
+ C10_UNUSED static const auto C10_ANONYMOUS_VARIABLE(torch_warn_once_) = \
621
+ [&] { \
622
+ TORCH_WARN(__VA_ARGS__); \
623
+ return true; \
624
+ }()
625
+
626
+ #ifdef DISABLE_WARN
627
+ #define TORCH_WARN_ONCE(...) ((void)0);
628
+ #else
629
+ #define TORCH_WARN_ONCE(...) \
630
+ if (::c10::WarningUtils::get_warnAlways()) { \
631
+ TORCH_WARN(__VA_ARGS__); \
632
+ } else { \
633
+ _TORCH_WARN_ONCE(__VA_ARGS__); \
634
+ }
635
+ #endif
636
+
637
+ // Report an error with a specific argument
638
+ // NOTE: using the argument name in TORCH_CHECK's message is preferred
639
+ #define TORCH_CHECK_ARG(cond, argN, ...) \
640
+ TORCH_CHECK(cond, "invalid argument ", argN, ": ", __VA_ARGS__)
641
+
642
+ // ----------------------------------------------------------------------------
643
+ // Deprecated macros
644
+ // ----------------------------------------------------------------------------
645
+
646
+ namespace c10::detail {
647
+
648
+ /*
649
+ // Deprecation disabled until we fix sites in our codebase
650
+ C10_DEPRECATED_MESSAGE("AT_ERROR(msg) is deprecated, use TORCH_CHECK(false, msg)
651
+ instead.")
652
+ */
653
+ inline void deprecated_AT_ERROR() {}
654
+
655
+ /*
656
+ // Deprecation disabled until we fix sites in our codebase
657
+ C10_DEPRECATED_MESSAGE("AT_ASSERT is deprecated, if you mean to indicate an
658
+ internal invariant failure, use " \
659
+ "TORCH_INTERNAL_ASSERT instead; if you mean to do user
660
+ error checking, use " \ "TORCH_CHECK. See
661
+ https://github.com/pytorch/pytorch/issues/20287 for more details.")
662
+ */
663
+ inline void deprecated_AT_ASSERT() {}
664
+
665
+ /*
666
+ // Deprecation disabled until we fix sites in our codebase
667
+ C10_DEPRECATED_MESSAGE("AT_ASSERTM is deprecated, if you mean to indicate an
668
+ internal invariant failure, use " \
669
+ "TORCH_INTERNAL_ASSERT instead; if you mean to do user
670
+ error checking, use " \ "TORCH_CHECK. See
671
+ https://github.com/pytorch/pytorch/issues/20287 for more details.")
672
+ */
673
+ inline void deprecated_AT_ASSERTM() {}
674
+
675
+ } // namespace c10::detail
676
+
677
+ // Deprecated alias; this alias was deprecated because people kept mistakenly
678
+ // using it for user error checking. Use TORCH_INTERNAL_ASSERT or TORCH_CHECK
679
+ // instead. See https://github.com/pytorch/pytorch/issues/20287 for more
680
+ // details.
681
+ #define AT_ASSERT(...) \
682
+ do { \
683
+ ::c10::detail::deprecated_AT_ASSERT(); \
684
+ C10_EXPAND_MSVC_WORKAROUND(TORCH_INTERNAL_ASSERT(__VA_ARGS__)); \
685
+ } while (false)
686
+
687
+ // Deprecated alias, like AT_ASSERT. The new TORCH_INTERNAL_ASSERT macro
688
+ // supports both 0-ary and variadic calls, so having a separate
689
+ // message-accepting macro is not necessary.
690
+ //
691
+ // NB: we MUST include cond explicitly here, as MSVC will miscompile the macro
692
+ // expansion, shunting all of __VA_ARGS__ to cond. An alternate workaround
693
+ // can be seen at
694
+ // https://stackoverflow.com/questions/5134523/msvc-doesnt-expand-va-args-correctly
695
+ #define AT_ASSERTM(cond, ...) \
696
+ do { \
697
+ ::c10::detail::deprecated_AT_ASSERTM(); \
698
+ C10_EXPAND_MSVC_WORKAROUND(TORCH_INTERNAL_ASSERT(cond, __VA_ARGS__)); \
699
+ } while (false)
700
+
701
+ // Deprecated alias; this alias was deprecated because it represents extra API
702
+ // surface that makes it hard for people to understand what macro to use.
703
+ // Use TORCH_CHECK(false, ...) or TORCH_INTERNAL_ASSERT(false, ...) to
704
+ // unconditionally fail at a line of code.
705
+ #define AT_ERROR(...) \
706
+ do { \
707
+ ::c10::detail::deprecated_AT_ERROR(); \
708
+ C10_EXPAND_MSVC_WORKAROUND(TORCH_CHECK(false, ::c10::str(__VA_ARGS__))); \
709
+ } while (false)
710
+
711
+ #endif // C10_UTIL_EXCEPTION_H_
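A minimal usage sketch for the user-facing error macros defined above (checked_divide is a hypothetical helper):

#include <c10/util/Exception.h>
#include <cstdint>
#include <iostream>

int64_t checked_divide(int64_t a, int64_t b) {
  // Extra arguments are stringified with operator<< only on failure.
  TORCH_CHECK(b != 0, "Expected a non-zero divisor, but got b = ", b);
  TORCH_CHECK_VALUE(a >= 0, "Expected a to be non-negative, but got ", a);
  return a / b;
}

int main() {
  try {
    checked_divide(10, 0);
  } catch (const c10::Error& e) {
    // what() includes source location; what_without_backtrace() does not.
    std::cerr << e.what_without_backtrace() << "\n";
  }
  return 0;
}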
venv/lib/python3.10/site-packages/torch/include/c10/util/FbcodeMaps.h ADDED
@@ -0,0 +1,29 @@
+ #ifndef C10_UTIL_FBCODEMAPS_H_
+ #define C10_UTIL_FBCODEMAPS_H_
+
+ // Map typedefs so that we can use folly's F14 maps in fbcode without
+ // taking a folly dependency.
+
+ #ifdef FBCODE_CAFFE2
+ #include <folly/container/F14Map.h>
+ #include <folly/container/F14Set.h>
+ #else
+ #include <unordered_map>
+ #include <unordered_set>
+ #endif
+
+ namespace c10 {
+ #ifdef FBCODE_CAFFE2
+ template <typename Key, typename Value>
+ using FastMap = folly::F14FastMap<Key, Value>;
+ template <typename Key>
+ using FastSet = folly::F14FastSet<Key>;
+ #else
+ template <typename Key, typename Value>
+ using FastMap = std::unordered_map<Key, Value>;
+ template <typename Key>
+ using FastSet = std::unordered_set<Key>;
+ #endif
+ } // namespace c10
+
+ #endif // C10_UTIL_FBCODEMAPS_H_
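As a quick illustration of the typedefs just added, a minimal sketch (assuming a standalone translation unit that can see the c10 headers; outside fbcode builds FastMap/FastSet are plain std::unordered_map/std::unordered_set):

#include <c10/util/FbcodeMaps.h>
#include <iostream>
#include <string>

int main() {
  c10::FastMap<std::string, int> byte_widths;  // F14FastMap in fbcode, unordered_map otherwise
  byte_widths["Float8_e4m3fn"] = 1;
  byte_widths["Half"] = 2;

  c10::FastSet<std::string> seen;
  seen.insert("Half");

  std::cout << byte_widths["Half"] << " " << seen.count("Half") << "\n";
  return 0;
}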
venv/lib/python3.10/site-packages/torch/include/c10/util/Flags.h ADDED
@@ -0,0 +1,226 @@
1
+ #ifndef C10_UTIL_FLAGS_H_
2
+ #define C10_UTIL_FLAGS_H_
3
+
4
+ /* Commandline flags support for C10.
5
+ *
6
+ * This is a portable commandline flags tool for c10, so we can optionally
7
+ * choose to use gflags or a lightweight custom implementation if gflags is
8
+ * not possible on a certain platform. If you have gflags installed, set the
9
+ * macro C10_USE_GFLAGS will seamlessly route everything to gflags.
10
+ *
11
+ * To define a flag foo of type bool default to true, do the following in the
12
+ * *global* namespace:
13
+ * C10_DEFINE_bool(foo, true, "An example.");
14
+ *
15
+ * To use it in another .cc file, you can use C10_DECLARE_* as follows:
16
+ * C10_DECLARE_bool(foo);
17
+ *
18
+ * In both cases, you can then access the flag via FLAGS_foo.
19
+ *
20
+ * It is recommended that you build with gflags. To learn more about the flags
21
+ * usage, refer to the gflags page here:
22
+ *
23
+ * https://gflags.github.io/gflags/
24
+ *
25
+ * Note about Python users / devs: gflags is initiated from a C++ function
26
+ * ParseCommandLineFlags, and is usually done in native binaries in the main
27
+ * function. As Python does not have a modifiable main function, it is usually
28
+ * difficult to change the flags after Python starts. Hence, it is recommended
29
+ * that one sets the default value of the flags to one that's acceptable in
30
+ * general - that will allow Python to run without wrong flags.
31
+ */
32
+
33
+ #include <c10/macros/Export.h>
34
+ #include <string>
35
+
36
+ #include <c10/util/Registry.h>
37
+
38
+ namespace c10 {
39
+ /**
40
+ * Sets the usage message when a commandline tool is called with "--help".
41
+ */
42
+ C10_API void SetUsageMessage(const std::string& str);
43
+
44
+ /**
45
+ * Returns the usage message for the commandline tool set by SetUsageMessage.
46
+ */
47
+ C10_API const char* UsageMessage();
48
+
49
+ /**
50
+ * Parses the commandline flags.
51
+ *
52
+ * This command parses all the commandline arguments passed in via pargc
53
+ * and argv. Once it is finished, partc and argv will contain the remaining
54
+ * commandline args that c10 does not deal with. Note that following
55
+ * convention, argv[0] contains the binary name and is not parsed.
56
+ */
57
+ C10_API bool ParseCommandLineFlags(int* pargc, char*** pargv);
58
+
59
+ /**
60
+ * Checks if the commandline flags has already been passed.
61
+ */
62
+ C10_API bool CommandLineFlagsHasBeenParsed();
63
+
64
+ } // namespace c10
65
+
66
+ ////////////////////////////////////////////////////////////////////////////////
67
+ // Below are gflags and non-gflags specific implementations.
68
+ // In general, they define the following macros for one to declare (use
69
+ // C10_DECLARE) or define (use C10_DEFINE) flags:
70
+ // C10_{DECLARE,DEFINE}_{int,int64,double,bool,string}
71
+ ////////////////////////////////////////////////////////////////////////////////
72
+
73
+ #ifdef C10_USE_GFLAGS
74
+
75
+ ////////////////////////////////////////////////////////////////////////////////
76
+ // Begin gflags section: most functions are basically rerouted to gflags.
77
+ ////////////////////////////////////////////////////////////////////////////////
78
+ #include <gflags/gflags.h>
79
+
80
+ // C10 uses hidden visibility by default. However, in gflags, it only uses
81
+ // export on Windows platform (with dllexport) but not on linux/mac (with
82
+ // default visibility). As a result, to ensure that we are always exporting
83
+ // global variables, we will redefine the GFLAGS_DLL_DEFINE_FLAG macro if we
84
+ // are building C10 as a shared library.
85
+ // This has to be done after the inclusion of gflags, because some early
86
+ // versions of gflags.h (e.g. 2.0 on ubuntu 14.04) directly defines the
87
+ // macros, so we need to do definition after gflags is done.
88
+ #ifdef GFLAGS_DLL_DEFINE_FLAG
89
+ #undef GFLAGS_DLL_DEFINE_FLAG
90
+ #endif // GFLAGS_DLL_DEFINE_FLAG
91
+ #ifdef GFLAGS_DLL_DECLARE_FLAG
92
+ #undef GFLAGS_DLL_DECLARE_FLAG
93
+ #endif // GFLAGS_DLL_DECLARE_FLAG
94
+ #define GFLAGS_DLL_DEFINE_FLAG C10_EXPORT
95
+ #define GFLAGS_DLL_DECLARE_FLAG C10_IMPORT
96
+
97
+ // gflags before 2.0 uses namespace google and after 2.1 uses namespace gflags.
98
+ // Using GFLAGS_GFLAGS_H_ to capture this change.
99
+ #ifndef GFLAGS_GFLAGS_H_
100
+ namespace gflags = google;
101
+ #endif // GFLAGS_GFLAGS_H_
102
+
103
+ // Motivation about the gflags wrapper:
104
+ // (1) We would need to make sure that the gflags version and the non-gflags
105
+ // version of C10 are going to expose the same flags abstraction. One should
106
+ // explicitly use FLAGS_flag_name to access the flags.
107
+ // (2) For flag names, it is recommended to start with c10_ to distinguish it
108
+ // from regular gflags flags. For example, do
109
+ // C10_DEFINE_BOOL(c10_my_flag, true, "An example");
110
+ // to allow one to use FLAGS_c10_my_flag.
111
+ // (3) Gflags has a design issue that does not properly expose the global flags,
112
+ // if one builds the library with -fvisibility=hidden. The current gflags (as of
113
+ // Aug 2018) only deals with the Windows case using dllexport, and not the Linux
114
+ // counterparts. As a result, we will explicitly use C10_EXPORT to export the
115
+ // flags defined in C10. This is done via a global reference, so the flag
116
+ // itself is not duplicated - under the hood it is the same global gflags flag.
117
+ #define C10_GFLAGS_DEF_WRAPPER(type, real_type, name, default_value, help_str) \
118
+ DEFINE_##type(name, default_value, help_str);
119
+
120
+ #define C10_DEFINE_int(name, default_value, help_str) \
121
+ C10_GFLAGS_DEF_WRAPPER(int32, gflags::int32, name, default_value, help_str)
122
+ #define C10_DEFINE_int32(name, default_value, help_str) \
123
+ C10_DEFINE_int(name, default_value, help_str)
124
+ #define C10_DEFINE_int64(name, default_value, help_str) \
125
+ C10_GFLAGS_DEF_WRAPPER(int64, gflags::int64, name, default_value, help_str)
126
+ #define C10_DEFINE_double(name, default_value, help_str) \
127
+ C10_GFLAGS_DEF_WRAPPER(double, double, name, default_value, help_str)
128
+ #define C10_DEFINE_bool(name, default_value, help_str) \
129
+ C10_GFLAGS_DEF_WRAPPER(bool, bool, name, default_value, help_str)
130
+ #define C10_DEFINE_string(name, default_value, help_str) \
131
+ C10_GFLAGS_DEF_WRAPPER(string, ::fLS::clstring, name, default_value, help_str)
132
+
133
+ // DECLARE_typed_var should be used in header files and in the global namespace.
134
+ #define C10_GFLAGS_DECLARE_WRAPPER(type, real_type, name) DECLARE_##type(name);
135
+
136
+ #define C10_DECLARE_int(name) \
137
+ C10_GFLAGS_DECLARE_WRAPPER(int32, gflags::int32, name)
138
+ #define C10_DECLARE_int32(name) C10_DECLARE_int(name)
139
+ #define C10_DECLARE_int64(name) \
140
+ C10_GFLAGS_DECLARE_WRAPPER(int64, gflags::int64, name)
141
+ #define C10_DECLARE_double(name) \
142
+ C10_GFLAGS_DECLARE_WRAPPER(double, double, name)
143
+ #define C10_DECLARE_bool(name) C10_GFLAGS_DECLARE_WRAPPER(bool, bool, name)
144
+ #define C10_DECLARE_string(name) \
145
+ C10_GFLAGS_DECLARE_WRAPPER(string, ::fLS::clstring, name)
146
+
147
+ ////////////////////////////////////////////////////////////////////////////////
148
+ // End gflags section.
149
+ ////////////////////////////////////////////////////////////////////////////////
150
+
151
+ #else // C10_USE_GFLAGS
152
+
153
+ ////////////////////////////////////////////////////////////////////////////////
154
+ // Begin non-gflags section: providing equivalent functionality.
155
+ ////////////////////////////////////////////////////////////////////////////////
156
+
157
+ namespace c10 {
158
+
159
+ class C10_API C10FlagParser {
160
+ public:
161
+ bool success() {
162
+ return success_;
163
+ }
164
+
165
+ protected:
166
+ template <typename T>
167
+ bool Parse(const std::string& content, T* value);
168
+ bool success_{false};
169
+ };
170
+
171
+ C10_DECLARE_REGISTRY(C10FlagsRegistry, C10FlagParser, const std::string&);
172
+
173
+ } // namespace c10
174
+
175
+ // The macros are defined outside the c10 namespace. In your code, you should
176
+ // write the C10_DEFINE_* and C10_DECLARE_* macros outside any namespace
177
+ // as well.
178
+
179
+ #define C10_DEFINE_typed_var(type, name, default_value, help_str) \
180
+ C10_EXPORT type FLAGS_##name = default_value; \
181
+ namespace c10 { \
182
+ namespace { \
183
+ class C10FlagParser_##name : public C10FlagParser { \
184
+ public: \
185
+ explicit C10FlagParser_##name(const std::string& content) { \
186
+ success_ = C10FlagParser::Parse<type>(content, &FLAGS_##name); \
187
+ } \
188
+ }; \
189
+ } \
190
+ RegistererC10FlagsRegistry g_C10FlagsRegistry_##name( \
191
+ #name, \
192
+ C10FlagsRegistry(), \
193
+ RegistererC10FlagsRegistry::DefaultCreator<C10FlagParser_##name>, \
194
+ "(" #type ", default " #default_value ") " help_str); \
195
+ }
196
+
197
+ #define C10_DEFINE_int(name, default_value, help_str) \
198
+ C10_DEFINE_typed_var(int, name, default_value, help_str)
199
+ #define C10_DEFINE_int32(name, default_value, help_str) \
200
+ C10_DEFINE_int(name, default_value, help_str)
201
+ #define C10_DEFINE_int64(name, default_value, help_str) \
202
+ C10_DEFINE_typed_var(int64_t, name, default_value, help_str)
203
+ #define C10_DEFINE_double(name, default_value, help_str) \
204
+ C10_DEFINE_typed_var(double, name, default_value, help_str)
205
+ #define C10_DEFINE_bool(name, default_value, help_str) \
206
+ C10_DEFINE_typed_var(bool, name, default_value, help_str)
207
+ #define C10_DEFINE_string(name, default_value, help_str) \
208
+ C10_DEFINE_typed_var(std::string, name, default_value, help_str)
209
+
210
+ // DECLARE_typed_var should be used in header files and in the global namespace.
211
+ #define C10_DECLARE_typed_var(type, name) C10_API extern type FLAGS_##name
212
+
213
+ #define C10_DECLARE_int(name) C10_DECLARE_typed_var(int, name)
214
+ #define C10_DECLARE_int32(name) C10_DECLARE_int(name)
215
+ #define C10_DECLARE_int64(name) C10_DECLARE_typed_var(int64_t, name)
216
+ #define C10_DECLARE_double(name) C10_DECLARE_typed_var(double, name)
217
+ #define C10_DECLARE_bool(name) C10_DECLARE_typed_var(bool, name)
218
+ #define C10_DECLARE_string(name) C10_DECLARE_typed_var(std::string, name)
219
+
220
+ ////////////////////////////////////////////////////////////////////////////////
221
+ // End non-gflags section.
222
+ ////////////////////////////////////////////////////////////////////////////////
223
+
224
+ #endif // C10_USE_GFLAGS
225
+
226
+ #endif // C10_UTIL_FLAGS_H_
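A minimal sketch of the define/declare/parse flow documented in the header comment above. The flag name, default, and usage string are invented for illustration; the program is assumed to link against the c10 library so that ParseCommandLineFlags and the flag registry are available.

#include <c10/util/Flags.h>
#include <iostream>

// Define the flag in the global namespace; other .cc files would use
// C10_DECLARE_bool(c10_demo_verbose) and read FLAGS_c10_demo_verbose.
C10_DEFINE_bool(c10_demo_verbose, false, "Print extra output in this demo.");

int main(int argc, char** argv) {
  c10::SetUsageMessage("demo [--c10_demo_verbose=true]");
  c10::ParseCommandLineFlags(&argc, &argv);
  if (FLAGS_c10_demo_verbose) {
    std::cout << "verbose mode enabled\n";
  }
  return 0;
}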
venv/lib/python3.10/site-packages/torch/include/c10/util/Float8_e4m3fn.h ADDED
@@ -0,0 +1,246 @@
1
+ #pragma once
2
+
3
+ /// Defines the Float8_e4m3fn type (8-bit floating-point) including conversions
4
+ /// to standard C types and basic arithmetic operations. Note that arithmetic
5
+ /// operations are implemented by converting to floating point and
6
+ /// performing the operation in float32.
7
+ /// Binary configuration:
8
+ /// s eeee mmm
9
+ /// 1 sign bit
10
+ /// 4 exponent bits
11
+ /// 3 mantissa bits
12
+ /// bias = 7
13
+ ///
14
+ /// Implementation based on the paper https://arxiv.org/pdf/2209.05433.pdf
15
+ /// and inspired by Half implementation from pytorch/c10/util/Half.h
16
+
17
+ #include <c10/macros/Macros.h>
18
+ #include <c10/util/TypeSafeSignMath.h>
19
+ #include <c10/util/floating_point_utils.h>
20
+ #include <type_traits>
21
+
22
+ #if defined(__cplusplus) && (__cplusplus >= 201103L)
23
+ #include <cmath>
24
+ #include <cstdint>
25
+ #elif !defined(__OPENCL_VERSION__)
26
+ #include <math.h>
27
+ #include <stdint.h>
28
+ #endif
29
+
30
+ #ifdef _MSC_VER
31
+ #include <intrin.h>
32
+ #endif
33
+
34
+ #include <climits>
35
+ #include <cstdint>
36
+ #include <cstring>
37
+ #include <iosfwd>
38
+ #include <limits>
39
+ #include <sstream>
40
+ #include <stdexcept>
41
+ #include <string>
42
+ #include <utility>
43
+
44
+ #include <typeinfo> // operator typeid
45
+
46
+ namespace c10 {
47
+
48
+ namespace detail {
49
+
50
+ /*
51
+ * Convert a 8-bit floating-point number in fp8 E4M3FN format, in bit
52
+ * representation, to a 32-bit floating-point number in IEEE single-precision
53
+ * format, in bit representation.
54
+ *
55
+ * @note The implementation doesn't use any floating-point operations.
56
+ */
57
+ inline C10_HOST_DEVICE float fp8e4m3fn_to_fp32_value(uint8_t input) {
58
+ /*
59
+ * Extend the fp8 E4M3FN number to 32 bits and shift to the
60
+ * upper part of the 32-bit word:
61
+ * +---+----+---+-----------------------------+
62
+ * | S |EEEE|MMM|0000 0000 0000 0000 0000 0000|
63
+ * +---+----+---+-----------------------------+
64
+ * Bits 31 27-30 24-26 0-23
65
+ *
66
+ * S - sign bit, E - bits of the biased exponent, M - bits of the mantissa, 0
67
+ * - zero bits.
68
+ */
69
+ const uint32_t w = (uint32_t)input << 24;
70
+ /*
71
+ * Extract the sign of the input number into the high bit of the 32-bit word:
72
+ *
73
+ * +---+----------------------------------+
74
+ * | S |0000000 00000000 00000000 00000000|
75
+ * +---+----------------------------------+
76
+ * Bits 31 0-31
77
+ */
78
+ const uint32_t sign = w & UINT32_C(0x80000000);
79
+ /*
80
+ * Extract mantissa and biased exponent of the input number into the bits 0-30
81
+ * of the 32-bit word:
82
+ *
83
+ * +---+----+---+-----------------------------+
84
+ * | S |EEEE|MMM|0000 0000 0000 0000 0000 0000|
85
+ * +---+----+---+-----------------------------+
86
+ * Bits 31 27-30 24-26 0-23
87
+ */
88
+ const uint32_t nonsign = w & UINT32_C(0x7FFFFFFF);
89
+ /*
90
+ * Renorm shift is the number of bits to shift mantissa left to make the
91
+ * half-precision number normalized. If the initial number is normalized, some
92
+ * of its high 5 bits (sign == 0 and 4-bit exponent) equals one. In this case
93
+ * renorm_shift == 0. If the number is denormalized, renorm_shift > 0. Note
94
+ * that if we shift denormalized nonsign by renorm_shift, the unit bit of
95
+ * mantissa will shift into exponent, turning the biased exponent into 1, and
96
+ * making mantissa normalized (i.e. without leading 1).
97
+ */
98
+ #if defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__)
99
+ uint32_t renorm_shift = __clz(nonsign);
100
+ #elif defined(__SYCL_DEVICE_ONLY__)
101
+ // Note: zero is not a supported input into `__builtin_clz`
102
+ uint32_t renorm_shift =
103
+ nonsign != 0 ? __builtin_clz(nonsign) : sizeof(uint32_t) * CHAR_BIT;
104
+ #elif defined(_MSC_VER)
105
+ unsigned long nonsign_bsr;
106
+ _BitScanReverse(&nonsign_bsr, (unsigned long)nonsign);
107
+ uint32_t renorm_shift = (uint32_t)nonsign_bsr ^ 31;
108
+ #else
109
+ // Note: zero is not a supported input into `__builtin_clz`
110
+ uint32_t renorm_shift =
111
+ nonsign != 0 ? __builtin_clz(nonsign) : sizeof(uint32_t) * CHAR_BIT;
112
+ #endif
113
+ renorm_shift = renorm_shift > 4 ? renorm_shift - 4 : 0;
114
+ /*
115
+ * Iff fp8e4m3fn number has all exponent and mantissa bits set to 1,
116
+ * the addition overflows it into bit 31, and the subsequent shift turns the
117
+ * high 9 bits into 1. Thus inf_nan_mask == 0x7F800000 if the fp8e4m3fn number
118
+ * is NaN, 0x00000000 otherwise
119
+ */
120
+ const int32_t inf_nan_mask =
121
+ ((int32_t)(nonsign + 0x01000000) >> 8) & INT32_C(0x7F800000);
122
+ /*
123
+ * Iff nonsign is 0, it overflows into 0xFFFFFFFF, turning bit 31
124
+ * into 1. Otherwise, bit 31 remains 0. The signed shift right by 31
125
+ * broadcasts bit 31 into all bits of the zero_mask. Thus zero_mask ==
126
+ * 0xFFFFFFFF if the half-precision number was zero (+0.0h or -0.0h)
127
+ * 0x00000000 otherwise
128
+ */
129
+ const int32_t zero_mask = (int32_t)(nonsign - 1) >> 31;
130
+ /*
131
+ * 1. Shift nonsign left by renorm_shift to normalize it (if the input
132
+ * was denormal)
133
+ * 2. Shift nonsign right by 4 so the exponent (4 bits originally)
134
+ * becomes an 8-bit field and 3-bit mantissa shifts into the 3 high
135
+ * bits of the 23-bit mantissa of IEEE single-precision number.
136
+ * 3. Add 0x78 to the exponent (starting at bit 23) to compensate for the
138
+ * difference in exponent bias (0x7F for single-precision number less 0x07
138
+ * for fp8e4m3fn number).
139
+ * 4. Subtract renorm_shift from the exponent (starting at bit 23) to
140
+ * account for renormalization. As renorm_shift is less than 0x78, this
141
+ * can be combined with step 3.
142
+ * 5. Binary OR with inf_nan_mask to turn the exponent into 0xFF if the
143
+ * input was NaN or infinity.
144
+ * 6. Binary ANDNOT with zero_mask to turn the mantissa and exponent
145
+ * into zero if the input was zero.
146
+ * 7. Combine with the sign of the input number.
147
+ */
148
+ uint32_t result = sign |
149
+ ((((nonsign << renorm_shift >> 4) + ((0x78 - renorm_shift) << 23)) |
150
+ inf_nan_mask) &
151
+ ~zero_mask);
152
+ return fp32_from_bits(result);
153
+ }
154
+
155
+ /*
156
+ * Convert a 32-bit floating-point number in IEEE single-precision format to a
157
+ * 8-bit floating-point number in fp8 E4M3FN format, in bit representation.
158
+ */
159
+ inline C10_HOST_DEVICE uint8_t fp8e4m3fn_from_fp32_value(float f) {
160
+ /*
161
+ * Binary representation of 480.0f, which is the first value
162
+ * not representable in fp8e4m3fn range:
163
+ * 0 1111 111 - fp8e4m3fn
164
+ * 0 10000111 11100000000000000000000 - fp32
165
+ */
166
+ constexpr uint32_t fp8_max = UINT32_C(1087) << 20;
167
+
168
+ /*
169
+ * A mask for converting fp32 numbers lower than fp8e4m3fn normal range
170
+ * into denorm representation
171
+ * magic number: ((127 - 7) + (23 - 3) + 1)
172
+ */
173
+ constexpr uint32_t denorm_mask = UINT32_C(141) << 23;
174
+
175
+ uint32_t f_bits = fp32_to_bits(f);
176
+
177
+ uint8_t result = 0u;
178
+
179
+ /*
180
+ * Extract the sign of the input number into the high bit of the 32-bit word:
181
+ *
182
+ * +---+----------------------------------+
183
+ * | S |0000000 00000000 00000000 00000000|
184
+ * +---+----------------------------------+
185
+ * Bits 31 0-31
186
+ */
187
+ const uint32_t sign = f_bits & UINT32_C(0x80000000);
188
+
189
+ /*
190
+ * Set sign bit to 0
191
+ */
192
+ f_bits ^= sign;
193
+
194
+ if (f_bits >= fp8_max) {
195
+ // NaN - all exponent and mantissa bits set to 1
196
+ result = 0x7f;
197
+ } else {
198
+ if (f_bits < (UINT32_C(121) << 23)) {
199
+ // Input number is smaller than 2^(-6), which is the smallest
200
+ // fp8e4m3fn normal number
201
+ f_bits =
202
+ fp32_to_bits(fp32_from_bits(f_bits) + fp32_from_bits(denorm_mask));
203
+ result = static_cast<uint8_t>(f_bits - denorm_mask);
204
+ } else {
205
+ // resulting mantissa is odd
206
+ uint8_t mant_odd = (f_bits >> 20) & 1;
207
+
208
+ // update exponent, rounding bias part 1
209
+ f_bits += ((uint32_t)(7 - 127) << 23) + 0x7FFFF;
210
+
211
+ // rounding bias part 2
212
+ f_bits += mant_odd;
213
+
214
+ // take the bits!
215
+ result = static_cast<uint8_t>(f_bits >> 20);
216
+ }
217
+ }
218
+
219
+ result |= static_cast<uint8_t>(sign >> 24);
220
+ return result;
221
+ }
222
+
223
+ } // namespace detail
224
+
225
+ struct alignas(1) Float8_e4m3fn {
226
+ uint8_t x;
227
+
228
+ struct from_bits_t {};
229
+ C10_HOST_DEVICE static constexpr from_bits_t from_bits() {
230
+ return from_bits_t();
231
+ }
232
+
233
+ Float8_e4m3fn() = default;
234
+
235
+ constexpr C10_HOST_DEVICE Float8_e4m3fn(uint8_t bits, from_bits_t)
236
+ : x(bits){};
237
+ inline C10_HOST_DEVICE Float8_e4m3fn(float value);
238
+ inline C10_HOST_DEVICE operator float() const;
239
+ inline C10_HOST_DEVICE bool isnan() const;
240
+ };
241
+
242
+ C10_API std::ostream& operator<<(std::ostream& out, const Float8_e4m3fn& value);
243
+
244
+ } // namespace c10
245
+
246
+ #include <c10/util/Float8_e4m3fn-inl.h> // IWYU pragma: keep
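A small round-trip sketch of the e4m3fn encode/decode path described above (values chosen for illustration; assumes the c10 headers, including the -inl file, are available on the include path):

#include <c10/util/Float8_e4m3fn.h>
#include <iostream>
#include <limits>

int main() {
  // 1.5f is exactly representable in e4m3 (one mantissa bit set), so it
  // round-trips through fp8e4m3fn_from_fp32_value / fp8e4m3fn_to_fp32_value.
  c10::Float8_e4m3fn a(1.5f);
  std::cout << "1.5f -> " << static_cast<float>(a) << "\n";

  // NaN inputs land on the all-ones exponent+mantissa pattern, so isnan() is true.
  c10::Float8_e4m3fn n(std::numeric_limits<float>::quiet_NaN());
  std::cout << "NaN input -> isnan() == " << n.isnan() << "\n";
  return 0;
}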
venv/lib/python3.10/site-packages/torch/include/c10/util/Float8_e4m3fnuz.h ADDED
@@ -0,0 +1,136 @@
1
+ #pragma once
2
+
3
+ /// Defines the Float8_e4m3fnuz type (8-bit floating-point) including
4
+ /// conversions to standard C types and basic arithmetic operations. Note that
5
+ /// arithmetic operations are implemented by converting to floating point and
6
+ /// performing the operation in float32.
7
+ /// Binary configuration remains the same as Float8_e4m3fn:
8
+ /// s eeee mmm
9
+ /// 1 sign bit
10
+ /// 4 exponent bits
11
+ /// 3 mantissa bits
12
+ /// The key differences versus Float8_e4m3fn are:
13
+ /// bias = 8
14
+ /// no infinities or negative zero
15
+ /// NaN only when sign bit is 1, rest all 0s
16
+ ///
17
+ /// Implementation based on the paper https://arxiv.org/pdf/2206.02915.pdf and
18
+ /// the existing Float8_e4m3fn implementation.
19
+
20
+ #include <c10/macros/Macros.h>
21
+ #include <c10/util/TypeSafeSignMath.h>
22
+ #include <c10/util/floating_point_utils.h>
23
+ #include <type_traits>
24
+
25
+ #if defined(__cplusplus) && (__cplusplus >= 201103L)
26
+ #include <cstdint>
27
+ #elif !defined(__OPENCL_VERSION__)
28
+ #include <math.h>
29
+ #include <stdint.h>
30
+ #endif
31
+
32
+ #include <iosfwd>
33
+ #include <ostream>
34
+
35
+ namespace c10 {
36
+
37
+ namespace detail {
38
+
39
+ /*
40
+ * Convert a 32-bit floating-point number in IEEE single-precision format to a
41
+ * 8-bit floating-point number in fp8 E4M3FNUZ format, in bit representation.
42
+ */
43
+ inline C10_HOST_DEVICE uint8_t fp8e4m3fnuz_from_fp32_value(float f) {
44
+ /*
45
+ * Binary representation of 256.0f, which is the first value not representable
46
+ * (i.e. the first value which would overflow in to the sign bit, resulting in
47
+ * a NaN) in fp8e4m3fnuz range:
48
+ * 1 0000 000 - fp8e4m3fnuz
49
+ * 0 10000111 00000000000000000000000 - fp32
50
+ */
51
+ constexpr uint32_t fnuz_max = UINT32_C(0x87) << 23;
52
+
53
+ /*
54
+ * A mask for converting fp32 numbers lower than fp8e4m3fnuz normal range
55
+ * into denorm representation
56
+ * magic number: ((127 - 8) + (23 - 3) + 1)
57
+ */
58
+ constexpr uint32_t denorm_mask = UINT32_C(0x8C) << 23;
59
+
60
+ uint32_t f_bits = fp32_to_bits(f);
61
+
62
+ uint32_t result = 0u;
63
+
64
+ /*
65
+ * Extract the sign of the input number into the high bit of the 32-bit word:
66
+ *
67
+ * +---+----------------------------------+
68
+ * | S |0000000 00000000 00000000 00000000|
69
+ * +---+----------------------------------+
70
+ * Bits 31 0-31
71
+ */
72
+ const uint32_t sign = f_bits & UINT32_C(0x80000000);
73
+
74
+ /*
75
+ * Set sign bit to 0
76
+ */
77
+ f_bits ^= sign;
78
+
79
+ if (f_bits >= fnuz_max) {
80
+ // NaN -- sign bit set to 1, rest 0s.
81
+ return 0x80;
82
+ }
83
+
84
+ if (f_bits < (UINT32_C(0x78) << 23) /* 2^-7 in float32 */) {
85
+ // Input exponent is less than -7, the smallest e4m3fnuz exponent, so the
86
+ // number will become subnormal.
87
+ f_bits = fp32_to_bits(fp32_from_bits(f_bits) + fp32_from_bits(denorm_mask));
88
+ result = static_cast<uint8_t>(f_bits - denorm_mask);
89
+ if (result == 0) {
90
+ // fnuz types don't have negative zero.
91
+ return 0;
92
+ }
93
+ } else {
94
+ // resulting mantissa is odd
95
+ uint8_t mant_odd = (f_bits >> 20) & 1;
96
+
97
+ // update exponent, rounding bias part 1
98
+ f_bits += ((uint32_t)(8 - 127) << 23) + 0x7FFFF;
99
+
100
+ // rounding bias part 2
101
+ f_bits += mant_odd;
102
+
103
+ // take the bits!
104
+ result = static_cast<uint8_t>(f_bits >> 20);
105
+ }
106
+
107
+ result |= sign >> 24;
108
+ return result;
109
+ }
110
+
111
+ } // namespace detail
112
+
113
+ struct alignas(1) Float8_e4m3fnuz {
114
+ uint8_t x;
115
+
116
+ struct from_bits_t {};
117
+ C10_HOST_DEVICE static constexpr from_bits_t from_bits() {
118
+ return from_bits_t();
119
+ }
120
+
121
+ Float8_e4m3fnuz() = default;
122
+
123
+ constexpr C10_HOST_DEVICE Float8_e4m3fnuz(uint8_t bits, from_bits_t)
124
+ : x(bits){};
125
+ inline C10_HOST_DEVICE Float8_e4m3fnuz(float value);
126
+ inline C10_HOST_DEVICE operator float() const;
127
+ inline C10_HOST_DEVICE bool isnan() const;
128
+ };
129
+
130
+ C10_API std::ostream& operator<<(
131
+ std::ostream& out,
132
+ const Float8_e4m3fnuz& value);
133
+
134
+ } // namespace c10
135
+
136
+ #include <c10/util/Float8_e4m3fnuz-inl.h> // IWYU pragma: keep
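To illustrate the two fnuz-specific behaviours called out above (no negative zero, and a single 0x80 NaN pattern instead of infinities), a small sketch using the detail-level encoder (assumed to compile standalone against the c10 headers):

#include <c10/util/Float8_e4m3fnuz.h>
#include <cstdio>

int main() {
  // -0.0f collapses to the +0 encoding: fnuz formats have no negative zero.
  std::printf("-0.0f -> 0x%02x\n",
              static_cast<unsigned>(c10::detail::fp8e4m3fnuz_from_fp32_value(-0.0f)));
  // Anything at or beyond the overflow threshold maps to 0x80, the lone NaN.
  std::printf("1e9f  -> 0x%02x\n",
              static_cast<unsigned>(c10::detail::fp8e4m3fnuz_from_fp32_value(1e9f)));
  return 0;
}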
venv/lib/python3.10/site-packages/torch/include/c10/util/Float8_e5m2-inl.h ADDED
@@ -0,0 +1,283 @@
1
+ #pragma once
2
+
3
+ #include <c10/macros/Macros.h>
4
+ #include <cstring>
5
+ #include <limits>
6
+
7
+ C10_CLANG_DIAGNOSTIC_PUSH()
8
+ #if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion")
9
+ C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-int-float-conversion")
10
+ #endif
11
+
12
+ #define EXP_WIDTH_FP8 5
13
+ #define MAN_WIDTH_FP8 2
14
+ #define EXP_BIAS_FP8 15
15
+
16
+ namespace c10 {
17
+
18
+ /// Constructors
19
+
20
+ inline C10_HOST_DEVICE Float8_e5m2::Float8_e5m2(float value)
21
+ : x(detail::fp8e5m2_from_fp32_value(value)) {}
22
+
23
+ /// Implicit conversions
24
+
25
+ inline C10_HOST_DEVICE Float8_e5m2::operator float() const {
26
+ return detail::fp8e5m2_to_fp32_value(x);
27
+ }
28
+
29
+ /// Special values helpers
30
+
31
+ inline C10_HOST_DEVICE bool Float8_e5m2::isnan() const {
32
+ return (x & 0b01111111) > 0b01111100;
33
+ }
34
+
35
+ inline C10_HOST_DEVICE bool Float8_e5m2::isinf() const {
36
+ return (x & 0b01111111) == 0b01111100;
37
+ }
38
+
39
+ /// Arithmetic
40
+
41
+ inline C10_HOST_DEVICE Float8_e5m2
42
+ operator+(const Float8_e5m2& a, const Float8_e5m2& b) {
43
+ return static_cast<float>(a) + static_cast<float>(b);
44
+ }
45
+
46
+ inline C10_HOST_DEVICE Float8_e5m2
47
+ operator-(const Float8_e5m2& a, const Float8_e5m2& b) {
48
+ return static_cast<float>(a) - static_cast<float>(b);
49
+ }
50
+
51
+ inline C10_HOST_DEVICE Float8_e5m2
52
+ operator*(const Float8_e5m2& a, const Float8_e5m2& b) {
53
+ return static_cast<float>(a) * static_cast<float>(b);
54
+ }
55
+
56
+ inline C10_HOST_DEVICE Float8_e5m2 operator/(
57
+ const Float8_e5m2& a,
58
+ const Float8_e5m2& b) __ubsan_ignore_float_divide_by_zero__ {
59
+ return static_cast<float>(a) / static_cast<float>(b);
60
+ }
61
+
62
+ inline C10_HOST_DEVICE Float8_e5m2 operator-(const Float8_e5m2& a) {
63
+ return -static_cast<float>(a);
64
+ }
65
+
66
+ inline C10_HOST_DEVICE Float8_e5m2& operator+=(
67
+ Float8_e5m2& a,
68
+ const Float8_e5m2& b) {
69
+ a = a + b;
70
+ return a;
71
+ }
72
+
73
+ inline C10_HOST_DEVICE Float8_e5m2& operator-=(
74
+ Float8_e5m2& a,
75
+ const Float8_e5m2& b) {
76
+ a = a - b;
77
+ return a;
78
+ }
79
+
80
+ inline C10_HOST_DEVICE Float8_e5m2& operator*=(
81
+ Float8_e5m2& a,
82
+ const Float8_e5m2& b) {
83
+ a = a * b;
84
+ return a;
85
+ }
86
+
87
+ inline C10_HOST_DEVICE Float8_e5m2& operator/=(
88
+ Float8_e5m2& a,
89
+ const Float8_e5m2& b) {
90
+ a = a / b;
91
+ return a;
92
+ }
93
+
94
+ /// Arithmetic with floats
95
+
96
+ inline C10_HOST_DEVICE float operator+(Float8_e5m2 a, float b) {
97
+ return static_cast<float>(a) + b;
98
+ }
99
+ inline C10_HOST_DEVICE float operator-(Float8_e5m2 a, float b) {
100
+ return static_cast<float>(a) - b;
101
+ }
102
+ inline C10_HOST_DEVICE float operator*(Float8_e5m2 a, float b) {
103
+ return static_cast<float>(a) * b;
104
+ }
105
+ inline C10_HOST_DEVICE float operator/(Float8_e5m2 a, float b)
106
+ __ubsan_ignore_float_divide_by_zero__ {
107
+ return static_cast<float>(a) / b;
108
+ }
109
+
110
+ inline C10_HOST_DEVICE float operator+(float a, Float8_e5m2 b) {
111
+ return a + static_cast<float>(b);
112
+ }
113
+ inline C10_HOST_DEVICE float operator-(float a, Float8_e5m2 b) {
114
+ return a - static_cast<float>(b);
115
+ }
116
+ inline C10_HOST_DEVICE float operator*(float a, Float8_e5m2 b) {
117
+ return a * static_cast<float>(b);
118
+ }
119
+ inline C10_HOST_DEVICE float operator/(float a, Float8_e5m2 b)
120
+ __ubsan_ignore_float_divide_by_zero__ {
121
+ return a / static_cast<float>(b);
122
+ }
123
+
124
+ inline C10_HOST_DEVICE float& operator+=(float& a, const Float8_e5m2& b) {
125
+ return a += static_cast<float>(b);
126
+ }
127
+ inline C10_HOST_DEVICE float& operator-=(float& a, const Float8_e5m2& b) {
128
+ return a -= static_cast<float>(b);
129
+ }
130
+ inline C10_HOST_DEVICE float& operator*=(float& a, const Float8_e5m2& b) {
131
+ return a *= static_cast<float>(b);
132
+ }
133
+ inline C10_HOST_DEVICE float& operator/=(float& a, const Float8_e5m2& b) {
134
+ return a /= static_cast<float>(b);
135
+ }
136
+
137
+ /// Arithmetic with doubles
138
+
139
+ inline C10_HOST_DEVICE double operator+(Float8_e5m2 a, double b) {
140
+ return static_cast<double>(a) + b;
141
+ }
142
+ inline C10_HOST_DEVICE double operator-(Float8_e5m2 a, double b) {
143
+ return static_cast<double>(a) - b;
144
+ }
145
+ inline C10_HOST_DEVICE double operator*(Float8_e5m2 a, double b) {
146
+ return static_cast<double>(a) * b;
147
+ }
148
+ inline C10_HOST_DEVICE double operator/(Float8_e5m2 a, double b)
149
+ __ubsan_ignore_float_divide_by_zero__ {
150
+ return static_cast<double>(a) / b;
151
+ }
152
+
153
+ inline C10_HOST_DEVICE double operator+(double a, Float8_e5m2 b) {
154
+ return a + static_cast<double>(b);
155
+ }
156
+ inline C10_HOST_DEVICE double operator-(double a, Float8_e5m2 b) {
157
+ return a - static_cast<double>(b);
158
+ }
159
+ inline C10_HOST_DEVICE double operator*(double a, Float8_e5m2 b) {
160
+ return a * static_cast<double>(b);
161
+ }
162
+ inline C10_HOST_DEVICE double operator/(double a, Float8_e5m2 b)
163
+ __ubsan_ignore_float_divide_by_zero__ {
164
+ return a / static_cast<double>(b);
165
+ }
166
+
167
+ /// Arithmetic with ints
168
+
169
+ inline C10_HOST_DEVICE Float8_e5m2 operator+(Float8_e5m2 a, int b) {
170
+ return a + static_cast<Float8_e5m2>(b);
171
+ }
172
+ inline C10_HOST_DEVICE Float8_e5m2 operator-(Float8_e5m2 a, int b) {
173
+ return a - static_cast<Float8_e5m2>(b);
174
+ }
175
+ inline C10_HOST_DEVICE Float8_e5m2 operator*(Float8_e5m2 a, int b) {
176
+ return a * static_cast<Float8_e5m2>(b);
177
+ }
178
+ inline C10_HOST_DEVICE Float8_e5m2 operator/(Float8_e5m2 a, int b) {
179
+ return a / static_cast<Float8_e5m2>(b);
180
+ }
181
+
182
+ inline C10_HOST_DEVICE Float8_e5m2 operator+(int a, Float8_e5m2 b) {
183
+ return static_cast<Float8_e5m2>(a) + b;
184
+ }
185
+ inline C10_HOST_DEVICE Float8_e5m2 operator-(int a, Float8_e5m2 b) {
186
+ return static_cast<Float8_e5m2>(a) - b;
187
+ }
188
+ inline C10_HOST_DEVICE Float8_e5m2 operator*(int a, Float8_e5m2 b) {
189
+ return static_cast<Float8_e5m2>(a) * b;
190
+ }
191
+ inline C10_HOST_DEVICE Float8_e5m2 operator/(int a, Float8_e5m2 b) {
192
+ return static_cast<Float8_e5m2>(a) / b;
193
+ }
194
+
195
+ //// Arithmetic with int64_t
196
+
197
+ inline C10_HOST_DEVICE Float8_e5m2 operator+(Float8_e5m2 a, int64_t b) {
198
+ return a + static_cast<Float8_e5m2>(b);
199
+ }
200
+ inline C10_HOST_DEVICE Float8_e5m2 operator-(Float8_e5m2 a, int64_t b) {
201
+ return a - static_cast<Float8_e5m2>(b);
202
+ }
203
+ inline C10_HOST_DEVICE Float8_e5m2 operator*(Float8_e5m2 a, int64_t b) {
204
+ return a * static_cast<Float8_e5m2>(b);
205
+ }
206
+ inline C10_HOST_DEVICE Float8_e5m2 operator/(Float8_e5m2 a, int64_t b) {
207
+ return a / static_cast<Float8_e5m2>(b);
208
+ }
209
+
210
+ inline C10_HOST_DEVICE Float8_e5m2 operator+(int64_t a, Float8_e5m2 b) {
211
+ return static_cast<Float8_e5m2>(a) + b;
212
+ }
213
+ inline C10_HOST_DEVICE Float8_e5m2 operator-(int64_t a, Float8_e5m2 b) {
214
+ return static_cast<Float8_e5m2>(a) - b;
215
+ }
216
+ inline C10_HOST_DEVICE Float8_e5m2 operator*(int64_t a, Float8_e5m2 b) {
217
+ return static_cast<Float8_e5m2>(a) * b;
218
+ }
219
+ inline C10_HOST_DEVICE Float8_e5m2 operator/(int64_t a, Float8_e5m2 b) {
220
+ return static_cast<Float8_e5m2>(a) / b;
221
+ }
222
+
223
+ /// NOTE: we do not define comparisons directly and instead rely on the implicit
224
+ /// conversion from c10::Float8_e5m2 to float.
225
+
226
+ } // namespace c10
227
+
228
+ namespace std {
229
+
230
+ template <>
231
+ class numeric_limits<c10::Float8_e5m2> {
232
+ public:
233
+ static constexpr bool is_signed = true;
234
+ static constexpr bool is_integer = false;
235
+ static constexpr bool is_specialized = true;
236
+ static constexpr bool is_exact = false;
237
+ static constexpr bool has_infinity = true;
238
+ static constexpr bool has_quiet_NaN = false;
239
+ static constexpr bool has_signaling_NaN = false;
240
+ static constexpr auto has_denorm = true;
241
+ static constexpr auto has_denorm_loss = true;
242
+ static constexpr auto round_style = numeric_limits<float>::round_style;
243
+ static constexpr bool is_iec559 = false;
244
+ static constexpr bool is_bounded = true;
245
+ static constexpr bool is_modulo = false;
246
+ static constexpr int digits = 3;
247
+ static constexpr int digits10 = 0;
248
+ static constexpr int max_digits10 = 2;
249
+ static constexpr int radix = 2;
250
+ static constexpr int min_exponent = -13;
251
+ static constexpr int min_exponent10 = -4;
252
+ static constexpr int max_exponent = 16;
253
+ static constexpr int max_exponent10 = 4;
254
+ static constexpr auto traps = numeric_limits<float>::traps;
255
+ static constexpr auto tinyness_before =
256
+ numeric_limits<float>::tinyness_before;
257
+
258
+ static constexpr c10::Float8_e5m2 min() {
259
+ return c10::Float8_e5m2(0x4, c10::Float8_e5m2::from_bits());
260
+ }
261
+ static constexpr c10::Float8_e5m2 max() {
262
+ return c10::Float8_e5m2(0x7B, c10::Float8_e5m2::from_bits());
263
+ }
264
+ static constexpr c10::Float8_e5m2 lowest() {
265
+ return c10::Float8_e5m2(0xFB, c10::Float8_e5m2::from_bits());
266
+ }
267
+ static constexpr c10::Float8_e5m2 epsilon() {
268
+ return c10::Float8_e5m2(0x34, c10::Float8_e5m2::from_bits());
269
+ }
270
+ static constexpr c10::Float8_e5m2 round_error() {
271
+ return c10::Float8_e5m2(0x38, c10::Float8_e5m2::from_bits());
272
+ }
273
+ static constexpr c10::Float8_e5m2 infinity() {
274
+ return c10::Float8_e5m2(0x7C, c10::Float8_e5m2::from_bits());
275
+ }
276
+ static constexpr c10::Float8_e5m2 denorm_min() {
277
+ return c10::Float8_e5m2(0x01, c10::Float8_e5m2::from_bits());
278
+ }
279
+ };
280
+
281
+ } // namespace std
282
+
283
+ C10_CLANG_DIAGNOSTIC_POP()
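Since e5m2, unlike the fnuz variants, keeps an infinity encoding and defines its arithmetic by promoting to float32, a short sketch (illustrative only, assuming the c10 headers are available):

#include <c10/util/Float8_e5m2.h>
#include <iostream>
#include <limits>

int main() {
  // numeric_limits<Float8_e5m2>::infinity() is the 0x7C bit pattern; isinf() detects it.
  c10::Float8_e5m2 inf = std::numeric_limits<c10::Float8_e5m2>::infinity();
  std::cout << "isinf: " << inf.isinf()
            << ", as float: " << static_cast<float>(inf) << "\n";

  // Operators convert to float, compute, and round the result back to e5m2.
  c10::Float8_e5m2 x(3.0f), y(0.5f);
  std::cout << "3.0 * 0.5 = " << static_cast<float>(x * y) << "\n";
  return 0;
}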
venv/lib/python3.10/site-packages/torch/include/c10/util/Float8_e5m2.h ADDED
@@ -0,0 +1,143 @@
1
+ #pragma once
2
+
3
+ /// Defines the Float8_e5m2 type (8-bit floating-point) including conversions
4
+ /// to standard C types and basic arithmetic operations. Note that arithmetic
5
+ /// operations are implemented by converting to floating point and
6
+ /// performing the operation in float32.
7
+ /// Binary configuration:
8
+ /// s eeeee mm
9
+ /// 1 sign bit
10
+ /// 5 exponent bits
11
+ /// 2 mantissa bits
12
+ /// bias = 15
13
+ ///
14
+ /// Implementation based on the paper https://arxiv.org/pdf/2209.05433.pdf
15
+ /// and inspired by Half implementation from pytorch/c10/util/Half.h
16
+
17
+ #include <c10/util/Half.h>
18
+
19
+ namespace c10 {
20
+
21
+ namespace detail {
22
+
23
+ /*
24
+ * Convert a 8-bit floating-point number in fp8 E5M2 format, in bit
25
+ * representation, to a 32-bit floating-point number in IEEE single-precision
26
+ * format, in bit representation.
27
+ *
28
+ * @note The implementation doesn't use any floating-point operations.
29
+ */
30
+ inline C10_HOST_DEVICE float fp8e5m2_to_fp32_value(uint8_t input) {
31
+ /*
32
+ * Extend the fp8 E5M2 number to 32 bits and shift to the
33
+ * upper part of the 32-bit word:
34
+ * +---+----+---+-----------------------------+
35
+ * | S |EEEEE|MM|0000 0000 0000 0000 0000 0000|
36
+ * +---+----+---+-----------------------------+
37
+ * Bits 31 26-30 24-25 0-23
38
+ *
39
+ * S - sign bit, E - bits of the biased exponent, M - bits of the mantissa, 0
40
+ * - zero bits.
41
+ */
42
+ uint16_t half_representation = input;
43
+ half_representation <<= 8;
44
+ return fp16_ieee_to_fp32_value(half_representation);
45
+ }
46
+
47
+ /*
48
+ * Convert a 32-bit floating-point number in IEEE single-precision format to a
49
+ * 8-bit floating-point number in fp8 E5M2 format, in bit representation.
50
+ */
51
+ inline C10_HOST_DEVICE uint8_t fp8e5m2_from_fp32_value(float f) {
52
+ /*
53
+ * Binary representation of fp32 infinity
54
+ * 0 11111111 00000000000000000000000
55
+ */
56
+ constexpr uint32_t fp32_inf = UINT32_C(255) << 23;
57
+
58
+ /*
59
+ * Binary representation of 65536.0f, which is the first value
60
+ * not representable in fp8e5m2 range:
61
+ * 0 11111 00 - fp8e5m2
62
+ * 0 10001111 00000000000000000000000 - fp32
63
+ */
64
+ constexpr uint32_t fp8_max = UINT32_C(143) << 23;
65
+
66
+ /*
67
+ * A mask for converting fp32 numbers lower than fp8e5m2 normal range
68
+ * into denorm representation
69
+ * magic number: ((127 - 15) + (23 - 2) + 1)
70
+ */
71
+ constexpr uint32_t denorm_mask = UINT32_C(134) << 23;
72
+
73
+ uint32_t f_bits = fp32_to_bits(f);
74
+ uint8_t result = 0u;
75
+
76
+ /*
77
+ * Extract the sign of the input number into the high bit of the 32-bit word:
78
+ *
79
+ * +---+----------------------------------+
80
+ * | S |0000000 00000000 00000000 00000000|
81
+ * +---+----------------------------------+
82
+ * Bits 31 0-31
83
+ */
84
+ const uint32_t sign = f_bits & UINT32_C(0x80000000);
85
+
86
+ /*
87
+ * Set sign bit to 0
88
+ */
89
+ f_bits ^= sign;
90
+
91
+ if (f_bits >= fp8_max) {
92
+ // NaN - all exponent and mantissa bits set to 1
93
+ result = f_bits > fp32_inf ? UINT8_C(0x7F) : UINT8_C(0x7C);
94
+ } else {
95
+ if (f_bits < (UINT32_C(113) << 23)) {
96
+ // Input number is smaller than 2^(-14), which is the smallest
97
+ // fp8e5m2 normal number
98
+ f_bits =
99
+ fp32_to_bits(fp32_from_bits(f_bits) + fp32_from_bits(denorm_mask));
100
+ result = static_cast<uint8_t>(f_bits - denorm_mask);
101
+ } else {
102
+ // resulting mantissa is odd
103
+ uint32_t mant_odd = (f_bits >> 21) & 1;
104
+
105
+ // update exponent, rounding bias part 1
106
+ f_bits += ((uint32_t)(15 - 127) << 23) + 0xFFFFF;
107
+
108
+ // rounding bias part 2
109
+ f_bits += mant_odd;
110
+
111
+ // take the bits!
112
+ result = static_cast<uint8_t>(f_bits >> 21);
113
+ }
114
+ }
115
+
116
+ result |= static_cast<uint8_t>(sign >> 24);
117
+ return result;
118
+ }
119
+
120
+ } // namespace detail
121
+
122
+ struct alignas(1) Float8_e5m2 {
123
+ uint8_t x;
124
+
125
+ struct from_bits_t {};
126
+ C10_HOST_DEVICE static constexpr from_bits_t from_bits() {
127
+ return from_bits_t();
128
+ }
129
+
130
+ Float8_e5m2() = default;
131
+
132
+ constexpr C10_HOST_DEVICE Float8_e5m2(uint8_t bits, from_bits_t) : x(bits) {}
133
+ inline C10_HOST_DEVICE Float8_e5m2(float value);
134
+ inline C10_HOST_DEVICE operator float() const;
135
+ inline C10_HOST_DEVICE bool isnan() const;
136
+ inline C10_HOST_DEVICE bool isinf() const;
137
+ };
138
+
139
+ C10_API std::ostream& operator<<(std::ostream& out, const Float8_e5m2& value);
140
+
141
+ } // namespace c10
142
+
143
+ #include <c10/util/Float8_e5m2-inl.h> // IWYU pragma: keep
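And the saturation behaviour of the encoder defined just above, sketched with the detail-level function (hypothetical standalone program against the c10 headers):

#include <c10/util/Float8_e5m2.h>
#include <cstdio>
#include <limits>

int main() {
  // Finite values at or beyond 65536.0f hit the f_bits >= fp8_max branch and,
  // being no greater than the fp32 infinity bits, encode as 0x7C (e5m2 infinity).
  std::printf("70000.0f -> 0x%02x\n",
              static_cast<unsigned>(c10::detail::fp8e5m2_from_fp32_value(70000.0f)));
  // NaN inputs have bits above fp32 infinity and encode as 0x7F instead.
  std::printf("NaN      -> 0x%02x\n",
              static_cast<unsigned>(c10::detail::fp8e5m2_from_fp32_value(
                  std::numeric_limits<float>::quiet_NaN())));
  return 0;
}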
venv/lib/python3.10/site-packages/torch/include/c10/util/Float8_e5m2fnuz.h ADDED
@@ -0,0 +1,135 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ /// Defines the Float8_e5m2fnuz type (8-bit floating-point) including
4
+ /// conversions to standard C types and basic arithmetic operations. Note that
5
+ /// arithmetic operations are implemented by converting to floating point and
6
+ /// performing the operation in float32.
7
+ /// Binary configuration remains the same as e5m2:
8
+ /// s eeeee mm
9
+ /// 1 sign bit
10
+ /// 5 exponent bits
11
+ /// 2 mantissa bits
12
+ /// The key differences that e5m2fnuz brings are:
13
+ /// bias = 16
14
+ /// no infinities or negative zero
15
+ /// NaN only when sign bit is 1, rest all 0s
16
+ ///
17
+ /// Implementation based on the paper https://arxiv.org/pdf/2206.02915.pdf and
18
+ /// the existing Float8_e4m3fn implementation.
19
+
20
+ #include <c10/macros/Macros.h>
21
+ #include <c10/util/TypeSafeSignMath.h>
22
+ #include <c10/util/floating_point_utils.h>
23
+
24
+ #if defined(__cplusplus) && (__cplusplus >= 201103L)
25
+ #include <cstdint>
26
+ #elif !defined(__OPENCL_VERSION__)
27
+ #include <math.h>
28
+ #include <stdint.h>
29
+ #endif
30
+
31
+ #include <iosfwd>
32
+ #include <ostream>
33
+
34
+ namespace c10 {
35
+
36
+ namespace detail {
37
+
38
+ /*
39
+ * Convert a 32-bit floating-point number in IEEE single-precision format to a
40
+ * 8-bit floating-point number in fp8 E5M2 format, in bit representation.
41
+ */
42
+ inline C10_HOST_DEVICE uint8_t fp8e5m2fnuz_from_fp32_value(float f) {
43
+ /*
44
+ * Binary representation of 65536.0f, which is the first value not
45
+ * representable (i.e. the first value which would overflow in to the sign
46
+ * bit, resulting in a NaN) in fp8e4m3fnuz range:
47
+ * 1 00000 00 - fp8e5m2fnuz
48
+ * 0 10001111 00000000000000000000000 - fp32
49
+ */
50
+ constexpr uint32_t fnuz_max = UINT32_C(0x8F) << 23;
51
+
52
+ /*
53
+ * A mask for converting fp32 numbers lower than fp8e5m2fnuz normal range
54
+ * into denormalized representation.
55
+ * magic number: ((127 - 16) + (23 - 2) + 1)
56
+ */
57
+ constexpr uint32_t denorm_mask = UINT32_C(0x85) << 23;
58
+
59
+ uint32_t f_bits = fp32_to_bits(f);
60
+ uint32_t result = 0u;
61
+
62
+ /*
63
+ * Extract the sign of the input number into the high bit of the 32-bit word:
64
+ *
65
+ * +---+----------------------------------+
66
+ * | S |0000000 00000000 00000000 00000000|
67
+ * +---+----------------------------------+
68
+ * Bits 31 0-31
69
+ */
70
+ const uint32_t sign = f_bits & UINT32_C(0x80000000);
71
+
72
+ /*
73
+ * Set sign bit to 0
74
+ */
75
+ f_bits ^= sign;
76
+
77
+ if (f_bits >= fnuz_max) {
78
+ // NaN -- sign bit set to 1, rest 0s
79
+ return 0x80;
80
+ }
81
+
82
+ if (f_bits < (UINT32_C(0x70) << 23) /* 2^-15 in float32 */) {
83
+ // Input exponent is less than -15, the smallest e5m2fnuz exponent, so the
84
+ // number will become subnormal.
85
+ f_bits = fp32_to_bits(fp32_from_bits(f_bits) + fp32_from_bits(denorm_mask));
86
+ result = static_cast<uint8_t>(f_bits - denorm_mask);
87
+ if (result == 0) {
88
+ // fnuz types don't have negative zero.
89
+ return 0;
90
+ }
91
+ } else {
92
+ // resulting mantissa is odd
93
+ uint8_t mant_odd = (f_bits >> 21) & 1;
94
+
95
+ // update exponent, rounding bias part 1
96
+ f_bits += ((uint32_t)(16 - 127) << 23) + 0xFFFFF;
97
+
98
+ // rounding bias part 2
99
+ f_bits += mant_odd;
100
+
101
+ // take the bits!
102
+ result = static_cast<uint8_t>(f_bits >> 21);
103
+ }
104
+
105
+ result |= sign >> 24;
106
+ return result;
107
+ }
108
+
109
+ } // namespace detail
110
+
111
+ struct alignas(1) Float8_e5m2fnuz {
112
+ uint8_t x;
113
+
114
+ struct from_bits_t {};
115
+ C10_HOST_DEVICE static constexpr from_bits_t from_bits() {
116
+ return from_bits_t();
117
+ }
118
+
119
+ Float8_e5m2fnuz() = default;
120
+
121
+ constexpr C10_HOST_DEVICE Float8_e5m2fnuz(uint8_t bits, from_bits_t)
122
+ : x(bits) {}
123
+ inline C10_HOST_DEVICE Float8_e5m2fnuz(float value);
124
+ inline C10_HOST_DEVICE operator float() const;
125
+ inline C10_HOST_DEVICE bool isnan() const;
126
+ inline C10_HOST_DEVICE bool isinf() const;
127
+ };
128
+
129
+ C10_API std::ostream& operator<<(
130
+ std::ostream& out,
131
+ const Float8_e5m2fnuz& value);
132
+
133
+ } // namespace c10
134
+
135
+ #include <c10/util/Float8_e5m2fnuz-inl.h> // IWYU pragma: keep
venv/lib/python3.10/site-packages/torch/include/c10/util/Half-inl.h ADDED
@@ -0,0 +1,350 @@
1
+ #pragma once
2
+
3
+ #include <c10/macros/Macros.h>
4
+ #include <c10/util/bit_cast.h>
5
+
6
+ #include <cstring>
7
+ #include <limits>
8
+
9
+ #ifdef __CUDACC__
10
+ #include <cuda_fp16.h>
11
+ #endif
12
+
13
+ #ifdef __HIPCC__
14
+ #include <hip/hip_fp16.h>
15
+ #endif
16
+
17
+ #if defined(CL_SYCL_LANGUAGE_VERSION)
18
+ #include <CL/sycl.hpp> // for SYCL 1.2.1
19
+ #elif defined(SYCL_LANGUAGE_VERSION)
20
+ #include <sycl/sycl.hpp> // for SYCL 2020
21
+ #endif
22
+
23
+ #if (defined(CPU_CAPABILITY_AVX2) || defined(CPU_CAPABILITY_AVX512)) && \
24
+ !defined(__APPLE__)
25
+ #include <ATen/cpu/vec/vec_half.h>
26
+ #endif
27
+
28
+ C10_CLANG_DIAGNOSTIC_PUSH()
29
+ #if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion")
30
+ C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-int-float-conversion")
31
+ #endif
32
+
33
+ namespace c10 {
34
+
35
+ #if defined(__aarch64__) && !defined(C10_MOBILE) && !defined(__CUDACC__)
36
+ /// Constructors
37
+ inline Half::Half(float16_t value) : x(detail::fp16_to_bits(value)) {}
38
+ inline Half::operator float16_t() const {
39
+ return detail::fp16_from_bits(x);
40
+ }
41
+ #else
42
+
43
+ inline C10_HOST_DEVICE Half::Half(float value)
44
+ :
45
+ #if defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__)
46
+ x(__half_as_short(__float2half(value)))
47
+ #elif defined(__SYCL_DEVICE_ONLY__)
48
+ x(c10::bit_cast<uint16_t>(sycl::half(value)))
49
+ #elif (defined(CPU_CAPABILITY_AVX2) || defined(CPU_CAPABILITY_AVX512)) && \
50
+ !defined(__APPLE__)
51
+ x(at::vec::float2half_scalar(value))
52
+ #else
53
+ x(detail::fp16_ieee_from_fp32_value(value))
54
+ #endif
55
+ {
56
+ }
57
+
58
+ /// Implicit conversions
59
+
60
+ inline C10_HOST_DEVICE Half::operator float() const {
61
+ #if defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__)
62
+ return __half2float(*reinterpret_cast<const __half*>(&x));
63
+ #elif defined(__SYCL_DEVICE_ONLY__)
64
+ return float(c10::bit_cast<sycl::half>(x));
65
+ #elif (defined(CPU_CAPABILITY_AVX2) || defined(CPU_CAPABILITY_AVX512)) && \
66
+ !defined(__APPLE__)
67
+ return at::vec::half2float_scalar(x);
68
+ #elif defined(__aarch64__) && !defined(C10_MOBILE) && !defined(__CUDACC__)
69
+ return detail::native_fp16_to_fp32_value(x);
70
+ #else
71
+ return detail::fp16_ieee_to_fp32_value(x);
72
+ #endif
73
+ }
74
+
75
+ #endif /* !defined(__aarch64__) || defined(C10_MOBILE) || defined(__CUDACC__) \
76
+ */
77
+
78
+ #if defined(__CUDACC__) || defined(__HIPCC__)
79
+ inline C10_HOST_DEVICE Half::Half(const __half& value) {
80
+ x = *reinterpret_cast<const unsigned short*>(&value);
81
+ }
82
+ inline C10_HOST_DEVICE Half::operator __half() const {
83
+ return *reinterpret_cast<const __half*>(&x);
84
+ }
85
+ #endif
86
+
87
+ #ifdef SYCL_LANGUAGE_VERSION
88
+ inline C10_HOST_DEVICE Half::Half(const sycl::half& value) {
89
+ x = *reinterpret_cast<const unsigned short*>(&value);
90
+ }
91
+ inline C10_HOST_DEVICE Half::operator sycl::half() const {
92
+ return *reinterpret_cast<const sycl::half*>(&x);
93
+ }
94
+ #endif
95
+
96
+ // CUDA intrinsics
97
+
98
+ #if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 350)) || \
99
+ (defined(__clang__) && defined(__CUDA__))
100
+ inline __device__ Half __ldg(const Half* ptr) {
101
+ return __ldg(reinterpret_cast<const __half*>(ptr));
102
+ }
103
+ #endif
104
+
105
+ /// Arithmetic
106
+
107
+ inline C10_HOST_DEVICE Half operator+(const Half& a, const Half& b) {
108
+ return static_cast<float>(a) + static_cast<float>(b);
109
+ }
110
+
111
+ inline C10_HOST_DEVICE Half operator-(const Half& a, const Half& b) {
112
+ return static_cast<float>(a) - static_cast<float>(b);
113
+ }
114
+
115
+ inline C10_HOST_DEVICE Half operator*(const Half& a, const Half& b) {
116
+ return static_cast<float>(a) * static_cast<float>(b);
117
+ }
118
+
119
+ inline C10_HOST_DEVICE Half operator/(const Half& a, const Half& b)
120
+ __ubsan_ignore_float_divide_by_zero__ {
121
+ return static_cast<float>(a) / static_cast<float>(b);
122
+ }
123
+
124
+ inline C10_HOST_DEVICE Half operator-(const Half& a) {
125
+ #if (defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530) || \
126
+ defined(__HIP_DEVICE_COMPILE__)
127
+ return __hneg(a);
128
+ #elif defined(__SYCL_DEVICE_ONLY__)
129
+ return -c10::bit_cast<sycl::half>(a);
130
+ #else
131
+ return -static_cast<float>(a);
132
+ #endif
133
+ }
134
+
135
+ inline C10_HOST_DEVICE Half& operator+=(Half& a, const Half& b) {
136
+ a = a + b;
137
+ return a;
138
+ }
139
+
140
+ inline C10_HOST_DEVICE Half& operator-=(Half& a, const Half& b) {
141
+ a = a - b;
142
+ return a;
143
+ }
144
+
145
+ inline C10_HOST_DEVICE Half& operator*=(Half& a, const Half& b) {
146
+ a = a * b;
147
+ return a;
148
+ }
149
+
150
+ inline C10_HOST_DEVICE Half& operator/=(Half& a, const Half& b) {
151
+ a = a / b;
152
+ return a;
153
+ }
154
+
155
+ /// Arithmetic with floats
156
+
157
+ inline C10_HOST_DEVICE float operator+(Half a, float b) {
158
+ return static_cast<float>(a) + b;
159
+ }
160
+ inline C10_HOST_DEVICE float operator-(Half a, float b) {
161
+ return static_cast<float>(a) - b;
162
+ }
163
+ inline C10_HOST_DEVICE float operator*(Half a, float b) {
164
+ return static_cast<float>(a) * b;
165
+ }
166
+ inline C10_HOST_DEVICE float operator/(Half a, float b)
167
+ __ubsan_ignore_float_divide_by_zero__ {
168
+ return static_cast<float>(a) / b;
169
+ }
170
+
171
+ inline C10_HOST_DEVICE float operator+(float a, Half b) {
172
+ return a + static_cast<float>(b);
173
+ }
174
+ inline C10_HOST_DEVICE float operator-(float a, Half b) {
175
+ return a - static_cast<float>(b);
176
+ }
177
+ inline C10_HOST_DEVICE float operator*(float a, Half b) {
178
+ return a * static_cast<float>(b);
179
+ }
180
+ inline C10_HOST_DEVICE float operator/(float a, Half b)
181
+ __ubsan_ignore_float_divide_by_zero__ {
182
+ return a / static_cast<float>(b);
183
+ }
184
+
185
+ inline C10_HOST_DEVICE float& operator+=(float& a, const Half& b) {
186
+ return a += static_cast<float>(b);
187
+ }
188
+ inline C10_HOST_DEVICE float& operator-=(float& a, const Half& b) {
189
+ return a -= static_cast<float>(b);
190
+ }
191
+ inline C10_HOST_DEVICE float& operator*=(float& a, const Half& b) {
192
+ return a *= static_cast<float>(b);
193
+ }
194
+ inline C10_HOST_DEVICE float& operator/=(float& a, const Half& b) {
195
+ return a /= static_cast<float>(b);
196
+ }
197
+
198
+ /// Arithmetic with doubles
199
+
200
+ inline C10_HOST_DEVICE double operator+(Half a, double b) {
201
+ return static_cast<double>(a) + b;
202
+ }
203
+ inline C10_HOST_DEVICE double operator-(Half a, double b) {
204
+ return static_cast<double>(a) - b;
205
+ }
206
+ inline C10_HOST_DEVICE double operator*(Half a, double b) {
207
+ return static_cast<double>(a) * b;
208
+ }
209
+ inline C10_HOST_DEVICE double operator/(Half a, double b)
210
+ __ubsan_ignore_float_divide_by_zero__ {
211
+ return static_cast<double>(a) / b;
212
+ }
213
+
214
+ inline C10_HOST_DEVICE double operator+(double a, Half b) {
215
+ return a + static_cast<double>(b);
216
+ }
217
+ inline C10_HOST_DEVICE double operator-(double a, Half b) {
218
+ return a - static_cast<double>(b);
219
+ }
220
+ inline C10_HOST_DEVICE double operator*(double a, Half b) {
221
+ return a * static_cast<double>(b);
222
+ }
223
+ inline C10_HOST_DEVICE double operator/(double a, Half b)
224
+ __ubsan_ignore_float_divide_by_zero__ {
225
+ return a / static_cast<double>(b);
226
+ }
227
+
228
+ /// Arithmetic with ints
229
+
230
+ inline C10_HOST_DEVICE Half operator+(Half a, int b) {
231
+ return a + static_cast<Half>(b);
232
+ }
233
+ inline C10_HOST_DEVICE Half operator-(Half a, int b) {
234
+ return a - static_cast<Half>(b);
235
+ }
236
+ inline C10_HOST_DEVICE Half operator*(Half a, int b) {
237
+ return a * static_cast<Half>(b);
238
+ }
239
+ inline C10_HOST_DEVICE Half operator/(Half a, int b) {
240
+ return a / static_cast<Half>(b);
241
+ }
242
+
243
+ inline C10_HOST_DEVICE Half operator+(int a, Half b) {
244
+ return static_cast<Half>(a) + b;
245
+ }
246
+ inline C10_HOST_DEVICE Half operator-(int a, Half b) {
247
+ return static_cast<Half>(a) - b;
248
+ }
249
+ inline C10_HOST_DEVICE Half operator*(int a, Half b) {
250
+ return static_cast<Half>(a) * b;
251
+ }
252
+ inline C10_HOST_DEVICE Half operator/(int a, Half b) {
253
+ return static_cast<Half>(a) / b;
254
+ }
255
+
256
+ //// Arithmetic with int64_t
257
+
258
+ inline C10_HOST_DEVICE Half operator+(Half a, int64_t b) {
259
+ return a + static_cast<Half>(b);
260
+ }
261
+ inline C10_HOST_DEVICE Half operator-(Half a, int64_t b) {
262
+ return a - static_cast<Half>(b);
263
+ }
264
+ inline C10_HOST_DEVICE Half operator*(Half a, int64_t b) {
265
+ return a * static_cast<Half>(b);
266
+ }
267
+ inline C10_HOST_DEVICE Half operator/(Half a, int64_t b) {
268
+ return a / static_cast<Half>(b);
269
+ }
270
+
271
+ inline C10_HOST_DEVICE Half operator+(int64_t a, Half b) {
272
+ return static_cast<Half>(a) + b;
273
+ }
274
+ inline C10_HOST_DEVICE Half operator-(int64_t a, Half b) {
275
+ return static_cast<Half>(a) - b;
276
+ }
277
+ inline C10_HOST_DEVICE Half operator*(int64_t a, Half b) {
278
+ return static_cast<Half>(a) * b;
279
+ }
280
+ inline C10_HOST_DEVICE Half operator/(int64_t a, Half b) {
281
+ return static_cast<Half>(a) / b;
282
+ }
283
+
284
+ /// NOTE: we do not define comparisons directly and instead rely on the implicit
285
+ /// conversion from c10::Half to float.
286
+
287
+ } // namespace c10
288
+
289
+ namespace std {
290
+
291
+ template <>
292
+ class numeric_limits<c10::Half> {
293
+ public:
294
+ static constexpr bool is_specialized = true;
295
+ static constexpr bool is_signed = true;
296
+ static constexpr bool is_integer = false;
297
+ static constexpr bool is_exact = false;
298
+ static constexpr bool has_infinity = true;
299
+ static constexpr bool has_quiet_NaN = true;
300
+ static constexpr bool has_signaling_NaN = true;
301
+ static constexpr auto has_denorm = numeric_limits<float>::has_denorm;
302
+ static constexpr auto has_denorm_loss =
303
+ numeric_limits<float>::has_denorm_loss;
304
+ static constexpr auto round_style = numeric_limits<float>::round_style;
305
+ static constexpr bool is_iec559 = true;
306
+ static constexpr bool is_bounded = true;
307
+ static constexpr bool is_modulo = false;
308
+ static constexpr int digits = 11;
309
+ static constexpr int digits10 = 3;
310
+ static constexpr int max_digits10 = 5;
311
+ static constexpr int radix = 2;
312
+ static constexpr int min_exponent = -13;
313
+ static constexpr int min_exponent10 = -4;
314
+ static constexpr int max_exponent = 16;
315
+ static constexpr int max_exponent10 = 4;
316
+ static constexpr auto traps = numeric_limits<float>::traps;
317
+ static constexpr auto tinyness_before =
318
+ numeric_limits<float>::tinyness_before;
319
+ static constexpr c10::Half min() {
320
+ return c10::Half(0x0400, c10::Half::from_bits());
321
+ }
322
+ static constexpr c10::Half lowest() {
323
+ return c10::Half(0xFBFF, c10::Half::from_bits());
324
+ }
325
+ static constexpr c10::Half max() {
326
+ return c10::Half(0x7BFF, c10::Half::from_bits());
327
+ }
328
+ static constexpr c10::Half epsilon() {
329
+ return c10::Half(0x1400, c10::Half::from_bits());
330
+ }
331
+ static constexpr c10::Half round_error() {
332
+ return c10::Half(0x3800, c10::Half::from_bits());
333
+ }
334
+ static constexpr c10::Half infinity() {
335
+ return c10::Half(0x7C00, c10::Half::from_bits());
336
+ }
337
+ static constexpr c10::Half quiet_NaN() {
338
+ return c10::Half(0x7E00, c10::Half::from_bits());
339
+ }
340
+ static constexpr c10::Half signaling_NaN() {
341
+ return c10::Half(0x7D00, c10::Half::from_bits());
342
+ }
343
+ static constexpr c10::Half denorm_min() {
344
+ return c10::Half(0x0001, c10::Half::from_bits());
345
+ }
346
+ };
347
+
348
+ } // namespace std
349
+
350
+ C10_CLANG_DIAGNOSTIC_POP()
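A minimal usage sketch for the mixed-type operator overloads and the std::numeric_limits<c10::Half> specialization added above. It assumes a libtorch / PyTorch C++ extension build where <c10/util/Half.h> is on the include path; the main() harness is illustrative only.

    #include <c10/util/Half.h>
    #include <iostream>
    #include <limits>

    int main() {
      c10::Half h(1.5f);    // stored as IEEE fp16 bits
      float f = h + 2.0f;   // Half op float promotes to float
      double d = 3.0 / h;   // Half op double promotes to double
      c10::Half q = h * 2;  // Half op int converts the int to Half

      using lim = std::numeric_limits<c10::Half>;
      std::cout << f << " " << d << " " << static_cast<float>(q) << "\n";
      // max() is 0x7BFF (65504), epsilon() is 0x1400 (2^-10 = 0.0009765625)
      std::cout << static_cast<float>(lim::max()) << " "
                << static_cast<float>(lim::epsilon()) << "\n";
    }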
venv/lib/python3.10/site-packages/torch/include/c10/util/Half.h ADDED
@@ -0,0 +1,538 @@
1
+ #pragma once
2
+
3
+ /// Defines the Half type (half-precision floating-point) including conversions
4
+ /// to standard C types and basic arithmetic operations. Note that arithmetic
5
+ /// operations are implemented by converting to floating point and
6
+ /// performing the operation in float32, instead of using CUDA half intrinsics.
7
+ /// Most uses of this type within ATen are memory bound, including the
8
+ /// element-wise kernels, and the half intrinsics aren't efficient on all GPUs.
9
+ /// If you are writing a compute bound kernel, you can use the CUDA half
10
+ /// intrinsics directly on the Half type from device code.
11
+
12
+ #include <c10/macros/Export.h>
13
+ #include <c10/macros/Macros.h>
14
+ #include <c10/util/TypeSafeSignMath.h>
15
+ #include <c10/util/complex.h>
16
+ #include <c10/util/floating_point_utils.h>
17
+ #include <type_traits>
18
+
19
+ #if defined(__cplusplus) && (__cplusplus >= 201103L)
20
+ #include <cmath>
21
+ #elif !defined(__OPENCL_VERSION__)
22
+ #include <math.h>
23
+ #endif
24
+
25
+ #ifdef _MSC_VER
26
+ #include <intrin.h>
27
+ #endif
28
+
29
+ #include <cstdint>
30
+ #include <cstring>
31
+ #include <iosfwd>
32
+ #include <limits>
33
+
34
+ #ifdef __CUDACC__
35
+ #include <cuda_fp16.h>
36
+ #endif
37
+
38
+ #ifdef __HIPCC__
39
+ #include <hip/hip_fp16.h>
40
+ #endif
41
+
42
+ #if defined(CL_SYCL_LANGUAGE_VERSION)
43
+ #include <CL/sycl.hpp> // for SYCL 1.2.1
44
+ #elif defined(SYCL_LANGUAGE_VERSION)
45
+ #include <sycl/sycl.hpp> // for SYCL 2020
46
+ #endif
47
+
48
+ #if defined(__aarch64__) && !defined(C10_MOBILE) && !defined(__CUDACC__)
49
+ #include <arm_neon.h>
50
+ #endif
51
+
52
+ namespace c10 {
53
+
54
+ namespace detail {
55
+
56
+ /*
57
+ * Convert a 16-bit floating-point number in IEEE half-precision format, in bit
58
+ * representation, to a 32-bit floating-point number in IEEE single-precision
59
+ * format, in bit representation.
60
+ *
61
+ * @note The implementation doesn't use any floating-point operations.
62
+ */
63
+ inline uint32_t fp16_ieee_to_fp32_bits(uint16_t h) {
64
+ /*
65
+ * Extend the half-precision floating-point number to 32 bits and shift to the
66
+ * upper part of the 32-bit word:
67
+ * +---+-----+------------+-------------------+
68
+ * | S |EEEEE|MM MMMM MMMM|0000 0000 0000 0000|
69
+ * +---+-----+------------+-------------------+
70
+ * Bits 31 26-30 16-25 0-15
71
+ *
72
+ * S - sign bit, E - bits of the biased exponent, M - bits of the mantissa, 0
73
+ * - zero bits.
74
+ */
75
+ const uint32_t w = (uint32_t)h << 16;
76
+ /*
77
+ * Extract the sign of the input number into the high bit of the 32-bit word:
78
+ *
79
+ * +---+----------------------------------+
80
+ * | S |0000000 00000000 00000000 00000000|
81
+ * +---+----------------------------------+
82
+ * Bits 31 0-31
83
+ */
84
+ const uint32_t sign = w & UINT32_C(0x80000000);
85
+ /*
86
+ * Extract mantissa and biased exponent of the input number into the bits 0-30
87
+ * of the 32-bit word:
88
+ *
89
+ * +---+-----+------------+-------------------+
90
+ * | 0 |EEEEE|MM MMMM MMMM|0000 0000 0000 0000|
91
+ * +---+-----+------------+-------------------+
92
+ * Bits 30 27-31 17-26 0-16
93
+ */
94
+ const uint32_t nonsign = w & UINT32_C(0x7FFFFFFF);
95
+ /*
96
+ * Renorm shift is the number of bits to shift mantissa left to make the
97
+ * half-precision number normalized. If the initial number is normalized, some
98
+ * of its high 6 bits (sign == 0 and 5-bit exponent) equals one. In this case
99
+ * renorm_shift == 0. If the number is denormalized, renorm_shift > 0. Note
100
+ * that if we shift denormalized nonsign by renorm_shift, the unit bit of
101
+ * mantissa will shift into exponent, turning the biased exponent into 1, and
102
+ * making mantissa normalized (i.e. without leading 1).
103
+ */
104
+ #ifdef _MSC_VER
105
+ unsigned long nonsign_bsr;
106
+ _BitScanReverse(&nonsign_bsr, (unsigned long)nonsign);
107
+ uint32_t renorm_shift = (uint32_t)nonsign_bsr ^ 31;
108
+ #else
109
+ uint32_t renorm_shift = __builtin_clz(nonsign);
110
+ #endif
111
+ renorm_shift = renorm_shift > 5 ? renorm_shift - 5 : 0;
112
+ /*
113
+ * Iff half-precision number has exponent of 15, the addition overflows
114
+ * it into bit 31, and the subsequent shift turns the high 9 bits
115
+ * into 1. Thus inf_nan_mask == 0x7F800000 if the half-precision number
116
+ * had exponent of 15 (i.e. was NaN or infinity), and 0x00000000 otherwise
117
+ */
118
+ const int32_t inf_nan_mask =
119
+ ((int32_t)(nonsign + 0x04000000) >> 8) & INT32_C(0x7F800000);
120
+ /*
121
+ * Iff nonsign is 0, it overflows into 0xFFFFFFFF, turning bit 31
122
+ * into 1. Otherwise, bit 31 remains 0. The signed shift right by 31
123
+ * broadcasts bit 31 into all bits of the zero_mask. Thus zero_mask ==
124
+ * 0xFFFFFFFF if the half-precision number was zero (+0.0h or -0.0h), and
125
+ * 0x00000000 otherwise
126
+ */
127
+ const int32_t zero_mask = (int32_t)(nonsign - 1) >> 31;
128
+ /*
129
+ * 1. Shift nonsign left by renorm_shift to normalize it (if the input
130
+ * was denormal)
131
+ * 2. Shift nonsign right by 3 so the exponent (5 bits originally)
132
+ * becomes an 8-bit field and 10-bit mantissa shifts into the 10 high
133
+ * bits of the 23-bit mantissa of IEEE single-precision number.
134
+ * 3. Add 0x70 to the exponent (starting at bit 23) to compensate the
135
+ * difference in exponent bias (0x7F for single-precision number less 0xF
136
+ * for half-precision number).
137
+ * 4. Subtract renorm_shift from the exponent (starting at bit 23) to
138
+ * account for renormalization. As renorm_shift is less than 0x70, this
139
+ * can be combined with step 3.
140
+ * 5. Binary OR with inf_nan_mask to turn the exponent into 0xFF if the
141
+ * input was NaN or infinity.
142
+ * 6. Binary ANDNOT with zero_mask to turn the mantissa and exponent
143
+ * into zero if the input was zero.
144
+ * 7. Combine with the sign of the input number.
145
+ */
146
+ return sign |
147
+ ((((nonsign << renorm_shift >> 3) + ((0x70 - renorm_shift) << 23)) |
148
+ inf_nan_mask) &
149
+ ~zero_mask);
150
+ }
151
+
152
+ /*
153
+ * Convert a 16-bit floating-point number in IEEE half-precision format, in bit
154
+ * representation, to a 32-bit floating-point number in IEEE single-precision
155
+ * format.
156
+ *
157
+ * @note The implementation relies on IEEE-like (no assumption about rounding
158
+ * mode and no operations on denormals) floating-point operations and bitcasts
159
+ * between integer and floating-point variables.
160
+ */
161
+ C10_HOST_DEVICE inline float fp16_ieee_to_fp32_value(uint16_t h) {
162
+ /*
163
+ * Extend the half-precision floating-point number to 32 bits and shift to the
164
+ * upper part of the 32-bit word:
165
+ * +---+-----+------------+-------------------+
166
+ * | S |EEEEE|MM MMMM MMMM|0000 0000 0000 0000|
167
+ * +---+-----+------------+-------------------+
168
+ * Bits 31 26-30 16-25 0-15
169
+ *
170
+ * S - sign bit, E - bits of the biased exponent, M - bits of the mantissa, 0
171
+ * - zero bits.
172
+ */
173
+ const uint32_t w = (uint32_t)h << 16;
174
+ /*
175
+ * Extract the sign of the input number into the high bit of the 32-bit word:
176
+ *
177
+ * +---+----------------------------------+
178
+ * | S |0000000 00000000 00000000 00000000|
179
+ * +---+----------------------------------+
180
+ * Bits 31 0-31
181
+ */
182
+ const uint32_t sign = w & UINT32_C(0x80000000);
183
+ /*
184
+ * Extract mantissa and biased exponent of the input number into the high bits
185
+ * of the 32-bit word:
186
+ *
187
+ * +-----+------------+---------------------+
188
+ * |EEEEE|MM MMMM MMMM|0 0000 0000 0000 0000|
189
+ * +-----+------------+---------------------+
190
+ * Bits 27-31 17-26 0-16
191
+ */
192
+ const uint32_t two_w = w + w;
193
+
194
+ /*
195
+ * Shift mantissa and exponent into bits 23-28 and bits 13-22 so they become
196
+ * mantissa and exponent of a single-precision floating-point number:
197
+ *
198
+ * S|Exponent | Mantissa
199
+ * +-+---+-----+------------+----------------+
200
+ * |0|000|EEEEE|MM MMMM MMMM|0 0000 0000 0000|
201
+ * +-+---+-----+------------+----------------+
202
+ * Bits | 23-31 | 0-22
203
+ *
204
+ * Next, there are some adjustments to the exponent:
205
+ * - The exponent needs to be corrected by the difference in exponent bias
206
+ * between single-precision and half-precision formats (0x7F - 0xF = 0x70)
207
+ * - Inf and NaN values in the inputs should become Inf and NaN values after
208
+ * conversion to the single-precision number. Therefore, if the biased
209
+ * exponent of the half-precision input was 0x1F (max possible value), the
210
+ * biased exponent of the single-precision output must be 0xFF (max possible
211
+ * value). We do this correction in two steps:
212
+ * - First, we adjust the exponent by (0xFF - 0x1F) = 0xE0 (see exp_offset
213
+ * below) rather than by 0x70 suggested by the difference in the exponent bias
214
+ * (see above).
215
+ * - Then we multiply the single-precision result of exponent adjustment by
216
+ * 2**(-112) to reverse the effect of exponent adjustment by 0xE0 less the
217
+ * necessary exponent adjustment by 0x70 due to difference in exponent bias.
218
+ * The floating-point multiplication hardware would ensure that Inf and
219
+ * NaN would retain their value on at least partially IEEE754-compliant
220
+ * implementations.
221
+ *
222
+ * Note that the above operations do not handle denormal inputs (where biased
223
+ * exponent == 0). However, they also do not operate on denormal inputs, and
224
+ * do not produce denormal results.
225
+ */
226
+ constexpr uint32_t exp_offset = UINT32_C(0xE0) << 23;
227
+ // const float exp_scale = 0x1.0p-112f;
228
+ constexpr uint32_t scale_bits = (uint32_t)15 << 23;
229
+ float exp_scale_val = 0;
230
+ std::memcpy(&exp_scale_val, &scale_bits, sizeof(exp_scale_val));
231
+ const float exp_scale = exp_scale_val;
232
+ const float normalized_value =
233
+ fp32_from_bits((two_w >> 4) + exp_offset) * exp_scale;
234
+
235
+ /*
236
+ * Convert denormalized half-precision inputs into single-precision results
237
+ * (always normalized). Zero inputs are also handled here.
238
+ *
239
+ * In a denormalized number the biased exponent is zero, and mantissa has
240
+ * non-zero bits. First, we shift mantissa into bits 0-9 of the 32-bit word.
241
+ *
242
+ * zeros | mantissa
243
+ * +---------------------------+------------+
244
+ * |0000 0000 0000 0000 0000 00|MM MMMM MMMM|
245
+ * +---------------------------+------------+
246
+ * Bits 10-31 0-9
247
+ *
248
+ * Now, remember that denormalized half-precision numbers are represented as:
249
+ * FP16 = mantissa * 2**(-24).
250
+ * The trick is to construct a normalized single-precision number with the
251
+ * same mantissa as the half-precision input and with an exponent which would
252
+ * scale the corresponding mantissa bits to 2**(-24). A normalized
253
+ * single-precision floating-point number is represented as: FP32 = (1 +
254
+ * mantissa * 2**(-23)) * 2**(exponent - 127) Therefore, when the biased
255
+ * exponent is 126, a unit change in the mantissa of the input denormalized
256
+ * half-precision number causes a change of the constructed single-precision
257
+ * number by 2**(-24), i.e. the same amount.
258
+ *
259
+ * The last step is to adjust the bias of the constructed single-precision
260
+ * number. When the input half-precision number is zero, the constructed
261
+ * single-precision number has the value of FP32 = 1 * 2**(126 - 127) =
262
+ * 2**(-1) = 0.5 Therefore, we need to subtract 0.5 from the constructed
263
+ * single-precision number to get the numerical equivalent of the input
264
+ * half-precision number.
265
+ */
266
+ constexpr uint32_t magic_mask = UINT32_C(126) << 23;
267
+ constexpr float magic_bias = 0.5f;
268
+ const float denormalized_value =
269
+ fp32_from_bits((two_w >> 17) | magic_mask) - magic_bias;
270
+
271
+ /*
272
+ * - Choose either results of conversion of input as a normalized number, or
273
+ * as a denormalized number, depending on the input exponent. The variable
274
+ * two_w contains the input exponent in bits 27-31, therefore if it is smaller than
275
+ * 2**27, the input is either a denormal number, or zero.
276
+ * - Combine the result of conversion of exponent and mantissa with the sign
277
+ * of the input number.
278
+ */
279
+ constexpr uint32_t denormalized_cutoff = UINT32_C(1) << 27;
280
+ const uint32_t result = sign |
281
+ (two_w < denormalized_cutoff ? fp32_to_bits(denormalized_value)
282
+ : fp32_to_bits(normalized_value));
283
+ return fp32_from_bits(result);
284
+ }
285
+
286
+ /*
287
+ * Convert a 32-bit floating-point number in IEEE single-precision format to a
288
+ * 16-bit floating-point number in IEEE half-precision format, in bit
289
+ * representation.
290
+ *
291
+ * @note The implementation relies on IEEE-like (no assumption about rounding
292
+ * mode and no operations on denormals) floating-point operations and bitcasts
293
+ * between integer and floating-point variables.
294
+ */
295
+ inline uint16_t fp16_ieee_from_fp32_value(float f) {
296
+ // const float scale_to_inf = 0x1.0p+112f;
297
+ // const float scale_to_zero = 0x1.0p-110f;
298
+ constexpr uint32_t scale_to_inf_bits = (uint32_t)239 << 23;
299
+ constexpr uint32_t scale_to_zero_bits = (uint32_t)17 << 23;
300
+ float scale_to_inf_val = 0, scale_to_zero_val = 0;
301
+ std::memcpy(&scale_to_inf_val, &scale_to_inf_bits, sizeof(scale_to_inf_val));
302
+ std::memcpy(
303
+ &scale_to_zero_val, &scale_to_zero_bits, sizeof(scale_to_zero_val));
304
+ const float scale_to_inf = scale_to_inf_val;
305
+ const float scale_to_zero = scale_to_zero_val;
306
+
307
+ #if defined(_MSC_VER) && _MSC_VER == 1916
308
+ float base = ((signbit(f) != 0 ? -f : f) * scale_to_inf) * scale_to_zero;
309
+ #else
310
+ float base = (fabsf(f) * scale_to_inf) * scale_to_zero;
311
+ #endif
312
+
313
+ const uint32_t w = fp32_to_bits(f);
314
+ const uint32_t shl1_w = w + w;
315
+ const uint32_t sign = w & UINT32_C(0x80000000);
316
+ uint32_t bias = shl1_w & UINT32_C(0xFF000000);
317
+ if (bias < UINT32_C(0x71000000)) {
318
+ bias = UINT32_C(0x71000000);
319
+ }
320
+
321
+ base = fp32_from_bits((bias >> 1) + UINT32_C(0x07800000)) + base;
322
+ const uint32_t bits = fp32_to_bits(base);
323
+ const uint32_t exp_bits = (bits >> 13) & UINT32_C(0x00007C00);
324
+ const uint32_t mantissa_bits = bits & UINT32_C(0x00000FFF);
325
+ const uint32_t nonsign = exp_bits + mantissa_bits;
326
+ return static_cast<uint16_t>(
327
+ (sign >> 16) |
328
+ (shl1_w > UINT32_C(0xFF000000) ? UINT16_C(0x7E00) : nonsign));
329
+ }
330
+
331
+ #if defined(__aarch64__) && !defined(C10_MOBILE) && !defined(__CUDACC__)
332
+ constexpr inline float16_t fp16_from_bits(uint16_t h) {
333
+ union {
334
+ uint16_t as_bits;
335
+ float16_t as_value;
336
+ } fp16 = {h};
337
+ return fp16.as_value;
338
+ }
339
+
340
+ constexpr inline uint16_t fp16_to_bits(float16_t f) {
341
+ union {
342
+ float16_t as_value;
343
+ uint16_t as_bits;
344
+ } fp16 = {.as_value = f};
345
+ return fp16.as_bits;
346
+ }
347
+
348
+ // According to https://godbolt.org/z/8s14GvEjo it would translate to single
349
+ // fcvt s0, h0
350
+ inline float native_fp16_to_fp32_value(uint16_t h) {
351
+ return static_cast<float>(fp16_from_bits(h));
352
+ }
353
+
354
+ inline uint16_t native_fp16_from_fp32_value(float f) {
355
+ return fp16_to_bits(static_cast<float16_t>(f));
356
+ }
357
+ #endif
358
+
359
+ } // namespace detail
360
+
361
+ struct alignas(2) Half {
362
+ unsigned short x;
363
+
364
+ struct from_bits_t {};
365
+ C10_HOST_DEVICE static constexpr from_bits_t from_bits() {
366
+ return from_bits_t();
367
+ }
368
+
369
+ // HIP wants __host__ __device__ tag, CUDA does not
370
+ #if defined(USE_ROCM)
371
+ C10_HOST_DEVICE Half() = default;
372
+ #else
373
+ Half() = default;
374
+ #endif
375
+
376
+ constexpr C10_HOST_DEVICE Half(unsigned short bits, from_bits_t) : x(bits) {}
377
+ #if defined(__aarch64__) && !defined(C10_MOBILE) && !defined(__CUDACC__)
378
+ inline Half(float16_t value);
379
+ inline operator float16_t() const;
380
+ #else
381
+ inline C10_HOST_DEVICE Half(float value);
382
+ inline C10_HOST_DEVICE operator float() const;
383
+ #endif
384
+
385
+ #if defined(__CUDACC__) || defined(__HIPCC__)
386
+ inline C10_HOST_DEVICE Half(const __half& value);
387
+ inline C10_HOST_DEVICE operator __half() const;
388
+ #endif
389
+ #ifdef SYCL_LANGUAGE_VERSION
390
+ inline C10_HOST_DEVICE Half(const sycl::half& value);
391
+ inline C10_HOST_DEVICE operator sycl::half() const;
392
+ #endif
393
+ };
394
+
395
+ // TODO : move to complex.h
396
+ template <>
397
+ struct alignas(4) complex<Half> {
398
+ Half real_;
399
+ Half imag_;
400
+
401
+ // Constructors
402
+ complex() = default;
403
+ // Half constructor is not constexpr so the following constructor can't
404
+ // be constexpr
405
+ C10_HOST_DEVICE explicit inline complex(const Half& real, const Half& imag)
406
+ : real_(real), imag_(imag) {}
407
+ C10_HOST_DEVICE inline complex(const c10::complex<float>& value)
408
+ : real_(value.real()), imag_(value.imag()) {}
409
+
410
+ // Conversion operator
411
+ inline C10_HOST_DEVICE operator c10::complex<float>() const {
412
+ return {real_, imag_};
413
+ }
414
+
415
+ constexpr C10_HOST_DEVICE Half real() const {
416
+ return real_;
417
+ }
418
+ constexpr C10_HOST_DEVICE Half imag() const {
419
+ return imag_;
420
+ }
421
+
422
+ C10_HOST_DEVICE complex<Half>& operator+=(const complex<Half>& other) {
423
+ real_ = static_cast<float>(real_) + static_cast<float>(other.real_);
424
+ imag_ = static_cast<float>(imag_) + static_cast<float>(other.imag_);
425
+ return *this;
426
+ }
427
+
428
+ C10_HOST_DEVICE complex<Half>& operator-=(const complex<Half>& other) {
429
+ real_ = static_cast<float>(real_) - static_cast<float>(other.real_);
430
+ imag_ = static_cast<float>(imag_) - static_cast<float>(other.imag_);
431
+ return *this;
432
+ }
433
+
434
+ C10_HOST_DEVICE complex<Half>& operator*=(const complex<Half>& other) {
435
+ auto a = static_cast<float>(real_);
436
+ auto b = static_cast<float>(imag_);
437
+ auto c = static_cast<float>(other.real());
438
+ auto d = static_cast<float>(other.imag());
439
+ real_ = a * c - b * d;
440
+ imag_ = a * d + b * c;
441
+ return *this;
442
+ }
443
+ };
444
+
445
+ // In some versions of MSVC, there will be a compiler error when building.
446
+ // C4146: unary minus operator applied to unsigned type, result still unsigned
447
+ // C4804: unsafe use of type 'bool' in operation
448
+ // It can be addressed by disabling the following warning.
449
+ #ifdef _MSC_VER
450
+ #pragma warning(push)
451
+ #pragma warning(disable : 4146)
452
+ #pragma warning(disable : 4804)
453
+ #pragma warning(disable : 4018)
454
+ #endif
455
+
456
+ // The overflow checks may involve float to int conversion which may
457
+ // trigger precision loss warning. Re-enable the warning once the code
458
+ // is fixed. See T58053069.
459
+ C10_CLANG_DIAGNOSTIC_PUSH()
460
+ #if C10_CLANG_HAS_WARNING("-Wimplicit-float-conversion")
461
+ C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-float-conversion")
462
+ #endif
463
+
464
+ // bool can be converted to any type.
465
+ // Without specializing on bool, in pytorch_linux_trusty_py2_7_9_build:
466
+ // `error: comparison of constant '255' with boolean expression is always false`
467
+ // for `f > limit::max()` below
468
+ template <typename To, typename From>
469
+ std::enable_if_t<std::is_same_v<From, bool>, bool> overflows(
470
+ From /*f*/,
471
+ bool strict_unsigned = false) {
472
+ return false;
473
+ }
474
+
475
+ // skip isnan and isinf check for integral types
476
+ template <typename To, typename From>
477
+ std::enable_if_t<std::is_integral_v<From> && !std::is_same_v<From, bool>, bool>
478
+ overflows(From f, bool strict_unsigned = false) {
479
+ using limit = std::numeric_limits<typename scalar_value_type<To>::type>;
480
+ if constexpr (!limit::is_signed && std::numeric_limits<From>::is_signed) {
481
+ // allow for negative numbers to wrap using two's complement arithmetic.
482
+ // For example, with uint8, this allows for `a - b` to be treated as
483
+ // `a + 255 * b`.
484
+ if (!strict_unsigned) {
485
+ return greater_than_max<To>(f) ||
486
+ (c10::is_negative(f) &&
487
+ -static_cast<uint64_t>(f) > static_cast<uint64_t>(limit::max()));
488
+ }
489
+ }
490
+ return c10::less_than_lowest<To>(f) || greater_than_max<To>(f);
491
+ }
492
+
493
+ template <typename To, typename From>
494
+ std::enable_if_t<std::is_floating_point_v<From>, bool> overflows(
495
+ From f,
496
+ bool strict_unsigned = false) {
497
+ using limit = std::numeric_limits<typename scalar_value_type<To>::type>;
498
+ if (limit::has_infinity && std::isinf(static_cast<double>(f))) {
499
+ return false;
500
+ }
501
+ if (!limit::has_quiet_NaN && (f != f)) {
502
+ return true;
503
+ }
504
+ return f < limit::lowest() || f > limit::max();
505
+ }
506
+
507
+ C10_CLANG_DIAGNOSTIC_POP()
508
+
509
+ #ifdef _MSC_VER
510
+ #pragma warning(pop)
511
+ #endif
512
+
513
+ template <typename To, typename From>
514
+ std::enable_if_t<is_complex<From>::value, bool> overflows(
515
+ From f,
516
+ bool strict_unsigned = false) {
517
+ // casts from complex to real are considered to overflow if the
518
+ // imaginary component is non-zero
519
+ if (!is_complex<To>::value && f.imag() != 0) {
520
+ return true;
521
+ }
522
+ // Check for overflow componentwise
523
+ // (Technically, the imag overflow check is guaranteed to be false
524
+ // when !is_complex<To>, but any optimizer worth its salt will be
525
+ // able to figure it out.)
526
+ return overflows<
527
+ typename scalar_value_type<To>::type,
528
+ typename From::value_type>(f.real()) ||
529
+ overflows<
530
+ typename scalar_value_type<To>::type,
531
+ typename From::value_type>(f.imag());
532
+ }
533
+
534
+ C10_API std::ostream& operator<<(std::ostream& out, const Half& value);
535
+
536
+ } // namespace c10
537
+
538
+ #include <c10/util/Half-inl.h> // IWYU pragma: keep
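A short sketch of the bit-level fp16 conversion helpers and the overflows<To, From> check declared above. It assumes the same include path as the other headers in this commit and is illustrative rather than part of the upstream sources.

    #include <c10/util/Half.h>
    #include <cstdint>
    #include <cstdio>

    int main() {
      // Round-trip a float through the IEEE fp16 bit-level helpers.
      const float f = 0.1f;
      const uint16_t bits = c10::detail::fp16_ieee_from_fp32_value(f);
      const float back = c10::detail::fp16_ieee_to_fp32_value(bits);
      std::printf("0x%04x -> %.8f\n", bits, back);  // fp16 keeps ~3 decimal digits

      // overflows<To, From> checks whether a value fits the target type's range.
      const bool too_big = c10::overflows<c10::Half, double>(70000.0);  // true: > 65504
      const bool fits = c10::overflows<c10::Half, double>(1.0);         // false
      std::printf("%d %d\n", too_big, fits);
    }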
venv/lib/python3.10/site-packages/torch/include/c10/util/IdWrapper.h ADDED
@@ -0,0 +1,77 @@
1
+ #pragma once
2
+
3
+ #include <cstddef>
4
+ #include <functional>
5
+ #include <utility>
6
+
7
+ namespace c10 {
8
+
9
+ /**
10
+ * This template simplifies generation of simple classes that wrap an id
11
+ * in a typesafe way. Namely, you can use it to create a very lightweight
12
+ * type that only offers equality comparators and hashing. Example:
13
+ *
14
+ * struct MyIdType final : IdWrapper<MyIdType, uint32_t> {
15
+ * constexpr explicit MyIdType(uint32_t id): IdWrapper(id) {}
16
+ * };
17
+ *
18
+ * Then in the global top level namespace:
19
+ *
20
+ * C10_DEFINE_HASH_FOR_IDWRAPPER(MyIdType);
21
+ *
22
+ * That's it - equality operators and hash functions are automatically defined
23
+ * for you, given the underlying type supports it.
24
+ */
25
+ template <class ConcreteType, class UnderlyingType>
26
+ class IdWrapper {
27
+ public:
28
+ using underlying_type = UnderlyingType;
29
+ using concrete_type = ConcreteType;
30
+
31
+ protected:
32
+ constexpr explicit IdWrapper(underlying_type id) noexcept(
33
+ noexcept(underlying_type(std::declval<underlying_type>())))
34
+ : id_(id) {}
35
+
36
+ constexpr underlying_type underlyingId() const
37
+ noexcept(noexcept(underlying_type(std::declval<underlying_type>()))) {
38
+ return id_;
39
+ }
40
+
41
+ private:
42
+ friend size_t hash_value(const concrete_type& v) {
43
+ return std::hash<underlying_type>()(v.id_);
44
+ }
45
+
46
+ // TODO Making operator== noexcept if underlying type is noexcept equality
47
+ // comparable doesn't work with GCC 4.8.
48
+ // Fix this once we don't need GCC 4.8 anymore.
49
+ friend constexpr bool operator==(
50
+ const concrete_type& lhs,
51
+ const concrete_type& rhs) noexcept {
52
+ return lhs.id_ == rhs.id_;
53
+ }
54
+
55
+ // TODO Making operator!= noexcept if operator== is noexcept doesn't work with
56
+ // GCC 4.8.
57
+ // Fix this once we don't need GCC 4.8 anymore.
58
+ friend constexpr bool operator!=(
59
+ const concrete_type& lhs,
60
+ const concrete_type& rhs) noexcept {
61
+ return !(lhs == rhs);
62
+ }
63
+
64
+ underlying_type id_;
65
+ };
66
+
67
+ } // namespace c10
68
+
69
+ #define C10_DEFINE_HASH_FOR_IDWRAPPER(ClassName) \
70
+ namespace std { \
71
+ template <> \
72
+ struct hash<ClassName> { \
73
+ size_t operator()(ClassName x) const { \
74
+ return hash_value(x); \
75
+ } \
76
+ }; \
77
+ }
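A concrete version of the pattern described in the header's doc comment; the StreamId name is hypothetical and only serves to show the IdWrapper / C10_DEFINE_HASH_FOR_IDWRAPPER combination.

    #include <c10/util/IdWrapper.h>
    #include <cstdint>
    #include <iostream>
    #include <unordered_set>

    // Typesafe wrapper around a raw uint32_t id, per the doc comment above.
    struct StreamId final : c10::IdWrapper<StreamId, uint32_t> {
      constexpr explicit StreamId(uint32_t id) : IdWrapper(id) {}
    };

    // Must appear at global namespace scope: it specializes std::hash.
    C10_DEFINE_HASH_FOR_IDWRAPPER(StreamId);

    int main() {
      std::unordered_set<StreamId> seen;
      seen.insert(StreamId(1));
      seen.insert(StreamId(1));  // duplicate: the set keeps a single element
      std::cout << seen.size() << " " << (StreamId(1) == StreamId(2)) << "\n";  // 1 0
    }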
venv/lib/python3.10/site-packages/torch/include/c10/util/Logging.h ADDED
@@ -0,0 +1,340 @@
1
+ #ifndef C10_UTIL_LOGGING_H_
2
+ #define C10_UTIL_LOGGING_H_
3
+
4
+ #include <climits>
5
+ #include <exception>
6
+ #include <functional>
7
+ #include <limits>
8
+ #include <sstream>
9
+
10
+ #include <c10/macros/Macros.h>
11
+ #include <c10/util/Exception.h>
12
+ #include <c10/util/Flags.h>
13
+ #include <c10/util/StringUtil.h>
14
+
15
+ // CAFFE2_LOG_THRESHOLD is a compile time flag that would allow us to turn off
16
+ // logging at compile time so no logging message below that level is produced
17
+ // at all. The value should be between INT_MIN and CAFFE_FATAL.
18
+ #ifndef CAFFE2_LOG_THRESHOLD
19
+ // If we have not defined the compile time log threshold, we keep all the
20
+ // log cases.
21
+ #define CAFFE2_LOG_THRESHOLD INT_MIN
22
+ #endif // CAFFE2_LOG_THRESHOLD
23
+
24
+ // Below are different implementations for glog and non-glog cases.
25
+ #ifdef C10_USE_GLOG
26
+ #include <c10/util/logging_is_google_glog.h>
27
+ #else // !C10_USE_GLOG
28
+ #include <c10/util/logging_is_not_google_glog.h>
29
+ #endif // C10_USE_GLOG
30
+
31
+ C10_DECLARE_int(caffe2_log_level);
32
+ C10_DECLARE_bool(caffe2_use_fatal_for_enforce);
33
+
34
+ // Some versions of GLOG support less-spammy version of LOG_EVERY_MS. If it's
35
+ // not available - just short-circuit to the always working one one.
36
+ // We define the C10_ name to avoid confusing other files
37
+ #ifdef LOG_EVERY_MS
38
+ #define C10_LOG_EVERY_MS(severity, ms) LOG_EVERY_MS(severity, ms)
39
+ #else
40
+ #define C10_LOG_EVERY_MS(severity, ms) LOG(severity)
41
+ #endif
42
+
43
+ // Same for LOG_FIRST_N
44
+ #ifdef LOG_FIRST_N
45
+ #define C10_LOG_FIRST_N(severity, n) LOG_FIRST_N(severity, n)
46
+ #else
47
+ #define C10_LOG_FIRST_N(severity, n) LOG(severity)
48
+ #endif
49
+
50
+ // Same for LOG_EVERY_N
51
+ #ifdef LOG_EVERY_N
52
+ #define C10_LOG_EVERY_N(severity, n) LOG_EVERY_N(severity, n)
53
+ #else
54
+ #define C10_LOG_EVERY_N(severity, n) LOG(severity)
55
+ #endif
56
+
57
+ namespace c10 {
58
+
59
+ using std::string;
60
+
61
+ // Functions that we use for initialization.
62
+ C10_API bool InitCaffeLogging(int* argc, char** argv);
63
+ C10_API void UpdateLoggingLevelsFromFlags();
64
+
65
+ [[noreturn]] C10_API void ThrowEnforceNotMet(
66
+ const char* file,
67
+ const int line,
68
+ const char* condition,
69
+ const std::string& msg,
70
+ const void* caller = nullptr);
71
+
72
+ [[noreturn]] C10_API void ThrowEnforceNotMet(
73
+ const char* file,
74
+ const int line,
75
+ const char* condition,
76
+ const char* msg,
77
+ const void* caller = nullptr);
78
+
79
+ [[noreturn]] C10_API inline void ThrowEnforceNotMet(
80
+ const char* file,
81
+ const int line,
82
+ const char* condition,
83
+ detail::CompileTimeEmptyString /*msg*/,
84
+ const void* caller = nullptr) {
85
+ ThrowEnforceNotMet(file, line, condition, "", caller);
86
+ }
87
+
88
+ [[noreturn]] C10_API void ThrowEnforceFiniteNotMet(
89
+ const char* file,
90
+ const int line,
91
+ const char* condition,
92
+ const std::string& msg,
93
+ const void* caller = nullptr);
94
+
95
+ [[noreturn]] C10_API void ThrowEnforceFiniteNotMet(
96
+ const char* file,
97
+ const int line,
98
+ const char* condition,
99
+ const char* msg,
100
+ const void* caller = nullptr);
101
+
102
+ [[noreturn]] C10_API inline void ThrowEnforceFiniteNotMet(
103
+ const char* file,
104
+ const int line,
105
+ const char* condition,
106
+ detail::CompileTimeEmptyString /*msg*/,
107
+ const void* caller = nullptr) {
108
+ ThrowEnforceFiniteNotMet(file, line, condition, "", caller);
109
+ }
110
+
111
+ constexpr bool IsUsingGoogleLogging() {
112
+ #ifdef C10_USE_GLOG
113
+ return true;
114
+ #else
115
+ return false;
116
+ #endif
117
+ }
118
+
119
+ /**
120
+ * A utility to allow one to show log info to stderr after the program starts.
121
+ *
122
+ * This is similar to calling GLOG's --logtostderr, or setting caffe2_log_level
123
+ * to smaller than INFO. You are recommended to only use this in a few sparse
124
+ * cases, such as when you want to write a tutorial or something. Normally, use
125
+ * the commandline flags to set the log level.
126
+ */
127
+ C10_API void ShowLogInfoToStderr();
128
+
129
+ C10_API void SetStackTraceFetcher(std::function<string(void)> fetcher);
130
+
131
+ using EnforceNotMet = ::c10::Error;
132
+
133
+ #define CAFFE_ENFORCE(condition, ...) \
134
+ do { \
135
+ if (C10_UNLIKELY(!(condition))) { \
136
+ ::c10::ThrowEnforceNotMet( \
137
+ __FILE__, __LINE__, #condition, ::c10::str(__VA_ARGS__)); \
138
+ } \
139
+ } while (false)
140
+
141
+ #define CAFFE_ENFORCE_FINITE(condition, ...) \
142
+ do { \
143
+ if (C10_UNLIKELY(!(condition))) { \
144
+ ::c10::ThrowEnforceFiniteNotMet( \
145
+ __FILE__, __LINE__, #condition, ::c10::str(__VA_ARGS__)); \
146
+ } \
147
+ } while (false)
148
+
149
+ #define CAFFE_ENFORCE_WITH_CALLER(condition, ...) \
150
+ do { \
151
+ if (C10_UNLIKELY(!(condition))) { \
152
+ ::c10::ThrowEnforceNotMet( \
153
+ __FILE__, __LINE__, #condition, ::c10::str(__VA_ARGS__), this); \
154
+ } \
155
+ } while (false)
156
+
157
+ #define CAFFE_THROW(...) \
158
+ ::c10::ThrowEnforceNotMet(__FILE__, __LINE__, "", ::c10::str(__VA_ARGS__))
159
+
160
+ /**
161
+ * Rich logging messages
162
+ *
163
+ * CAFFE_ENFORCE_THAT can be used with one of the "checker functions" that
164
+ * capture input argument values and add it to the exception message. E.g.
165
+ * `CAFFE_ENFORCE_THAT(Equals(foo(x), bar(y)), "Optional additional message")`
166
+ * would evaluate both foo and bar only once and if the results are not equal -
167
+ * include them in the exception message.
168
+ *
169
+ * Some of the basic checker functions like Equals or Greater are already
170
+ * defined below. Other header might define customized checkers by adding
171
+ * functions to caffe2::enforce_detail namespace. For example:
172
+ *
173
+ * namespace caffe2 { namespace enforce_detail {
174
+ * inline EnforceFailMessage IsVector(const vector<int64_t>& shape) {
175
+ * if (shape.size() == 1) { return EnforceOK(); }
176
+ * return c10::str("Shape ", shape, " is not a vector");
177
+ * }
178
+ * }}
179
+ *
180
+ * With further usages like `CAFFE_ENFORCE_THAT(IsVector(Input(0).dims()))`
181
+ *
182
+ * Convenient wrappers for binary operations like CAFFE_ENFORCE_EQ are provided
183
+ * too. Please use them instead of TORCH_CHECK_EQ and friends for failures in
184
+ * user-provided input.
185
+ */
186
+
187
+ namespace enforce_detail {
188
+
189
+ template <typename T1, typename T2>
190
+ std::string enforceFailMsgImpl(const T1& x, const T2& y) {
191
+ return c10::str(x, " vs ", y);
192
+ }
193
+
194
+ template <typename T1, typename T2, typename... Args>
195
+ std::string enforceFailMsgImpl(const T1& x, const T2& y, const Args&... args) {
196
+ return c10::str(x, " vs ", y, ". ", args...);
197
+ }
198
+
199
+ template <typename Pred, typename T1, typename T2, typename GetFailMsgFunc>
200
+ void enforceThatImpl(
201
+ Pred p,
202
+ const T1& lhs,
203
+ const T2& rhs,
204
+ const char* file,
205
+ int line,
206
+ const char* expr,
207
+ const void* caller,
208
+ GetFailMsgFunc getFailMsg) {
209
+ if (C10_UNLIKELY(!(p(lhs, rhs)))) {
210
+ ::c10::ThrowEnforceNotMet(file, line, expr, getFailMsg(lhs, rhs), caller);
211
+ }
212
+ }
213
+
214
+ #define CAFFE_ENFORCE_THAT_IMPL(op, lhs, rhs, expr, ...) \
215
+ ::c10::enforce_detail::enforceThatImpl( \
216
+ op, \
217
+ (lhs), \
218
+ (rhs), \
219
+ __FILE__, \
220
+ __LINE__, \
221
+ expr, \
222
+ nullptr, \
223
+ [&](const auto& arg1, const auto& arg2) { \
224
+ return ::c10::enforce_detail::enforceFailMsgImpl( \
225
+ arg1, arg2, ##__VA_ARGS__); \
226
+ })
227
+
228
+ #define CAFFE_ENFORCE_THAT_IMPL_WITH_CALLER(op, lhs, rhs, expr, ...) \
229
+ ::c10::enforce_detail::enforceThatImpl( \
230
+ op, \
231
+ (lhs), \
232
+ (rhs), \
233
+ __FILE__, \
234
+ __LINE__, \
235
+ expr, \
236
+ this, \
237
+ [&](const auto& arg1, const auto& arg2) { \
238
+ return ::c10::enforce_detail::enforceFailMsgImpl( \
239
+ arg1, arg2, ##__VA_ARGS__); \
240
+ })
241
+
242
+ } // namespace enforce_detail
243
+
244
+ #define CAFFE_ENFORCE_THAT(cmp, op, lhs, rhs, ...) \
245
+ CAFFE_ENFORCE_THAT_IMPL(cmp, lhs, rhs, #lhs " " #op " " #rhs, ##__VA_ARGS__)
246
+
247
+ #define CAFFE_ENFORCE_BINARY_OP(cmp, op, x, y, ...) \
248
+ CAFFE_ENFORCE_THAT_IMPL(cmp, x, y, #x " " #op " " #y, ##__VA_ARGS__)
249
+ #define CAFFE_ENFORCE_EQ(x, y, ...) \
250
+ CAFFE_ENFORCE_BINARY_OP(std::equal_to<void>(), ==, x, y, ##__VA_ARGS__)
251
+ #define CAFFE_ENFORCE_NE(x, y, ...) \
252
+ CAFFE_ENFORCE_BINARY_OP(std::not_equal_to<void>(), !=, x, y, ##__VA_ARGS__)
253
+ #define CAFFE_ENFORCE_LE(x, y, ...) \
254
+ CAFFE_ENFORCE_BINARY_OP(std::less_equal<void>(), <=, x, y, ##__VA_ARGS__)
255
+ #define CAFFE_ENFORCE_LT(x, y, ...) \
256
+ CAFFE_ENFORCE_BINARY_OP(std::less<void>(), <, x, y, ##__VA_ARGS__)
257
+ #define CAFFE_ENFORCE_GE(x, y, ...) \
258
+ CAFFE_ENFORCE_BINARY_OP(std::greater_equal<void>(), >=, x, y, ##__VA_ARGS__)
259
+ #define CAFFE_ENFORCE_GT(x, y, ...) \
260
+ CAFFE_ENFORCE_BINARY_OP(std::greater<void>(), >, x, y, ##__VA_ARGS__)
261
+
262
+ #define CAFFE_ENFORCE_BINARY_OP_WITH_CALLER(cmp, op, x, y, ...) \
263
+ CAFFE_ENFORCE_THAT_IMPL_WITH_CALLER( \
264
+ cmp, x, y, #x " " #op " " #y, ##__VA_ARGS__)
265
+ #define CAFFE_ENFORCE_EQ_WITH_CALLER(x, y, ...) \
266
+ CAFFE_ENFORCE_BINARY_OP_WITH_CALLER( \
267
+ std::equal_to<void>(), ==, x, y, ##__VA_ARGS__)
268
+ #define CAFFE_ENFORCE_NE_WITH_CALLER(x, y, ...) \
269
+ CAFFE_ENFORCE_BINARY_OP_WITH_CALLER( \
270
+ std::not_equal_to<void>(), !=, x, y, ##__VA_ARGS__)
271
+ #define CAFFE_ENFORCE_LE_WITH_CALLER(x, y, ...) \
272
+ CAFFE_ENFORCE_BINARY_OP_WITH_CALLER( \
273
+ std::less_equal<void>(), <=, x, y, ##__VA_ARGS__)
274
+ #define CAFFE_ENFORCE_LT_WITH_CALLER(x, y, ...) \
275
+ CAFFE_ENFORCE_BINARY_OP_WITH_CALLER(std::less<void>(), <, x, y, ##__VA_ARGS__)
276
+ #define CAFFE_ENFORCE_GE_WITH_CALLER(x, y, ...) \
277
+ CAFFE_ENFORCE_BINARY_OP_WITH_CALLER( \
278
+ std::greater_equal<void>(), >=, x, y, ##__VA_ARGS__)
279
+ #define CAFFE_ENFORCE_GT_WITH_CALLER(x, y, ...) \
280
+ CAFFE_ENFORCE_BINARY_OP_WITH_CALLER( \
281
+ std::greater<void>(), >, x, y, ##__VA_ARGS__)
282
+
283
+ /**
284
+ * Very lightweight logging for the first time API usage. It's beneficial for
285
+ * tracking of individual functionality usage in larger applications.
286
+ *
287
+ * In order to ensure light-weightedness of logging, we utilize static variable
288
+ * trick - LogAPIUsage will be invoked only once and further invocations will
289
+ * just do an atomic check.
290
+ *
291
+ * Example:
292
+ * // Logs caller info with an arbitrary text event, if there is a usage.
293
+ * C10_LOG_API_USAGE_ONCE("my_api");
294
+ */
295
+ #define C10_LOG_API_USAGE_ONCE(...) \
296
+ C10_UNUSED static bool C10_ANONYMOUS_VARIABLE(logFlag) = \
297
+ ::c10::detail::LogAPIUsageFakeReturn(__VA_ARGS__);
298
+
299
+ // API usage logging capabilities
300
+ C10_API void SetAPIUsageLogger(std::function<void(const std::string&)> logger);
301
+ C10_API void LogAPIUsage(const std::string& context);
302
+
303
+ C10_API void SetAPIUsageMetadataLogger(
304
+ std::function<void(
305
+ const std::string&,
306
+ const std::map<std::string, std::string>& metadata_map)> logger);
307
+ C10_API void LogAPIUsageMetadata(
308
+ const std::string& context,
309
+ const std::map<std::string, std::string>& metadata_map);
310
+
311
+ // PyTorch ddp usage logging capabilities
312
+ // DDPLoggingData holds data that can be logged in applications
313
+ // for analysis and debugging. Data structure is defined in
314
+ // c10 directory so that it can be easily imported by both c10
315
+ // and torch files.
316
+ struct DDPLoggingData {
317
+ // logging fields that are string types.
318
+ std::map<std::string, std::string> strs_map;
319
+ // logging fields that are int64_t types.
320
+ std::map<std::string, int64_t> ints_map;
321
+ };
322
+
323
+ C10_API void SetPyTorchDDPUsageLogger(
324
+ std::function<void(const DDPLoggingData&)> logger);
325
+ C10_API void LogPyTorchDDPUsage(const DDPLoggingData& ddpData);
326
+
327
+ namespace detail {
328
+ // Return value is needed to do the static variable initialization trick
329
+ C10_API bool LogAPIUsageFakeReturn(const std::string& context);
330
+ } // namespace detail
331
+
332
+ // Initializes the c10 logger.
333
+ C10_API void initLogging();
334
+
335
+ // Sets the rank, which will be included in log messages
336
+ C10_API void SetGlobalRank(int64_t rank);
337
+
338
+ } // namespace c10
339
+
340
+ #endif // C10_UTIL_LOGGING_H_
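A hedged example of the enforce macros and the EnforceNotMet alias from this header; checked_divide is a hypothetical helper, and the snippet assumes linking against the c10 library.

    #include <c10/util/Logging.h>
    #include <iostream>

    // Hypothetical helper that validates its inputs with the enforce macros.
    static int checked_divide(int num, int den) {
      CAFFE_ENFORCE_NE(den, 0, "denominator must be non-zero");
      CAFFE_ENFORCE_GE(num, 0, "only non-negative numerators supported here");
      return num / den;
    }

    int main(int argc, char** argv) {
      c10::InitCaffeLogging(&argc, argv);  // optional: wire up logging flags
      std::cout << checked_divide(10, 2) << "\n";  // 5
      try {
        checked_divide(10, 0);  // fails the != 0 check and throws
      } catch (const c10::EnforceNotMet& e) {
        // e.what() carries the failed condition plus the "0 vs 0. ..." message
        std::cerr << "caught: " << e.what() << "\n";
      }
    }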
venv/lib/python3.10/site-packages/torch/include/c10/util/MathConstants.h ADDED
@@ -0,0 +1,142 @@
1
+ #pragma once
2
+
3
+ #include <c10/macros/Macros.h>
4
+ #include <c10/util/BFloat16.h>
5
+ #include <c10/util/Half.h>
6
+
7
+ C10_CLANG_DIAGNOSTIC_PUSH()
8
+ #if C10_CLANG_HAS_WARNING("-Wimplicit-float-conversion")
9
+ C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-float-conversion")
10
+ #endif
11
+
12
+ namespace c10 {
13
+ // TODO: Replace me with inline constexpr variable when C++17 becomes available
14
+ namespace detail {
15
+ template <typename T>
16
+ C10_HOST_DEVICE inline constexpr T e() {
17
+ return static_cast<T>(2.718281828459045235360287471352662);
18
+ }
19
+
20
+ template <typename T>
21
+ C10_HOST_DEVICE inline constexpr T euler() {
22
+ return static_cast<T>(0.577215664901532860606512090082402);
23
+ }
24
+
25
+ template <typename T>
26
+ C10_HOST_DEVICE inline constexpr T frac_1_pi() {
27
+ return static_cast<T>(0.318309886183790671537767526745028);
28
+ }
29
+
30
+ template <typename T>
31
+ C10_HOST_DEVICE inline constexpr T frac_1_sqrt_pi() {
32
+ return static_cast<T>(0.564189583547756286948079451560772);
33
+ }
34
+
35
+ template <typename T>
36
+ C10_HOST_DEVICE inline constexpr T frac_sqrt_2() {
37
+ return static_cast<T>(0.707106781186547524400844362104849);
38
+ }
39
+
40
+ template <typename T>
41
+ C10_HOST_DEVICE inline constexpr T frac_sqrt_3() {
42
+ return static_cast<T>(0.577350269189625764509148780501957);
43
+ }
44
+
45
+ template <typename T>
46
+ C10_HOST_DEVICE inline constexpr T golden_ratio() {
47
+ return static_cast<T>(1.618033988749894848204586834365638);
48
+ }
49
+
50
+ template <typename T>
51
+ C10_HOST_DEVICE inline constexpr T ln_10() {
52
+ return static_cast<T>(2.302585092994045684017991454684364);
53
+ }
54
+
55
+ template <typename T>
56
+ C10_HOST_DEVICE inline constexpr T ln_2() {
57
+ return static_cast<T>(0.693147180559945309417232121458176);
58
+ }
59
+
60
+ template <typename T>
61
+ C10_HOST_DEVICE inline constexpr T log_10_e() {
62
+ return static_cast<T>(0.434294481903251827651128918916605);
63
+ }
64
+
65
+ template <typename T>
66
+ C10_HOST_DEVICE inline constexpr T log_2_e() {
67
+ return static_cast<T>(1.442695040888963407359924681001892);
68
+ }
69
+
70
+ template <typename T>
71
+ C10_HOST_DEVICE inline constexpr T pi() {
72
+ return static_cast<T>(3.141592653589793238462643383279502);
73
+ }
74
+
75
+ template <typename T>
76
+ C10_HOST_DEVICE inline constexpr T sqrt_2() {
77
+ return static_cast<T>(1.414213562373095048801688724209698);
78
+ }
79
+
80
+ template <typename T>
81
+ C10_HOST_DEVICE inline constexpr T sqrt_3() {
82
+ return static_cast<T>(1.732050807568877293527446341505872);
83
+ }
84
+
85
+ template <>
86
+ C10_HOST_DEVICE inline constexpr BFloat16 pi<BFloat16>() {
87
+ // According to
88
+ // https://en.wikipedia.org/wiki/Bfloat16_floating-point_format#Special_values
89
+ // pi is encoded as 4049
90
+ return BFloat16(0x4049, BFloat16::from_bits());
91
+ }
92
+
93
+ template <>
94
+ C10_HOST_DEVICE inline constexpr Half pi<Half>() {
95
+ return Half(0x4248, Half::from_bits());
96
+ }
97
+ } // namespace detail
98
+
99
+ template <typename T>
100
+ constexpr T e = c10::detail::e<T>();
101
+
102
+ template <typename T>
103
+ constexpr T euler = c10::detail::euler<T>();
104
+
105
+ template <typename T>
106
+ constexpr T frac_1_pi = c10::detail::frac_1_pi<T>();
107
+
108
+ template <typename T>
109
+ constexpr T frac_1_sqrt_pi = c10::detail::frac_1_sqrt_pi<T>();
110
+
111
+ template <typename T>
112
+ constexpr T frac_sqrt_2 = c10::detail::frac_sqrt_2<T>();
113
+
114
+ template <typename T>
115
+ constexpr T frac_sqrt_3 = c10::detail::frac_sqrt_3<T>();
116
+
117
+ template <typename T>
118
+ constexpr T golden_ratio = c10::detail::golden_ratio<T>();
119
+
120
+ template <typename T>
121
+ constexpr T ln_10 = c10::detail::ln_10<T>();
122
+
123
+ template <typename T>
124
+ constexpr T ln_2 = c10::detail::ln_2<T>();
125
+
126
+ template <typename T>
127
+ constexpr T log_10_e = c10::detail::log_10_e<T>();
128
+
129
+ template <typename T>
130
+ constexpr T log_2_e = c10::detail::log_2_e<T>();
131
+
132
+ template <typename T>
133
+ constexpr T pi = c10::detail::pi<T>();
134
+
135
+ template <typename T>
136
+ constexpr T sqrt_2 = c10::detail::sqrt_2<T>();
137
+
138
+ template <typename T>
139
+ constexpr T sqrt_3 = c10::detail::sqrt_3<T>();
140
+ } // namespace c10
141
+
142
+ C10_CLANG_DIAGNOSTIC_POP()
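A small sketch of how the variable templates above are meant to be consumed; the harness is illustrative and relies on the generic static_cast path for float/double plus the bit-pattern specializations shown for Half and BFloat16.

    #include <c10/util/MathConstants.h>
    #include <iostream>

    int main() {
      // Generic path: a double literal cast to the requested type.
      constexpr float pi_f = c10::pi<float>;
      constexpr double ln2 = c10::ln_2<double>;
      // Specialized path: Half pi is the bit pattern 0x4248 (~3.140625).
      const c10::Half pi_h = c10::pi<c10::Half>;

      std::cout << pi_f << " " << ln2 << " " << static_cast<float>(pi_h) << "\n";
    }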