applied-ai-018 commited on
Commit
c6cb1da
·
verified ·
1 Parent(s): e313d5c

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/FlushDenormal.h +14 -0
  2. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/Utils.h +10 -0
  3. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/functional.h +4 -0
  4. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/functional_base.h +329 -0
  5. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/functional_bfloat16.h +549 -0
  6. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/intrinsics.h +43 -0
  7. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec.h +47 -0
  8. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/missing_vld1_neon.h +452 -0
  9. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/missing_vst1_neon.h +8 -0
  10. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256.h +307 -0
  11. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_double.h +442 -0
  12. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_float.h +636 -0
  13. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_float_neon.h +892 -0
  14. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_int.h +1586 -0
  15. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_qint.h +1335 -0
  16. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec_base.h +1108 -0
  17. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec_half.h +50 -0
  18. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec_n.h +344 -0
  19. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vml.h +171 -0
  20. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cudnn/Descriptors.h +391 -0
  21. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cudnn/Exceptions.h +0 -0
  22. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cudnn/Handle.h +9 -0
  23. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cudnn/Handles.h +2 -0
  24. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cudnn/Types.h +14 -0
  25. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cudnn/Utils.h +21 -0
  26. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cudnn/cudnn-wrapper.h +15 -0
  27. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/mps/EmptyTensor.h +29 -0
  28. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/mps/IndexKernels.h +630 -0
  29. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/mps/MPSAllocator.h +401 -0
  30. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/mps/MPSAllocatorInterface.h +61 -0
  31. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/mps/MPSDevice.h +85 -0
  32. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/mps/MPSEvent.h +100 -0
  33. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/mps/MPSGeneratorImpl.h +52 -0
  34. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/mps/MPSGuardImpl.h +174 -0
  35. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/mps/MPSHooks.h +57 -0
  36. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/mps/MPSProfiler.h +393 -0
  37. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/mps/MPSStream.h +133 -0
  38. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cpu/ChannelShuffleKernel.h +14 -0
  39. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cpu/DistributionTemplates.h +369 -0
  40. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cpu/IndexKernelUtils.h +88 -0
  41. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cpu/ReduceUtils.h +238 -0
  42. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cpu/int_mm_kernel.h +16 -0
  43. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cpu/mixed_data_type.h +41 -0
  44. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cpu/moments_utils.h +206 -0
  45. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cuda/Activation.h +20 -0
  46. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cuda/CUDAJitLoops.cuh +296 -0
  47. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cuda/CUDALoops.cuh +348 -0
  48. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cuda/CompositeRandomAccessor.h +35 -0
  49. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cuda/CuFFTPlanCache.h +494 -0
  50. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cuda/CuFFTUtils.h +73 -0
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/FlushDenormal.h ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /// Flush-To-Zero and Denormals-Are-Zero mode
2
+ ///
3
+ /// Flush-To-Zero (FTZ) and Denormals-Are-Zero (DAZ) are modes that bypass
4
+ /// IEEE 754 methods of dealing with denormal floating-point numbers on x86-64
5
+ /// and some x86 CPUs. They result in reduced precision for values near zero,
6
+ /// but increased performance.
7
+ ///
8
+ /// See https://software.intel.com/en-us/articles/x87-and-sse-floating-point-assists-in-ia-32-flush-to-zero-ftz-and-denormals-are-zero-daz
9
+
10
+ namespace at::cpu {
11
+
12
+ bool set_flush_denormal(bool on);
13
+
14
+ } // namespace at::cpu
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/Utils.h ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/macros/Export.h>
4
+
5
+ namespace at::cpu {
6
+
7
+ // Detect if CPU support Vector Neural Network Instruction.
8
+ TORCH_API bool is_cpu_support_vnni();
9
+
10
+ } // namespace at::cpu
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/functional.h ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/cpu/vec/functional_base.h>
4
+ #include <ATen/cpu/vec/functional_bfloat16.h>
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/functional_base.h ADDED
@@ -0,0 +1,329 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+
6
+ #include <ATen/cpu/vec/vec.h>
7
+ #include <c10/util/irange.h>
8
+
9
+ namespace at::vec {
10
+
11
+ // slow path
12
+ template <typename scalar_t, typename Op>
13
+ inline scalar_t vec_reduce_all(
14
+ const Op& vec_fun,
15
+ vec::Vectorized<scalar_t> acc_vec,
16
+ int64_t size) {
17
+ using Vec = vec::Vectorized<scalar_t>;
18
+ scalar_t acc_arr[Vec::size()];
19
+ acc_vec.store(acc_arr);
20
+ for (const auto i : c10::irange(1, size)) {
21
+ std::array<scalar_t, Vec::size()> acc_arr_next = {0};
22
+ acc_arr_next[0] = acc_arr[i];
23
+ Vec acc_vec_next = Vec::loadu(acc_arr_next.data());
24
+ acc_vec = vec_fun(acc_vec, acc_vec_next);
25
+ }
26
+ acc_vec.store(acc_arr);
27
+ return acc_arr[0];
28
+ }
29
+
30
+ template <typename scalar_t, typename Op>
31
+ struct VecReduceAllSIMD {
32
+ static inline scalar_t apply(const Op& vec_fun, const Vectorized<scalar_t>& acc_vec) {
33
+ return vec_reduce_all(vec_fun, acc_vec, Vectorized<scalar_t>::size());
34
+ }
35
+ };
36
+
37
+ #if defined(__GNUC__) && (__GNUC__ > 5) && !defined(_MSC_VER) && !defined(C10_MOBILE)
38
+ #if defined(CPU_CAPABILITY_AVX2)
39
+ template <typename Op>
40
+ struct VecReduceAllSIMD<float, Op> {
41
+ static inline float apply(const Op& vec_fun, const Vectorized<float>& acc_vec) {
42
+ using Vec = Vectorized<float>;
43
+ Vec v = acc_vec;
44
+ // 128-bit shuffle
45
+ Vec v1 = _mm256_permute2f128_ps(v, v, 0x1);
46
+ v = vec_fun(v, v1);
47
+ // 64-bit shuffle
48
+ v1 = _mm256_shuffle_ps(v, v, 0x4E);
49
+ v = vec_fun(v, v1);
50
+ // 32-bit shuffle
51
+ v1 = _mm256_shuffle_ps(v, v, 0xB1);
52
+ v = vec_fun(v, v1);
53
+ return _mm256_cvtss_f32(v);
54
+ }
55
+ };
56
+ #endif // defined(CPU_CAPABILITY_AVX2)
57
+ #if defined(CPU_CAPABILITY_AVX512)
58
+ template <typename Op>
59
+ struct VecReduceAllSIMD<float, Op> {
60
+ static inline float apply(const Op& vec_fun, const Vectorized<float>& acc_vec) {
61
+ using Vec = Vectorized<float>;
62
+ Vec v = acc_vec;
63
+ // 256-bit shuffle
64
+ Vec v1 = _mm512_shuffle_f32x4(v, v, 0x4E);
65
+ v = vec_fun(v, v1);
66
+ // 128-bit shuffle
67
+ v1 = _mm512_shuffle_f32x4(v, v, 0xB1);
68
+ v = vec_fun(v, v1);
69
+ // 64-bit shuffle
70
+ v1 = _mm512_shuffle_ps(v, v, 0x4E);
71
+ v = vec_fun(v, v1);
72
+ // 32-bit shuffle
73
+ v1 = _mm512_shuffle_ps(v, v, 0xB1);
74
+ v = vec_fun(v, v1);
75
+ return _mm512_cvtss_f32(v);
76
+ }
77
+ };
78
+ #endif // defined(CPU_CAPABILITY_AVX512)
79
+ #endif // defined(__GNUC__) && (__GNUC__ > 5) && !defined(_MSC_VER) && !defined(C10_MOBILE)
80
+
81
+ template <typename scalar_t, typename Op>
82
+ inline scalar_t vec_reduce_all(const Op& vec_fun, const Vectorized<scalar_t>& acc_vec) {
83
+ return VecReduceAllSIMD<scalar_t, Op>::apply(vec_fun, acc_vec);
84
+ }
85
+
86
+ template <typename scalar_t, typename Op,
87
+ typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0>
88
+ inline scalar_t reduce_all(const Op& vec_fun, const scalar_t* data, int64_t size) {
89
+ using Vec = vec::Vectorized<scalar_t>;
90
+ if (size < Vec::size())
91
+ return vec_reduce_all(vec_fun, Vec::loadu(data, size), size);
92
+ int64_t d = Vec::size();
93
+ Vec acc_vec = Vec::loadu(data);
94
+ for (; d < size - (size % Vec::size()); d += Vec::size()) {
95
+ Vec data_vec = Vec::loadu(data + d);
96
+ acc_vec = vec_fun(acc_vec, data_vec);
97
+ }
98
+ if (size - d > 0) {
99
+ Vec data_vec = Vec::loadu(data + d, size - d);
100
+ acc_vec = Vec::set(acc_vec, vec_fun(acc_vec, data_vec), size - d);
101
+ }
102
+ return vec_reduce_all(vec_fun, acc_vec);
103
+ }
104
+
105
+ // similar to reduce_all, but reduces into two outputs
106
+ template <typename scalar_t, typename Op1, typename Op2,
107
+ typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0>
108
+ inline std::pair<scalar_t, scalar_t> reduce2_all(const Op1& vec_fun1, const Op2& vec_fun2,
109
+ const scalar_t* data, int64_t size) {
110
+ using Vec = vec::Vectorized<scalar_t>;
111
+ if (size < Vec::size()) {
112
+ auto loaded_data = Vec::loadu(data, size);
113
+ return std::pair<scalar_t, scalar_t>(
114
+ vec_reduce_all(vec_fun1, loaded_data, size),
115
+ vec_reduce_all(vec_fun2, loaded_data, size));
116
+ }
117
+ int64_t d = Vec::size();
118
+ Vec acc_vec1 = Vec::loadu(data);
119
+ Vec acc_vec2 = Vec::loadu(data);
120
+ for (; d < size - (size % Vec::size()); d += Vec::size()) {
121
+ Vec data_vec = Vec::loadu(data + d);
122
+ acc_vec1 = vec_fun1(acc_vec1, data_vec);
123
+ acc_vec2 = vec_fun2(acc_vec2, data_vec);
124
+ }
125
+ if (size - d > 0) {
126
+ Vec data_vec = Vec::loadu(data + d, size - d);
127
+ acc_vec1 = Vec::set(acc_vec1, vec_fun1(acc_vec1, data_vec), size - d);
128
+ acc_vec2 = Vec::set(acc_vec2, vec_fun2(acc_vec2, data_vec), size - d);
129
+ }
130
+ return std::pair<scalar_t, scalar_t>(
131
+ vec_reduce_all(vec_fun1, acc_vec1),
132
+ vec_reduce_all(vec_fun2, acc_vec2));
133
+ }
134
+
135
+ template <typename scalar_t, typename MapOp, typename ReduceOp,
136
+ typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0>
137
+ inline scalar_t map_reduce_all(
138
+ const MapOp& map_fun,
139
+ const ReduceOp& red_fun,
140
+ const scalar_t* data,
141
+ int64_t size) {
142
+ using Vec = vec::Vectorized<scalar_t>;
143
+ if (size < Vec::size())
144
+ return vec_reduce_all(red_fun, map_fun(Vec::loadu(data, size)), size);
145
+ int64_t d = Vec::size();
146
+ Vec acc_vec = map_fun(Vec::loadu(data));
147
+ for (; d < size - (size % Vec::size()); d += Vec::size()) {
148
+ Vec data_vec = Vec::loadu(data + d);
149
+ data_vec = map_fun(data_vec);
150
+ acc_vec = red_fun(acc_vec, data_vec);
151
+ }
152
+ if (size - d > 0) {
153
+ Vec data_vec = Vec::loadu(data + d, size - d);
154
+ data_vec = map_fun(data_vec);
155
+ acc_vec = Vec::set(acc_vec, red_fun(acc_vec, data_vec), size - d);
156
+ }
157
+ return vec_reduce_all(red_fun, acc_vec);
158
+ }
159
+
160
+ template <typename scalar_t, typename MapOp, typename ReduceOp,
161
+ typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0>
162
+ inline scalar_t map2_reduce_all(
163
+ const MapOp& map_fun,
164
+ const ReduceOp& red_fun,
165
+ const scalar_t* data,
166
+ const scalar_t* data2,
167
+ int64_t size) {
168
+ using Vec = vec::Vectorized<scalar_t>;
169
+ if (size < Vec::size()) {
170
+ Vec data_vec = Vec::loadu(data, size);
171
+ Vec data2_vec = Vec::loadu(data2, size);
172
+ data_vec = map_fun(data_vec, data2_vec);
173
+ return vec_reduce_all(red_fun, data_vec, size);
174
+ }
175
+ int64_t d = Vec::size();
176
+ Vec acc_vec = map_fun(Vec::loadu(data), Vec::loadu(data2));
177
+ for (; d < size - (size % Vec::size()); d += Vec::size()) {
178
+ Vec data_vec = Vec::loadu(data + d);
179
+ Vec data2_vec = Vec::loadu(data2 + d);
180
+ data_vec = map_fun(data_vec, data2_vec);
181
+ acc_vec = red_fun(acc_vec, data_vec);
182
+ }
183
+ if (size - d > 0) {
184
+ Vec data_vec = Vec::loadu(data + d, size - d);
185
+ Vec data2_vec = Vec::loadu(data2 + d, size - d);
186
+ data_vec = map_fun(data_vec, data2_vec);
187
+ acc_vec = Vec::set(acc_vec, red_fun(acc_vec, data_vec), size - d);
188
+ }
189
+ return vec_reduce_all(red_fun, acc_vec);
190
+ }
191
+
192
+ template <typename scalar_t, typename MapOp, typename ReduceOp,
193
+ typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0>
194
+ inline scalar_t map3_reduce_all(
195
+ const MapOp& map_fun,
196
+ const ReduceOp& red_fun,
197
+ const scalar_t* data,
198
+ const scalar_t* data2,
199
+ const scalar_t* data3,
200
+ int64_t size) {
201
+ using Vec = vec::Vectorized<scalar_t>;
202
+ if (size < Vec::size()) {
203
+ Vec data_vec = Vec::loadu(data, size);
204
+ Vec data2_vec = Vec::loadu(data2, size);
205
+ Vec data3_vec = Vec::loadu(data3, size);
206
+ data_vec = map_fun(data_vec, data2_vec, data3_vec);
207
+ return vec_reduce_all(red_fun, data_vec, size);
208
+ }
209
+
210
+ int64_t d = Vec::size();
211
+ Vec acc_vec = map_fun(Vec::loadu(data), Vec::loadu(data2), Vec::loadu(data3));
212
+ for (; d < size - (size % Vec::size()); d += Vec::size()) {
213
+ Vec data_vec = Vec::loadu(data + d);
214
+ Vec data2_vec = Vec::loadu(data2 + d);
215
+ Vec data3_vec = Vec::loadu(data3 + d);
216
+ data_vec = map_fun(data_vec, data2_vec, data3_vec);
217
+ acc_vec = red_fun(acc_vec, data_vec);
218
+ }
219
+ if (size - d > 0) {
220
+ Vec data_vec = Vec::loadu(data + d, size - d);
221
+ Vec data2_vec = Vec::loadu(data2 + d, size - d);
222
+ Vec data3_vec = Vec::loadu(data3 + d, size - d);
223
+ data_vec = map_fun(data_vec, data2_vec, data3_vec);
224
+ acc_vec = Vec::set(acc_vec, red_fun(acc_vec, data_vec), size - d);
225
+ }
226
+ return vec_reduce_all(red_fun, acc_vec);
227
+ }
228
+
229
+ template <typename scalar_t, typename Op,
230
+ typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0>
231
+ inline void map(
232
+ const Op& vec_fun,
233
+ scalar_t* output_data,
234
+ const scalar_t* input_data,
235
+ int64_t size) {
236
+ using Vec = vec::Vectorized<scalar_t>;
237
+ int64_t d = 0;
238
+ for (; d < size - (size % Vec::size()); d += Vec::size()) {
239
+ Vec output_vec = vec_fun(Vec::loadu(input_data + d));
240
+ output_vec.store(output_data + d);
241
+ }
242
+ if (size - d > 0) {
243
+ Vec output_vec = vec_fun(Vec::loadu(input_data + d, size - d));
244
+ output_vec.store(output_data + d, size - d);
245
+ }
246
+ }
247
+
248
+ template <typename scalar_t, typename Op,
249
+ typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0>
250
+ inline void map2(
251
+ const Op& vec_fun,
252
+ scalar_t* output_data,
253
+ const scalar_t* input_data,
254
+ const scalar_t* input_data2,
255
+ int64_t size) {
256
+ using Vec = vec::Vectorized<scalar_t>;
257
+ int64_t d = 0;
258
+ for (; d < size - (size % Vec::size()); d += Vec::size()) {
259
+ Vec data_vec = Vec::loadu(input_data + d);
260
+ Vec data_vec2 = Vec::loadu(input_data2 + d);
261
+ Vec output_vec = vec_fun(data_vec, data_vec2);
262
+ output_vec.store(output_data + d);
263
+ }
264
+ if (size - d > 0) {
265
+ Vec data_vec = Vec::loadu(input_data + d, size - d);
266
+ Vec data_vec2 = Vec::loadu(input_data2 + d, size - d);
267
+ Vec output_vec = vec_fun(data_vec, data_vec2);
268
+ output_vec.store(output_data + d, size - d);
269
+ }
270
+ }
271
+
272
+ template <typename scalar_t, typename Op,
273
+ typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0>
274
+ inline void map3(
275
+ const Op& vec_fun,
276
+ scalar_t* output_data,
277
+ const scalar_t* input_data1,
278
+ const scalar_t* input_data2,
279
+ const scalar_t* input_data3,
280
+ int64_t size) {
281
+ using Vec = vec::Vectorized<scalar_t>;
282
+ int64_t d = 0;
283
+ for (; d < size - (size % Vec::size()); d += Vec::size()) {
284
+ Vec data_vec1 = Vec::loadu(input_data1 + d);
285
+ Vec data_vec2 = Vec::loadu(input_data2 + d);
286
+ Vec data_vec3 = Vec::loadu(input_data3 + d);
287
+ Vec output_vec = vec_fun(data_vec1, data_vec2, data_vec3);
288
+ output_vec.store(output_data + d);
289
+ }
290
+ if (size - d > 0) {
291
+ Vec data_vec1 = Vec::loadu(input_data1 + d, size - d);
292
+ Vec data_vec2 = Vec::loadu(input_data2 + d, size - d);
293
+ Vec data_vec3 = Vec::loadu(input_data3 + d, size - d);
294
+ Vec output_vec = vec_fun(data_vec1, data_vec2, data_vec3);
295
+ output_vec.store(output_data + d, size - d);
296
+ }
297
+ }
298
+
299
+ template <typename scalar_t, typename Op,
300
+ typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0>
301
+ inline void map4(
302
+ const Op& vec_fun,
303
+ scalar_t* output_data,
304
+ const scalar_t* input_data1,
305
+ const scalar_t* input_data2,
306
+ const scalar_t* input_data3,
307
+ const scalar_t* input_data4,
308
+ int64_t size) {
309
+ using Vec = vec::Vectorized<scalar_t>;
310
+ int64_t d = 0;
311
+ for (; d < size - (size % Vec::size()); d += Vec::size()) {
312
+ Vec data_vec1 = Vec::loadu(input_data1 + d);
313
+ Vec data_vec2 = Vec::loadu(input_data2 + d);
314
+ Vec data_vec3 = Vec::loadu(input_data3 + d);
315
+ Vec data_vec4 = Vec::loadu(input_data4 + d);
316
+ Vec output_vec = vec_fun(data_vec1, data_vec2, data_vec3, data_vec4);
317
+ output_vec.store(output_data + d);
318
+ }
319
+ if (size - d > 0) {
320
+ Vec data_vec1 = Vec::loadu(input_data1 + d, size - d);
321
+ Vec data_vec2 = Vec::loadu(input_data2 + d, size - d);
322
+ Vec data_vec3 = Vec::loadu(input_data3 + d, size - d);
323
+ Vec data_vec4 = Vec::loadu(input_data4 + d, size - d);
324
+ Vec output_vec = vec_fun(data_vec1, data_vec2, data_vec3, data_vec4);
325
+ output_vec.store(output_data + d, size - d);
326
+ }
327
+ }
328
+
329
+ } // namespace at::vec
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/functional_bfloat16.h ADDED
@@ -0,0 +1,549 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+
6
+ #include <ATen/cpu/vec/vec.h>
7
+
8
+ namespace at::vec {
9
+
10
+ // BFloat16 specification
11
+ template <typename scalar_t> struct VecScalarType { using type = scalar_t; };
12
+ template <> struct VecScalarType<BFloat16> { using type = float; };
13
+ template <> struct VecScalarType<Half> { using type = float; };
14
+
15
+ // This is different from at::acc_type since we only need to specialize BFloat16
16
+ template <typename scalar_t>
17
+ using vec_scalar_t = typename VecScalarType<scalar_t>::type;
18
+
19
+ // Vector conversion between float and bfloat16/half
20
+ template <typename scalar_t,
21
+ typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
22
+ inline std::tuple<Vectorized<float>, Vectorized<float>> convert_to_float(const Vectorized<scalar_t>&);
23
+
24
+ template <>
25
+ inline std::tuple<Vectorized<float>, Vectorized<float>> convert_to_float<BFloat16> (const Vectorized<BFloat16>& a) {
26
+ return convert_bfloat16_float(a);
27
+ }
28
+
29
+ template <>
30
+ inline std::tuple<Vectorized<float>, Vectorized<float>> convert_to_float<Half> (const Vectorized<Half>& a) {
31
+ return convert_half_float(a);
32
+ }
33
+
34
+ template <typename scalar_t,
35
+ typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
36
+ inline Vectorized<scalar_t> convert_from_float(const Vectorized<float>&, const Vectorized<float>&);
37
+
38
+ template <>
39
+ inline Vectorized<BFloat16> convert_from_float<BFloat16>(const Vectorized<float>& a, const Vectorized<float>& b) {
40
+ return convert_float_bfloat16(a, b);
41
+ }
42
+
43
+ template <>
44
+ inline Vectorized<Half> convert_from_float<Half>(const Vectorized<float>& a, const Vectorized<float>& b) {
45
+ return convert_float_half(a, b);
46
+ }
47
+
48
+ template <typename scalar_t,
49
+ typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
50
+ inline void load_to_float(const scalar_t *data, Vectorized<float> &out1, Vectorized<float> &out2);
51
+
52
+ template <>
53
+ inline void load_to_float<BFloat16> (const BFloat16 *data, Vectorized<float> &out1, Vectorized<float> &out2) {
54
+ load_fp32_from_bf16(data, out1, out2);
55
+ }
56
+
57
+ template <>
58
+ inline void load_to_float<Half> (const Half *data, Vectorized<float> &out1, Vectorized<float> &out2) {
59
+ load_fp32_from_fp16(data, out1, out2);
60
+ }
61
+
62
+ template <typename scalar_t,
63
+ typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
64
+ inline void load_to_float(const scalar_t *data, Vectorized<float> &out);
65
+
66
+ template <>
67
+ inline void load_to_float<BFloat16> (const BFloat16 *data, Vectorized<float> &out) {
68
+ load_fp32_from_bf16(data, out);
69
+ }
70
+
71
+ template <>
72
+ inline void load_to_float<Half> (const Half *data, Vectorized<float> &out) {
73
+ load_fp32_from_fp16(data, out);
74
+ }
75
+
76
+ // Note that we already have specialized member of Vectorized<scalar_t> for BFloat16
77
+ // so the following functions would run smoothly:
78
+ // using Vec = Vectorized<BFloat16>;
79
+ // Vec one = Vec(BFloat16(1));
80
+ // vec::map([](Vec x) { return one / (one + x.exp()); }, y_ptr, x_ptr, N);
81
+ //
82
+ // Then why we still need to specialize "functional"?
83
+ // If we do specialization at Vectorized<> level, the above example would need 3 pairs of
84
+ // conversion of bf16->fp32/fp32->bf16, each for ".exp()", "+" and "/".
85
+ // If we do specialization at vec::map<>() level, we have only 1 pair of conversion
86
+ // of bf16->fp32/fp32->bf16, for the input and output BFloat16 vector only.
87
+ //
88
+ // The following BFloat16 functionality will only do data type conversion for input
89
+ // and output vector (reduce functionality will only convert the final scalar back to bf16).
90
+ // Compared to Vectorized<> specialization,
91
+ // 1. better performance since we have less data type conversion;
92
+ // 2. less rounding error since immediate results are kept in fp32;
93
+ // 3. accumulation done on data type of fp32.
94
+ //
95
+ // If you plan to extend this file, please ensure adding unit tests at
96
+ // aten/src/ATen/test/vec_test_all_types.cpp
97
+ //
98
+ template <typename scalar_t, typename Op,
99
+ typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
100
+ inline float reduce_all(const Op& vec_fun, const scalar_t* data, int64_t size) {
101
+ using bVec = vec::Vectorized<scalar_t>;
102
+ using fVec = vec::Vectorized<float>;
103
+ if (size < bVec::size()) {
104
+ bVec data_bvec = bVec::loadu(data, size);
105
+ auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
106
+ if (size > fVec::size()) {
107
+ data_fvec0 = fVec::set(data_fvec0, vec_fun(data_fvec0, data_fvec1), size - fVec::size());
108
+ return vec_reduce_all<float>(vec_fun, data_fvec0, fVec::size());
109
+ } else {
110
+ return vec_reduce_all<float>(vec_fun, data_fvec0, size);
111
+ }
112
+ }
113
+ int64_t d = bVec::size();
114
+ bVec acc_bvec = bVec::loadu(data);
115
+ auto [acc_fvec0, acc_fvec1] = convert_to_float<scalar_t>(acc_bvec);
116
+ for (; d < size - (size % bVec::size()); d += bVec::size()) {
117
+ bVec data_bvec = bVec::loadu(data + d);
118
+ auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
119
+ acc_fvec0 = vec_fun(acc_fvec0, data_fvec0);
120
+ acc_fvec1 = vec_fun(acc_fvec1, data_fvec1);
121
+ }
122
+ if (size - d > 0) {
123
+ bVec data_bvec = bVec::loadu(data + d, size - d);
124
+ auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
125
+ if (size - d > fVec::size()) {
126
+ acc_fvec0 = vec_fun(acc_fvec0, data_fvec0);
127
+ acc_fvec1 = fVec::set(acc_fvec1, vec_fun(acc_fvec1, data_fvec1), size - d - fVec::size());
128
+ } else {
129
+ acc_fvec0 = fVec::set(acc_fvec0, vec_fun(acc_fvec0, data_fvec0), size - d);
130
+ }
131
+ }
132
+ acc_fvec0 = vec_fun(acc_fvec0, acc_fvec1);
133
+ return vec_reduce_all<float>(vec_fun, acc_fvec0);
134
+ }
135
+
136
+ template <typename scalar_t, typename Op1, typename Op2,
137
+ typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
138
+ inline std::pair<float, float> reduce2_all(const Op1& vec_fun1, const Op2& vec_fun2,
139
+ const scalar_t* data, int64_t size) {
140
+ using bVec = vec::Vectorized<scalar_t>;
141
+ using fVec = vec::Vectorized<float>;
142
+ if (size < bVec::size()) {
143
+ bVec data_bvec = bVec::loadu(data, size);
144
+ auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
145
+ if (size > fVec::size()) {
146
+ fVec acc1_fvec = fVec::set(data_fvec0, vec_fun1(data_fvec0, data_fvec1), size - fVec::size());
147
+ fVec acc2_fvec = fVec::set(data_fvec0, vec_fun2(data_fvec0, data_fvec1), size - fVec::size());
148
+ return std::pair<scalar_t, scalar_t>(
149
+ vec_reduce_all<float>(vec_fun1, acc1_fvec, fVec::size()),
150
+ vec_reduce_all<float>(vec_fun2, acc2_fvec, fVec::size()));
151
+ } else {
152
+ return std::pair<scalar_t, scalar_t>(
153
+ vec_reduce_all<float>(vec_fun1, data_fvec0, size),
154
+ vec_reduce_all<float>(vec_fun2, data_fvec0, size));
155
+ }
156
+ }
157
+ int64_t d = bVec::size();
158
+ bVec acc_bvec = bVec::loadu(data);
159
+ auto [acc1_fvec0, acc1_fvec1] = convert_to_float<scalar_t>(acc_bvec);
160
+ auto [acc2_fvec0, acc2_fvec1] = convert_to_float<scalar_t>(acc_bvec);
161
+ for (; d < size - (size % bVec::size()); d += bVec::size()) {
162
+ bVec data_bvec = bVec::loadu(data + d);
163
+ auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
164
+ acc1_fvec0 = vec_fun1(acc1_fvec0, data_fvec0);
165
+ acc1_fvec1 = vec_fun1(acc1_fvec1, data_fvec1);
166
+ acc2_fvec0 = vec_fun2(acc2_fvec0, data_fvec0);
167
+ acc2_fvec1 = vec_fun2(acc2_fvec1, data_fvec1);
168
+ }
169
+ if (size - d > 0) {
170
+ bVec data_bvec = bVec::loadu(data + d, size - d);
171
+ auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
172
+ if (size - d > fVec::size()) {
173
+ acc1_fvec0 = vec_fun1(acc1_fvec0, data_fvec0);
174
+ acc1_fvec1 = fVec::set(acc1_fvec1, vec_fun1(acc1_fvec1, data_fvec1), size - d - fVec::size());
175
+ acc2_fvec0 = vec_fun2(acc2_fvec0, data_fvec0);
176
+ acc2_fvec1 = fVec::set(acc2_fvec1, vec_fun2(acc2_fvec1, data_fvec1), size - d - fVec::size());
177
+ } else {
178
+ acc1_fvec0 = fVec::set(acc1_fvec0, vec_fun1(acc1_fvec0, data_fvec0), size - d);
179
+ acc2_fvec0 = fVec::set(acc2_fvec0, vec_fun2(acc2_fvec0, data_fvec0), size - d);
180
+ }
181
+ }
182
+ acc1_fvec0 = vec_fun1(acc1_fvec0, acc1_fvec1);
183
+ acc2_fvec0 = vec_fun2(acc2_fvec0, acc2_fvec1);
184
+ return std::pair<scalar_t, scalar_t>(
185
+ vec_reduce_all<float>(vec_fun1, acc1_fvec0),
186
+ vec_reduce_all<float>(vec_fun2, acc2_fvec0));
187
+ }
188
+
189
+ template <typename scalar_t, typename MapOp, typename ReduceOp,
190
+ typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
191
+ inline float map_reduce_all(
192
+ const MapOp& map_fun,
193
+ const ReduceOp& red_fun,
194
+ const scalar_t* data,
195
+ int64_t size) {
196
+ using bVec = vec::Vectorized<scalar_t>;
197
+ using fVec = vec::Vectorized<float>;
198
+ if (size < bVec::size()) {
199
+ bVec data_bvec = bVec::loadu(data, size);
200
+ auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
201
+ if (size > fVec::size()) {
202
+ data_fvec0 = map_fun(data_fvec0);
203
+ data_fvec1 = map_fun(data_fvec1);
204
+ data_fvec0 = fVec::set(data_fvec0, red_fun(data_fvec0, data_fvec1), size - fVec::size());
205
+ return vec_reduce_all<float>(red_fun, data_fvec0, fVec::size());
206
+ } else {
207
+ data_fvec0 = map_fun(data_fvec0);
208
+ return vec_reduce_all<float>(red_fun, data_fvec0, size);
209
+ }
210
+ }
211
+ int64_t d = bVec::size();
212
+ bVec acc_bvec = bVec::loadu(data);
213
+ auto [acc_fvec0, acc_fvec1] = convert_to_float<scalar_t>(acc_bvec);
214
+ acc_fvec0 = map_fun(acc_fvec0);
215
+ acc_fvec1 = map_fun(acc_fvec1);
216
+ for (; d < size - (size % bVec::size()); d += bVec::size()) {
217
+ bVec data_bvec = bVec::loadu(data + d);
218
+ auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
219
+ data_fvec0 = map_fun(data_fvec0);
220
+ data_fvec1 = map_fun(data_fvec1);
221
+ acc_fvec0 = red_fun(acc_fvec0, data_fvec0);
222
+ acc_fvec1 = red_fun(acc_fvec1, data_fvec1);
223
+ }
224
+ if (size - d > 0) {
225
+ bVec data_bvec = bVec::loadu(data + d, size - d);
226
+ auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
227
+ if (size - d > fVec::size()) {
228
+ data_fvec0 = map_fun(data_fvec0);
229
+ data_fvec1 = map_fun(data_fvec1);
230
+ acc_fvec0 = red_fun(acc_fvec0, data_fvec0);
231
+ acc_fvec1 = fVec::set(acc_fvec1, red_fun(acc_fvec1, data_fvec1), size - d - fVec::size());
232
+ } else {
233
+ data_fvec0 = map_fun(data_fvec0);
234
+ acc_fvec0 = fVec::set(acc_fvec0, red_fun(acc_fvec0, data_fvec0), size - d);
235
+ }
236
+ }
237
+ acc_fvec0 = red_fun(acc_fvec0, acc_fvec1);
238
+ return vec_reduce_all<float>(red_fun, acc_fvec0);
239
+ }
240
+
241
+ template <typename scalar_t, typename MapOp, typename ReduceOp,
242
+ typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
243
+ inline float map2_reduce_all(
244
+ const MapOp& map_fun,
245
+ const ReduceOp& red_fun,
246
+ const scalar_t* data,
247
+ const scalar_t* data2,
248
+ int64_t size) {
249
+ using bVec = vec::Vectorized<scalar_t>;
250
+ using fVec = vec::Vectorized<float>;
251
+ if (size < bVec::size()) {
252
+ bVec data_bvec = bVec::loadu(data, size);
253
+ auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
254
+ bVec data2_bvec = bVec::loadu(data2, size);
255
+ auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
256
+ if (size > fVec::size()) {
257
+ data_fvec0 = map_fun(data_fvec0, data2_fvec0);
258
+ data_fvec1 = map_fun(data_fvec1, data2_fvec1);
259
+ data_fvec0 = fVec::set(data_fvec0, red_fun(data_fvec0, data_fvec1), size - fVec::size());
260
+ return vec_reduce_all<float>(red_fun, data_fvec0, fVec::size());
261
+ } else {
262
+ data_fvec0 = map_fun(data_fvec0, data2_fvec0);
263
+ return vec_reduce_all<float>(red_fun, data_fvec0, size);
264
+ }
265
+ }
266
+ int64_t d = bVec::size();
267
+ bVec acc_bvec = bVec::loadu(data);
268
+ auto [acc_fvec0, acc_fvec1] = convert_to_float<scalar_t>(acc_bvec);
269
+ bVec acc2_bvec = bVec::loadu(data2);
270
+ auto [acc2_fvec0, acc2_fvec1] = convert_to_float<scalar_t>(acc2_bvec);
271
+ acc_fvec0 = map_fun(acc_fvec0, acc2_fvec0);
272
+ acc_fvec1 = map_fun(acc_fvec1, acc2_fvec1);
273
+ for (; d < size - (size % bVec::size()); d += bVec::size()) {
274
+ bVec data_bvec = bVec::loadu(data + d);
275
+ auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
276
+ bVec data2_bvec = bVec::loadu(data2 + d);
277
+ auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
278
+ data_fvec0 = map_fun(data_fvec0, data2_fvec0);
279
+ data_fvec1 = map_fun(data_fvec1, data2_fvec1);
280
+ acc_fvec0 = red_fun(acc_fvec0, data_fvec0);
281
+ acc_fvec1 = red_fun(acc_fvec1, data_fvec1);
282
+ }
283
+ if (size - d > 0) {
284
+ bVec data_bvec = bVec::loadu(data + d, size - d);
285
+ auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
286
+ bVec data2_bvec = bVec::loadu(data2 + d, size - d);
287
+ auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
288
+ if (size - d > fVec::size()) {
289
+ data_fvec0 = map_fun(data_fvec0, data2_fvec0);
290
+ data_fvec1 = map_fun(data_fvec1, data2_fvec1);
291
+ acc_fvec0 = red_fun(acc_fvec0, data_fvec0);
292
+ acc_fvec1 = fVec::set(acc_fvec1, red_fun(acc_fvec1, data_fvec1), size - d - fVec::size());
293
+ } else {
294
+ data_fvec0 = map_fun(data_fvec0, data2_fvec0);
295
+ acc_fvec0 = fVec::set(acc_fvec0, red_fun(acc_fvec0, data_fvec0), size - d);
296
+ }
297
+ }
298
+ acc_fvec0 = red_fun(acc_fvec0, acc_fvec1);
299
+ return vec_reduce_all<float>(red_fun, acc_fvec0);
300
+ }
301
+
302
+ template <typename scalar_t, typename MapOp, typename ReduceOp,
303
+ typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
304
+ inline float map3_reduce_all(
305
+ const MapOp& map_fun,
306
+ const ReduceOp& red_fun,
307
+ const scalar_t* data,
308
+ const scalar_t* data2,
309
+ const scalar_t* data3,
310
+ int64_t size) {
311
+ using bVec = vec::Vectorized<scalar_t>;
312
+ using fVec = vec::Vectorized<float>;
313
+ if (size < bVec::size()) {
314
+ bVec data_bvec = bVec::loadu(data, size);
315
+ auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
316
+ bVec data2_bvec = bVec::loadu(data2, size);
317
+ auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
318
+ bVec data3_bvec = bVec::loadu(data3, size);
319
+ auto [data3_fvec0, data3_fvec1] = convert_to_float<scalar_t>(data3_bvec);
320
+ if (size > fVec::size()) {
321
+ data_fvec0 = map_fun(data_fvec0, data2_fvec0, data3_fvec0);
322
+ data_fvec1 = map_fun(data_fvec1, data2_fvec1, data3_fvec1);
323
+ data_fvec0 = fVec::set(data_fvec0, red_fun(data_fvec0, data_fvec1), size - fVec::size());
324
+ return vec_reduce_all<float>(red_fun, data_fvec0, fVec::size());
325
+ } else {
326
+ data_fvec0 = map_fun(data_fvec0, data2_fvec0, data3_fvec0);
327
+ return vec_reduce_all<float>(red_fun, data_fvec0, size);
328
+ }
329
+ }
330
+ int64_t d = bVec::size();
331
+ bVec acc_bvec = bVec::loadu(data);
332
+ auto [acc_fvec0, acc_fvec1] = convert_to_float<scalar_t>(acc_bvec);
333
+ bVec acc2_bvec = bVec::loadu(data2);
334
+ auto [acc2_fvec0, acc2_fvec1] = convert_to_float<scalar_t>(acc2_bvec);
335
+ bVec acc3_bvec = bVec::loadu(data3);
336
+ auto [acc3_fvec0, acc3_fvec1] = convert_to_float<scalar_t>(acc3_bvec);
337
+ acc_fvec0 = map_fun(acc_fvec0, acc2_fvec0, acc3_fvec0);
338
+ acc_fvec1 = map_fun(acc_fvec1, acc2_fvec1, acc3_fvec1);
339
+ for (; d < size - (size % bVec::size()); d += bVec::size()) {
340
+ bVec data_bvec = bVec::loadu(data + d);
341
+ auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
342
+ bVec data2_bvec = bVec::loadu(data2 + d);
343
+ auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
344
+ bVec data3_bvec = bVec::loadu(data3 + d);
345
+ auto [data3_fvec0, data3_fvec1] = convert_to_float<scalar_t>(data3_bvec);
346
+ data_fvec0 = map_fun(data_fvec0, data2_fvec0, data3_fvec0);
347
+ data_fvec1 = map_fun(data_fvec1, data2_fvec1, data3_fvec1);
348
+ acc_fvec0 = red_fun(acc_fvec0, data_fvec0);
349
+ acc_fvec1 = red_fun(acc_fvec1, data_fvec1);
350
+ }
351
+ if (size - d > 0) {
352
+ bVec data_bvec = bVec::loadu(data + d, size - d);
353
+ auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
354
+ bVec data2_bvec = bVec::loadu(data2 + d, size - d);
355
+ auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
356
+ bVec data3_bvec = bVec::loadu(data3 + d, size - d);
357
+ auto [data3_fvec0, data3_fvec1] = convert_to_float<scalar_t>(data3_bvec);
358
+ if (size - d > fVec::size()) {
359
+ data_fvec0 = map_fun(data_fvec0, data2_fvec0, data3_fvec0);
360
+ data_fvec1 = map_fun(data_fvec1, data2_fvec1, data3_fvec1);
361
+ acc_fvec0 = red_fun(acc_fvec0, data_fvec0);
362
+ acc_fvec1 = fVec::set(acc_fvec1, red_fun(acc_fvec1, data_fvec1), size - d - fVec::size());
363
+ } else {
364
+ data_fvec0 = map_fun(data_fvec0, data2_fvec0, data3_fvec0);
365
+ acc_fvec0 = fVec::set(acc_fvec0, red_fun(acc_fvec0, data_fvec0), size - d);
366
+ }
367
+ }
368
+ acc_fvec0 = red_fun(acc_fvec0, acc_fvec1);
369
+ return vec_reduce_all<float>(red_fun, acc_fvec0);
370
+ }
371
+
372
+ template <typename scalar_t, typename Op,
373
+ typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
374
+ inline void map(
375
+ const Op& vec_fun,
376
+ scalar_t* output_data,
377
+ const scalar_t* input_data,
378
+ int64_t size) {
379
+ using bVec = vec::Vectorized<scalar_t>;
380
+ using fVec = vec::Vectorized<float>;
381
+ int64_t d = 0;
382
+ for (; d < size - (size % bVec::size()); d += bVec::size()) {
383
+ bVec data_bvec = bVec::loadu(input_data + d);
384
+ auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
385
+ fVec output_fvec0 = vec_fun(data_fvec0);
386
+ fVec output_fvec1 = vec_fun(data_fvec1);
387
+ bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
388
+ output_bvec.store(output_data + d);
389
+ }
390
+ if (size - d > 0) {
391
+ bVec data_bvec = bVec::loadu(input_data + d, size - d);
392
+ auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
393
+ fVec output_fvec0 = vec_fun(data_fvec0);
394
+ fVec output_fvec1 = vec_fun(data_fvec1);
395
+ bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
396
+ output_bvec.store(output_data + d, size - d);
397
+ }
398
+ }
399
+
400
+ template <typename scalar_t, typename Op,
401
+ typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
402
+ inline void map(
403
+ const Op& vec_fun,
404
+ scalar_t* output_data,
405
+ const float* input_data,
406
+ int64_t size) {
407
+ using bVec = vec::Vectorized<scalar_t>;
408
+ using fVec = vec::Vectorized<float>;
409
+ int64_t d = 0;
410
+ for (; d < size - (size % bVec::size()); d += bVec::size()) {
411
+ fVec data_fvec0 = fVec::loadu(input_data + d);
412
+ fVec data_fvec1 = fVec::loadu(input_data + d + fVec::size());
413
+ fVec output_fvec0 = vec_fun(data_fvec0);
414
+ fVec output_fvec1 = vec_fun(data_fvec1);
415
+ bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
416
+ output_bvec.store(output_data + d);
417
+ }
418
+ if (size - d > 0) {
419
+ fVec data_fvec0, data_fvec1;
420
+ if (size - d > fVec::size()) {
421
+ data_fvec0 = fVec::loadu(input_data + d);
422
+ data_fvec1 = fVec::loadu(input_data + d + fVec::size(), size - d - fVec::size());
423
+ } else {
424
+ // choose to align with behaviour of bVec::loadu(ptr, size),
425
+ // which leaves data_fvec1 uninitialized
426
+ data_fvec0 = fVec::loadu(input_data + d, size - d);
427
+ }
428
+ fVec output_fvec0 = vec_fun(data_fvec0);
429
+ fVec output_fvec1 = vec_fun(data_fvec1);
430
+ bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
431
+ output_bvec.store(output_data + d, size - d);
432
+ }
433
+ }
434
+
435
+ template <typename scalar_t, typename Op,
436
+ typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
437
+ inline void map2(
438
+ const Op& vec_fun,
439
+ scalar_t* output_data,
440
+ const scalar_t* input_data,
441
+ const scalar_t* input_data2,
442
+ int64_t size) {
443
+ using bVec = vec::Vectorized<scalar_t>;
444
+ using fVec = vec::Vectorized<float>;
445
+ int64_t d = 0;
446
+ for (; d < size - (size % bVec::size()); d += bVec::size()) {
447
+ bVec data_bvec = bVec::loadu(input_data + d);
448
+ auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
449
+ bVec data2_bvec = bVec::loadu(input_data2 + d);
450
+ auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
451
+ fVec output_fvec0 = vec_fun(data_fvec0, data2_fvec0);
452
+ fVec output_fvec1 = vec_fun(data_fvec1, data2_fvec1);
453
+ bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
454
+ output_bvec.store(output_data + d);
455
+ }
456
+ if (size - d > 0) {
457
+ bVec data_bvec = bVec::loadu(input_data + d, size - d);
458
+ auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
459
+ bVec data2_bvec = bVec::loadu(input_data2 + d, size - d);
460
+ auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
461
+ fVec output_fvec0 = vec_fun(data_fvec0, data2_fvec0);
462
+ fVec output_fvec1 = vec_fun(data_fvec1, data2_fvec1);
463
+ bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
464
+ output_bvec.store(output_data + d, size - d);
465
+ }
466
+ }
467
+
468
+ template <typename scalar_t, typename Op,
469
+ typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
470
+ inline void map3(
471
+ const Op& vec_fun,
472
+ scalar_t* output_data,
473
+ const scalar_t* input_data1,
474
+ const scalar_t* input_data2,
475
+ const scalar_t* input_data3,
476
+ int64_t size) {
477
+ using bVec = vec::Vectorized<scalar_t>;
478
+ using fVec = vec::Vectorized<float>;
479
+ int64_t d = 0;
480
+ for (; d < size - (size % bVec::size()); d += bVec::size()) {
481
+ bVec data1_bvec = bVec::loadu(input_data1 + d);
482
+ auto [data1_fvec0, data1_fvec1] = convert_to_float<scalar_t>(data1_bvec);
483
+ bVec data2_bvec = bVec::loadu(input_data2 + d);
484
+ auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
485
+ bVec data3_bvec = bVec::loadu(input_data3 + d);
486
+ auto [data3_fvec0, data3_fvec1] = convert_to_float<scalar_t>(data3_bvec);
487
+ fVec output_fvec0 = vec_fun(data1_fvec0, data2_fvec0, data3_fvec0);
488
+ fVec output_fvec1 = vec_fun(data1_fvec1, data2_fvec1, data3_fvec1);
489
+ bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
490
+ output_bvec.store(output_data + d);
491
+ }
492
+ if (size - d > 0) {
493
+ bVec data1_bvec = bVec::loadu(input_data1 + d, size - d);
494
+ auto [data1_fvec0, data1_fvec1] = convert_to_float<scalar_t>(data1_bvec);
495
+ bVec data2_bvec = bVec::loadu(input_data2 + d, size - d);
496
+ auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
497
+ bVec data3_bvec = bVec::loadu(input_data3 + d, size - d);
498
+ auto [data3_fvec0, data3_fvec1] = convert_to_float<scalar_t>(data3_bvec);
499
+ fVec output_fvec0 = vec_fun(data1_fvec0, data2_fvec0, data3_fvec0);
500
+ fVec output_fvec1 = vec_fun(data1_fvec1, data2_fvec1, data3_fvec1);
501
+ bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
502
+ output_bvec.store(output_data + d, size - d);
503
+ }
504
+ }
505
+
506
+ template <typename scalar_t, typename Op,
507
+ typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
508
+ inline void map4(
509
+ const Op& vec_fun,
510
+ scalar_t* output_data,
511
+ const scalar_t* input_data1,
512
+ const scalar_t* input_data2,
513
+ const scalar_t* input_data3,
514
+ const scalar_t* input_data4,
515
+ int64_t size) {
516
+ using bVec = vec::Vectorized<scalar_t>;
517
+ using fVec = vec::Vectorized<float>;
518
+ int64_t d = 0;
519
+ for (; d < size - (size % bVec::size()); d += bVec::size()) {
520
+ bVec data1_bvec = bVec::loadu(input_data1 + d);
521
+ auto [data1_fvec0, data1_fvec1] = convert_to_float<scalar_t>(data1_bvec);
522
+ bVec data2_bvec = bVec::loadu(input_data2 + d);
523
+ auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
524
+ bVec data3_bvec = bVec::loadu(input_data3 + d);
525
+ auto [data3_fvec0, data3_fvec1] = convert_to_float<scalar_t>(data3_bvec);
526
+ bVec data4_bvec = bVec::loadu(input_data4 + d);
527
+ auto [data4_fvec0, data4_fvec1] = convert_to_float<scalar_t>(data4_bvec);
528
+ fVec output_fvec0 = vec_fun(data1_fvec0, data2_fvec0, data3_fvec0, data4_fvec0);
529
+ fVec output_fvec1 = vec_fun(data1_fvec1, data2_fvec1, data3_fvec1, data4_fvec1);
530
+ bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
531
+ output_bvec.store(output_data + d);
532
+ }
533
+ if (size - d > 0) {
534
+ bVec data1_bvec = bVec::loadu(input_data1 + d, size - d);
535
+ auto [data1_fvec0, data1_fvec1] = convert_to_float<scalar_t>(data1_bvec);
536
+ bVec data2_bvec = bVec::loadu(input_data2 + d, size - d);
537
+ auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
538
+ bVec data3_bvec = bVec::loadu(input_data3 + d, size - d);
539
+ auto [data3_fvec0, data3_fvec1] = convert_to_float<scalar_t>(data3_bvec);
540
+ bVec data4_bvec = bVec::loadu(input_data4 + d, size - d);
541
+ auto [data4_fvec0, data4_fvec1] = convert_to_float<scalar_t>(data4_bvec);
542
+ fVec output_fvec0 = vec_fun(data1_fvec0, data2_fvec0, data3_fvec0, data4_fvec0);
543
+ fVec output_fvec1 = vec_fun(data1_fvec1, data2_fvec1, data3_fvec1, data4_fvec1);
544
+ bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
545
+ output_bvec.store(output_data + d, size - d);
546
+ }
547
+ }
548
+
549
+ } // namespace at::vec
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/intrinsics.h ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #if defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__))
3
+ /* GCC or clang-compatible compiler, targeting x86/x86-64 */
4
+ #include <x86intrin.h>
5
+ #elif defined(__clang__) && (defined(__ARM_NEON__) || defined(__aarch64__))
6
+ /* Clang-compatible compiler, targeting arm neon */
7
+ #include <arm_neon.h>
8
+ #elif defined(_MSC_VER)
9
+ /* Microsoft C/C++-compatible compiler */
10
+ #include <intrin.h>
11
+ #if _MSC_VER <= 1900
12
+ #define _mm256_extract_epi64(X, Y) (_mm_extract_epi64(_mm256_extractf128_si256(X, Y >> 1), Y % 2))
13
+ #define _mm256_extract_epi32(X, Y) (_mm_extract_epi32(_mm256_extractf128_si256(X, Y >> 2), Y % 4))
14
+ #define _mm256_extract_epi16(X, Y) (_mm_extract_epi16(_mm256_extractf128_si256(X, Y >> 3), Y % 8))
15
+ #define _mm256_extract_epi8(X, Y) (_mm_extract_epi8(_mm256_extractf128_si256(X, Y >> 4), Y % 16))
16
+ #endif
17
+ #elif defined(__GNUC__) && (defined(__ARM_NEON__) || defined(__aarch64__))
18
+ /* GCC-compatible compiler, targeting ARM with NEON */
19
+ #include <arm_neon.h>
20
+ #if defined (MISSING_ARM_VLD1)
21
+ #include <ATen/cpu/vec/vec256/missing_vld1_neon.h>
22
+ #elif defined (MISSING_ARM_VST1)
23
+ #include <ATen/cpu/vec/vec256/missing_vst1_neon.h>
24
+ #endif
25
+ #elif defined(__GNUC__) && defined(__IWMMXT__)
26
+ /* GCC-compatible compiler, targeting ARM with WMMX */
27
+ #include <mmintrin.h>
28
+ #elif defined(__s390x__)
29
+ // targets Z/architecture
30
+ // we will include vecintrin later
31
+ #elif (defined(__GNUC__) || defined(__xlC__)) && \
32
+ (defined(__VEC__) || defined(__ALTIVEC__))
33
+ /* XLC or GCC-compatible compiler, targeting PowerPC with VMX/VSX */
34
+ #include <altivec.h>
35
+ /* We need to undef those tokens defined by <altivec.h> to avoid conflicts
36
+ with the C++ types. => Can still use __bool/__vector */
37
+ #undef bool
38
+ #undef vector
39
+ #undef pixel
40
+ #elif defined(__GNUC__) && defined(__SPE__)
41
+ /* GCC-compatible compiler, targeting PowerPC with SPE */
42
+ #include <spe.h>
43
+ #endif
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec.h ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #if defined(CPU_CAPABILITY_AVX512)
4
+ #include <ATen/cpu/vec/vec512/vec512.h>
5
+ #else
6
+ #include <ATen/cpu/vec/vec256/vec256.h>
7
+ #endif
8
+
9
+ namespace at::vec {
10
+ // See Note [CPU_CAPABILITY namespace]
11
+ inline namespace CPU_CAPABILITY {
12
+
13
+ inline Vectorized<bool> convert_to_bool(Vectorized<int8_t> x) {
14
+ __at_align__ bool buffer[x.size()];
15
+ x.ne(Vectorized<int8_t>(0)).store(buffer);
16
+
17
+ Vectorized<bool> ret;
18
+ static_assert(x.size() == ret.size(), "");
19
+ std::memcpy(ret, buffer, ret.size() * sizeof(bool));
20
+ return ret;
21
+ }
22
+
23
+ template <>
24
+ inline Vectorized<bool> Vectorized<bool>::loadu(const void* ptr) {
25
+ // See NOTE [Loading boolean values]
26
+ return convert_to_bool(Vectorized<int8_t>::loadu(ptr));
27
+ }
28
+
29
+ template <>
30
+ inline Vectorized<bool> Vectorized<bool>::loadu(const void* ptr, int64_t count) {
31
+ // See NOTE [Loading boolean values]
32
+ return convert_to_bool(Vectorized<int8_t>::loadu(ptr, count));
33
+ }
34
+
35
+ template <typename VT>
36
+ struct VecHoldType { using hold_type = typename VT::value_type; };
37
+
38
+ template <>
39
+ struct VecHoldType<Vectorized<BFloat16>> { using hold_type = BFloat16; };
40
+
41
+ template <>
42
+ struct VecHoldType<Vectorized<Half>> {using hold_type = Half; };
43
+
44
+ template <typename VT>
45
+ using vechold_type = typename VecHoldType<VT>::hold_type;
46
+
47
+ }} // namespace at::vec::CPU_CAPABILITY
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/missing_vld1_neon.h ADDED
@@ -0,0 +1,452 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Workaround for missing vld1_*_x2 and vst1_*_x2 intrinsics in gcc-7. */
2
+
3
+ __extension__ extern __inline uint8x8x2_t
4
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5
+ vld1_u8_x2 (const uint8_t *__a)
6
+ {
7
+ uint8x8x2_t ret;
8
+ asm volatile("ld1 {%S0.8b - %T0.8b}, %1" : "=w" (ret) : "Q"(*__a));
9
+ return ret;
10
+ }
11
+
12
+ __extension__ extern __inline int8x8x2_t
13
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14
+ vld1_s8_x2 (const int8_t *__a)
15
+ {
16
+ int8x8x2_t ret;
17
+ asm volatile("ld1 {%S0.8b - %T0.8b}, %1" : "=w" (ret) : "Q"(*__a));
18
+ return ret;
19
+ }
20
+
21
+ __extension__ extern __inline uint16x4x2_t
22
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23
+ vld1_u16_x2 (const uint16_t *__a)
24
+ {
25
+ uint16x4x2_t ret;
26
+ asm volatile("ld1 {%S0.4h - %T0.4h}, %1" : "=w" (ret) : "Q"(*__a));
27
+ return ret;
28
+ }
29
+
30
+ __extension__ extern __inline int16x4x2_t
31
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32
+ vld1_s16_x2 (const int16_t *__a)
33
+ {
34
+ int16x4x2_t ret;
35
+ asm volatile("ld1 {%S0.4h - %T0.4h}, %1" : "=w" (ret) : "Q"(*__a));
36
+ return ret;
37
+ }
38
+
39
+ __extension__ extern __inline uint32x2x2_t
40
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
41
+ vld1_u32_x2 (const uint32_t *__a)
42
+ {
43
+ uint32x2x2_t ret;
44
+ asm volatile("ld1 {%S0.2s - %T0.2s}, %1" : "=w" (ret) : "Q"(*__a));
45
+ return ret;
46
+ }
47
+
48
+ __extension__ extern __inline int32x2x2_t
49
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
50
+ vld1_s32_x2 (const int32_t *__a)
51
+ {
52
+ int32x2x2_t ret;
53
+ asm volatile("ld1 {%S0.2s - %T0.2s}, %1" : "=w" (ret) : "Q"(*__a));
54
+ return ret;
55
+ }
56
+
57
+ __extension__ extern __inline uint64x1x2_t
58
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
59
+ vld1_u64_x2 (const uint64_t *__a)
60
+ {
61
+ uint64x1x2_t ret;
62
+ asm volatile("ld1 {%S0.1d - %T0.1d}, %1" : "=w" (ret) : "Q"(*__a));
63
+ return ret;
64
+ }
65
+
66
+ __extension__ extern __inline int64x1x2_t
67
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
68
+ vld1_s64_x2 (const int64_t *__a)
69
+ {
70
+ int64x1x2_t ret;
71
+ __builtin_aarch64_simd_oi __o;
72
+ asm volatile("ld1 {%S0.1d - %T0.1d}, %1" : "=w" (ret) : "Q"(*__a));
73
+ return ret;
74
+ }
75
+
76
+ __extension__ extern __inline float16x4x2_t
77
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
78
+ vld1_f16_x2 (const float16_t *__a)
79
+ {
80
+ float16x4x2_t ret;
81
+ asm volatile("ld1 {%S0.4h - %T0.4h}, %1" : "=w" (ret) : "Q"(*__a));
82
+ return ret;
83
+ }
84
+
85
+ __extension__ extern __inline float32x2x2_t
86
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
87
+ vld1_f32_x2 (const float32_t *__a)
88
+ {
89
+ float32x2x2_t ret;
90
+ asm volatile("ld1 {%S0.2s - %T0.2s}, %1" : "=w" (ret) : "Q"(*__a));
91
+ return ret;
92
+ }
93
+
94
+ __extension__ extern __inline float64x1x2_t
95
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
96
+ vld1_f64_x2 (const float64_t *__a)
97
+ {
98
+ float64x1x2_t ret;
99
+ asm volatile("ld1 {%S0.1d - %T0.1d}, %1" : "=w" (ret) : "Q"(*__a));
100
+ return ret;
101
+ }
102
+
103
+ __extension__ extern __inline poly8x8x2_t
104
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
105
+ vld1_p8_x2 (const poly8_t *__a)
106
+ {
107
+ poly8x8x2_t ret;
108
+ asm volatile("ld1 {%S0.8b - %T0.8b}, %1" : "=w" (ret) : "Q"(*__a));
109
+ return ret;
110
+ }
111
+
112
+ __extension__ extern __inline poly16x4x2_t
113
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
114
+ vld1_p16_x2 (const poly16_t *__a)
115
+ {
116
+ poly16x4x2_t ret;
117
+ asm volatile("ld1 {%S0.4h - %T0.4h}, %1" : "=w" (ret) : "Q"(*__a));
118
+ return ret;
119
+ }
120
+
121
+ __extension__ extern __inline poly64x1x2_t
122
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
123
+ vld1_p64_x2 (const poly64_t *__a)
124
+ {
125
+ poly64x1x2_t ret;
126
+ asm volatile("ld1 {%S0.1d - %T0.1d}, %1" : "=w" (ret) : "Q"(*__a));
127
+ return ret;
128
+ }
129
+
130
+ __extension__ extern __inline uint8x16x2_t
131
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
132
+ vld1q_u8_x2 (const uint8_t *__a)
133
+ {
134
+ uint8x16x2_t ret;
135
+ asm volatile("ld1 {%S0.16b - %T0.16b}, %1" : "=w" (ret) : "Q"(*__a));
136
+ return ret;
137
+ }
138
+
139
+ __extension__ extern __inline int8x16x2_t
140
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
141
+ vld1q_s8_x2 (const int8_t *__a)
142
+ {
143
+ int8x16x2_t ret;
144
+ asm volatile("ld1 {%S0.16b - %T0.16b}, %1" : "=w" (ret) : "Q"(*__a));
145
+ return ret;
146
+ }
147
+
148
+ __extension__ extern __inline uint16x8x2_t
149
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
150
+ vld1q_u16_x2 (const uint16_t *__a)
151
+ {
152
+ uint16x8x2_t ret;
153
+ asm volatile("ld1 {%S0.8h - %T0.8h}, %1" : "=w" (ret) : "Q"(*__a));
154
+ return ret;
155
+ }
156
+
157
+ __extension__ extern __inline int16x8x2_t
158
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
159
+ vld1q_s16_x2 (const int16_t *__a)
160
+ {
161
+ int16x8x2_t ret;
162
+ asm volatile("ld1 {%S0.8h - %T0.8h}, %1" : "=w" (ret) : "Q"(*__a));
163
+ return ret;
164
+ }
165
+
166
+ __extension__ extern __inline uint32x4x2_t
167
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
168
+ vld1q_u32_x2 (const uint32_t *__a)
169
+ {
170
+ uint32x4x2_t ret;
171
+ asm volatile("ld1 {%S0.4s - %T0.4s}, %1" : "=w" (ret) : "Q"(*__a));
172
+ return ret;
173
+ }
174
+
175
+ __extension__ extern __inline int32x4x2_t
176
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
177
+ vld1q_s32_x2 (const int32_t *__a)
178
+ {
179
+ int32x4x2_t ret;
180
+ asm volatile("ld1 {%S0.4s - %T0.4s}, %1" : "=w" (ret) : "Q"(*__a));
181
+ return ret;
182
+ }
183
+
184
+ __extension__ extern __inline uint64x2x2_t
185
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
186
+ vld1q_u64_x2 (const uint64_t *__a)
187
+ {
188
+ uint64x2x2_t ret;
189
+ asm volatile("ld1 {%S0.2d - %T0.2d}, %1" : "=w" (ret) : "Q"(*__a));
190
+ return ret;
191
+ }
192
+
193
+ __extension__ extern __inline int64x2x2_t
194
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
195
+ vld1q_s64_x2 (const int64_t *__a)
196
+ {
197
+ int64x2x2_t ret;
198
+ asm volatile("ld1 {%S0.2d - %T0.2d}, %1" : "=w" (ret) : "Q"(*__a));
199
+ return ret;
200
+ }
201
+
202
+ __extension__ extern __inline float16x8x2_t
203
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
204
+ vld1q_f16_x2 (const float16_t *__a)
205
+ {
206
+ float16x8x2_t ret;
207
+ asm volatile("ld1 {%S0.8h - %T0.8h}, %1" : "=w" (ret) : "Q"(*__a));
208
+ return ret;
209
+ }
210
+
211
+ __extension__ extern __inline float32x4x2_t
212
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
213
+ vld1q_f32_x2 (const float32_t *__a)
214
+ {
215
+ float32x4x2_t ret;
216
+ asm volatile("ld1 {%S0.4s - %T0.4s}, %1" : "=w" (ret) : "Q"(*__a));
217
+ return ret;
218
+ }
219
+
220
+ __extension__ extern __inline float64x2x2_t
221
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
222
+ vld1q_f64_x2 (const float64_t *__a)
223
+ {
224
+ float64x2x2_t ret;
225
+ asm volatile("ld1 {%S0.2d - %T0.2d}, %1" : "=w" (ret) : "Q"(*__a));
226
+ return ret;
227
+ }
228
+
229
+ __extension__ extern __inline poly8x16x2_t
230
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
231
+ vld1q_p8_x2 (const poly8_t *__a)
232
+ {
233
+ poly8x16x2_t ret;
234
+ asm volatile("ld1 {%S0.16b - %T0.16b}, %1" : "=w" (ret) : "Q"(*__a));
235
+ return ret;
236
+ }
237
+
238
+ __extension__ extern __inline poly16x8x2_t
239
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
240
+ vld1q_p16_x2 (const poly16_t *__a)
241
+ {
242
+ poly16x8x2_t ret;
243
+ asm volatile("ld1 {%S0.8h - %T0.8h}, %1" : "=w" (ret) : "Q"(*__a));
244
+ return ret;
245
+ }
246
+
247
+ __extension__ extern __inline poly64x2x2_t
248
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
249
+ vld1q_p64_x2 (const poly64_t *__a)
250
+ {
251
+ poly64x2x2_t ret;
252
+ asm volatile("ld1 {%S0.2d - %T0.2d}, %1" : "=w" (ret) : "Q"(*__a));
253
+ return ret;
254
+ }
255
+
256
+ /* vst1x2 */
257
+
258
+ __extension__ extern __inline void
259
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
260
+ vst1_s64_x2 (int64_t * __a, int64x1x2_t val)
261
+ {
262
+ asm volatile("st1 {%S1.1d - %T1.1d}, %0" : "=Q" (*__a) : "w" (val));
263
+ }
264
+
265
+ __extension__ extern __inline void
266
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
267
+ vst1_u64_x2 (uint64_t * __a, uint64x1x2_t val)
268
+ {
269
+ asm volatile("st1 {%S1.1d - %T1.1d}, %0" : "=Q" (*__a) : "w" (val));
270
+ }
271
+
272
+ __extension__ extern __inline void
273
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
274
+ vst1_f64_x2 (float64_t * __a, float64x1x2_t val)
275
+ {
276
+ asm volatile("st1 {%S1.1d - %T1.1d}, %0" : "=Q" (*__a) : "w" (val));
277
+ }
278
+
279
+ __extension__ extern __inline void
280
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
281
+ vst1_s8_x2 (int8_t * __a, int8x8x2_t val)
282
+ {
283
+ asm volatile("st1 {%S1.8b - %T1.8b}, %0" : "=Q" (*__a) : "w" (val));
284
+ }
285
+
286
+ __extension__ extern __inline void
287
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
288
+ vst1_p8_x2 (poly8_t * __a, poly8x8x2_t val)
289
+ {
290
+ asm volatile("st1 {%S1.8b - %T1.8b}, %0" : "=Q" (*__a) : "w" (val));
291
+ }
292
+
293
+ __extension__ extern __inline void
294
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
295
+ vst1_s16_x2 (int16_t * __a, int16x4x2_t val)
296
+ {
297
+ asm volatile("st1 {%S1.4h - %T1.4h}, %0" : "=Q" (*__a) : "w" (val));
298
+ }
299
+
300
+ __extension__ extern __inline void
301
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
302
+ vst1_p16_x2 (poly16_t * __a, poly16x4x2_t val)
303
+ {
304
+ asm volatile("st1 {%S1.4h - %T1.4h}, %0" : "=Q" (*__a) : "w" (val));
305
+ }
306
+
307
+ __extension__ extern __inline void
308
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
309
+ vst1_s32_x2 (int32_t * __a, int32x2x2_t val)
310
+ {
311
+ asm volatile("st1 {%S1.2s - %T1.2s}, %0" : "=Q" (*__a) : "w" (val));
312
+ }
313
+
314
+ __extension__ extern __inline void
315
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
316
+ vst1_u8_x2 (uint8_t * __a, uint8x8x2_t val)
317
+ {
318
+ asm volatile("st1 {%S1.8b - %T1.8b}, %0" : "=Q" (*__a) : "w" (val));
319
+ }
320
+
321
+ __extension__ extern __inline void
322
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
323
+ vst1_u16_x2 (uint16_t * __a, uint16x4x2_t val)
324
+ {
325
+ asm volatile("st1 {%S1.4h - %T1.4h}, %0" : "=Q" (*__a) : "w" (val));
326
+ }
327
+
328
+ __extension__ extern __inline void
329
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
330
+ vst1_u32_x2 (uint32_t * __a, uint32x2x2_t val)
331
+ {
332
+ asm volatile("st1 {%S1.2s - %T1.2s}, %0" : "=Q" (*__a) : "w" (val));
333
+ }
334
+
335
+ __extension__ extern __inline void
336
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
337
+ vst1_f16_x2 (float16_t * __a, float16x4x2_t val)
338
+ {
339
+ asm volatile("st1 {%S1.4h - %T1.4h}, %0" : "=Q" (*__a) : "w" (val));
340
+ }
341
+
342
+ __extension__ extern __inline void
343
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
344
+ vst1_f32_x2 (float32_t * __a, float32x2x2_t val)
345
+ {
346
+ asm volatile("st1 {%S1.2s - %T1.2s}, %0" : "=Q" (*__a) : "w" (val));
347
+ }
348
+
349
+ __extension__ extern __inline void
350
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
351
+ vst1_p64_x2 (poly64_t * __a, poly64x1x2_t val)
352
+ {
353
+ asm volatile("st1 {%S1.1d - %T1.1d}, %0" : "=Q" (*__a) : "w" (val));
354
+ }
355
+
356
+ __extension__ extern __inline void
357
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
358
+ vst1q_s8_x2 (int8_t * __a, int8x16x2_t val)
359
+ {
360
+ asm volatile("st1 {%S1.16b - %T1.16b}, %0" : "=Q" (*__a) : "w" (val));
361
+ }
362
+
363
+ __extension__ extern __inline void
364
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
365
+ vst1q_p8_x2 (poly8_t * __a, poly8x16x2_t val)
366
+ {
367
+ asm volatile("st1 {%S1.16b - %T1.16b}, %0" : "=Q" (*__a) : "w" (val));
368
+ }
369
+
370
+ __extension__ extern __inline void
371
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
372
+ vst1q_s16_x2 (int16_t * __a, int16x8x2_t val)
373
+ {
374
+ asm volatile("st1 {%S1.8h - %T1.8h}, %0" : "=Q" (*__a) : "w" (val));
375
+ }
376
+
377
+ __extension__ extern __inline void
378
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
379
+ vst1q_p16_x2 (poly16_t * __a, poly16x8x2_t val)
380
+ {
381
+ asm volatile("st1 {%S1.8h - %T1.8h}, %0" : "=Q" (*__a) : "w" (val));
382
+ }
383
+
384
+ __extension__ extern __inline void
385
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
386
+ vst1q_s32_x2 (int32_t * __a, int32x4x2_t val)
387
+ {
388
+ asm volatile("st1 {%S1.4s - %T1.4s}, %0" : "=Q" (*__a) : "w" (val));
389
+ }
390
+
391
+ __extension__ extern __inline void
392
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
393
+ vst1q_s64_x2 (int64_t * __a, int64x2x2_t val)
394
+ {
395
+ asm volatile("st1 {%S1.2d - %T1.2d}, %0" : "=Q" (*__a) : "w" (val));
396
+ }
397
+
398
+ __extension__ extern __inline void
399
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
400
+ vst1q_u8_x2 (uint8_t * __a, uint8x16x2_t val)
401
+ {
402
+ asm volatile("st1 {%S1.16b - %T1.16b}, %0" : "=Q" (*__a) : "w" (val));
403
+ }
404
+
405
+ __extension__ extern __inline void
406
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
407
+ vst1q_u16_x2 (uint16_t * __a, uint16x8x2_t val)
408
+ {
409
+ asm volatile("st1 {%S1.8h - %T1.8h}, %0" : "=Q" (*__a) : "w" (val));
410
+ }
411
+
412
+ __extension__ extern __inline void
413
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
414
+ vst1q_u32_x2 (uint32_t * __a, uint32x4x2_t val)
415
+ {
416
+ asm volatile("st1 {%S1.4s - %T1.4s}, %0" : "=Q" (*__a) : "w" (val));
417
+ }
418
+
419
+ __extension__ extern __inline void
420
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
421
+ vst1q_u64_x2 (uint64_t * __a, uint64x2x2_t val)
422
+ {
423
+ asm volatile("st1 {%S1.2d - %T1.2d}, %0" : "=Q" (*__a) : "w" (val));
424
+ }
425
+
426
+ __extension__ extern __inline void
427
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
428
+ vst1q_f16_x2 (float16_t * __a, float16x8x2_t val)
429
+ {
430
+ asm volatile("st1 {%S1.8h - %T1.8h}, %0" : "=Q" (*__a) : "w" (val));
431
+ }
432
+
433
+ __extension__ extern __inline void
434
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
435
+ vst1q_f32_x2 (float32_t * __a, float32x4x2_t val)
436
+ {
437
+ asm volatile("st1 {%S1.4s - %T1.4s}, %0" : "=Q" (*__a) : "w" (val));
438
+ }
439
+
440
+ __extension__ extern __inline void
441
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
442
+ vst1q_f64_x2 (float64_t * __a, float64x2x2_t val)
443
+ {
444
+ asm volatile("st1 {%S1.2d - %T1.2d}, %0" : "=Q" (*__a) : "w" (val));
445
+ }
446
+
447
+ __extension__ extern __inline void
448
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
449
+ vst1q_p64_x2 (poly64_t * __a, poly64x2x2_t val)
450
+ {
451
+ asm volatile("st1 {%S1.2d - %T1.2d}, %0" : "=Q" (*__a) : "w" (val));
452
+ }
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/missing_vst1_neon.h ADDED
@@ -0,0 +1,8 @@
1
+ /* Workaround for missing vst1q_f32_x2 in gcc-8. */
2
+
3
+ __extension__ extern __inline void
4
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5
+ vst1q_f32_x2 (float32_t * __a, float32x4x2_t val)
6
+ {
7
+ asm volatile("st1 {%S1.4s - %T1.4s}, %0" : "=Q" (*__a) : "w" (val));
8
+ }
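For context, a minimal usage sketch of the x2 workaround intrinsics defined in these two headers: each loads or stores two adjacent NEON registers with a single ld1/st1. The helper name below is made up, and the snippet assumes an AArch64 target where <arm_neon.h> is available.

#include <arm_neon.h>

// Copy eight floats (two q-registers) through the *_x2 forms that the
// workaround headers supply when the compiler's arm_neon.h lacks them.
static inline void copy8_floats(const float* src, float* dst) {
  float32x4x2_t v = vld1q_f32_x2(src);  // one ld1 of both halves
  vst1q_f32_x2(dst, v);                 // one st1 of both halves
}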
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256.h ADDED
@@ -0,0 +1,307 @@
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+
6
+ #include <ATen/cpu/vec/intrinsics.h>
7
+
8
+ #include <ATen/cpu/vec/vec_base.h>
9
+ #if !(defined(__VSX__) || defined(CPU_CAPABILITY_VSX) || defined(CPU_CAPABILITY_ZVECTOR))
10
+ #include <ATen/cpu/vec/vec256/vec256_float.h>
11
+ #include <ATen/cpu/vec/vec256/vec256_float_neon.h>
12
+ #include <ATen/cpu/vec/vec256/vec256_bfloat16.h>
13
+ #include <ATen/cpu/vec/vec256/vec256_double.h>
14
+ #include <ATen/cpu/vec/vec256/vec256_int.h>
15
+ #include <ATen/cpu/vec/vec256/vec256_qint.h>
16
+ #include <ATen/cpu/vec/vec256/vec256_complex_float.h>
17
+ #include <ATen/cpu/vec/vec256/vec256_complex_double.h>
18
+ #elif defined(__VSX__) || defined(CPU_CAPABILITY_VSX)
19
+ #include <ATen/cpu/vec/vec256/vsx/vec256_common_vsx.h>
20
+ #else
21
+ #include <ATen/cpu/vec/vec256/zarch/vec256_zarch.h>
22
+ #include <ATen/cpu/vec/vec256/vec256_bfloat16.h>
23
+ #endif
24
+
25
+ #include <algorithm>
26
+ #include <cstddef>
27
+ #include <cstdint>
28
+ #include <cstring>
29
+ #include <ostream>
30
+
31
+ namespace at::vec {
32
+
33
+ // Note [CPU_CAPABILITY namespace]
34
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
35
+ // This header, and all of its subheaders, will be compiled with
36
+ // different architecture flags for each supported set of vector
37
+ // intrinsics. So we need to make sure they aren't inadvertently
38
+ // linked together. We do this by declaring objects in an `inline
39
+ // namespace` which changes the name mangling, but can still be
40
+ // accessed as `at::vec`.
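A hedged illustration of the pattern this note describes (the names below are invented, not ATen code): compiling the same inline-namespace body under different namespace names produces distinct mangled symbols, so objects built with different ISA flags cannot collide at link time, while callers keep using the outer namespace.

// Hypothetical sketch of the inline-namespace isolation idea.
namespace mylib {
inline namespace AVX2 {            // another TU might use DEFAULT or AVX512 here
  inline int kernel() { return 2; }
}
} // namespace mylib

inline int call_kernel() { return mylib::kernel(); }  // mangles as mylib::AVX2::kernel in this TU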
41
+ inline namespace CPU_CAPABILITY {
42
+
43
+ inline std::ostream& operator<<(std::ostream& stream, const c10::qint32& val) {
44
+ stream << val.val_;
45
+ return stream;
46
+ }
47
+ inline std::ostream& operator<<(std::ostream& stream, const c10::qint8& val) {
48
+ stream << static_cast<int>(val.val_);
49
+ return stream;
50
+ }
51
+ inline std::ostream& operator<<(std::ostream& stream, const c10::quint8& val) {
52
+ stream << static_cast<unsigned int>(val.val_);
53
+ return stream;
54
+ }
55
+
56
+ template <typename T>
57
+ std::ostream& operator<<(std::ostream& stream, const Vectorized<T>& vec) {
58
+ T buf[Vectorized<T>::size()];
59
+ vec.store(buf);
60
+ stream << "vec[";
61
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
62
+ if (i != 0) {
63
+ stream << ", ";
64
+ }
65
+ stream << buf[i];
66
+ }
67
+ stream << "]";
68
+ return stream;
69
+ }
70
+
71
+
72
+ #if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
73
+
74
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CAST (AVX2) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
75
+
76
+ template<>
77
+ inline Vectorized<float> cast<float, double>(const Vectorized<double>& src) {
78
+ return _mm256_castpd_ps(src);
79
+ }
80
+
81
+ template<>
82
+ inline Vectorized<double> cast<double, float>(const Vectorized<float>& src) {
83
+ return _mm256_castps_pd(src);
84
+ }
85
+
86
+ template<>
87
+ inline Vectorized<float> cast<float, int32_t>(const Vectorized<int32_t>& src) {
88
+ return _mm256_castsi256_ps(src);
89
+ }
90
+
91
+ template<>
92
+ inline Vectorized<double> cast<double, int64_t>(const Vectorized<int64_t>& src) {
93
+ return _mm256_castsi256_pd(src);
94
+ }
95
+
96
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GATHER ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
97
+
98
+ template<int64_t scale = 1>
99
+ std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<double>>
100
+ inline gather(const double* base_addr, const Vectorized<int64_t>& vindex) {
101
+ return _mm256_i64gather_pd(base_addr, vindex, scale);
102
+ }
103
+
104
+ template<int64_t scale = 1>
105
+ std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<float>>
106
+ inline gather(const float* base_addr, const Vectorized<int32_t>& vindex) {
107
+ return _mm256_i32gather_ps(base_addr, vindex, scale);
108
+ }
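A usage sketch for the gather wrapper above (assumes an AVX2 build where these specializations are active; the table and index values are arbitrary): the scale template argument is the byte stride applied to each index, so contiguous double access uses sizeof(double).

#include <ATen/cpu/vec/vec256/vec256.h>

inline at::vec::Vectorized<double> gather_four(const double* table) {
  // Gathers table[3], table[1], table[7], table[5] with a single gather instruction.
  auto idx = at::vec::Vectorized<int64_t>(3, 1, 7, 5);
  return at::vec::gather<sizeof(double)>(table, idx);  // scale = 8 bytes
}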
109
+
110
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MASK GATHER ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
111
+
112
+ template<int64_t scale = 1>
113
+ std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<double>>
114
+ inline mask_gather(const Vectorized<double>& src, const double* base_addr,
115
+ const Vectorized<int64_t>& vindex, Vectorized<double>& mask) {
116
+ return _mm256_mask_i64gather_pd(src, base_addr, vindex, mask, scale);
117
+ }
118
+
119
+ template<int64_t scale = 1>
120
+ std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<float>>
121
+ inline mask_gather(const Vectorized<float>& src, const float* base_addr,
122
+ const Vectorized<int32_t>& vindex, Vectorized<float>& mask) {
123
+ return _mm256_mask_i32gather_ps(src, base_addr, vindex, mask, scale);
124
+ }
125
+
126
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CONVERT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
127
+
128
+ // Only works for inputs in the range: [-2^51, 2^51]
129
+ // From: https://stackoverflow.com/a/41148578
130
+ template<>
131
+ Vectorized<int64_t>
132
+ inline convert_to_int_of_same_size<double>(const Vectorized<double> &src) {
133
+ auto x = _mm256_add_pd(src, _mm256_set1_pd(0x0018000000000000));
134
+ return _mm256_sub_epi64(
135
+ _mm256_castpd_si256(x),
136
+ _mm256_castpd_si256(_mm256_set1_pd(0x0018000000000000))
137
+ );
138
+ }
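A scalar sketch of the same bit trick, to show why the magic constant works (the helper name is invented; assumes IEEE-754 doubles, the default round-to-nearest mode, and inputs within the stated [-2^51, 2^51] range):

#include <cstdint>
#include <cstring>

// 0x0018000000000000 used as a double *value* is 1.5 * 2^52. Adding it forces
// the sum's exponent into the 2^52 binade, so the integer part of x lands in
// the low mantissa bits; subtracting the constant's bit pattern recovers x.
inline int64_t double_to_int64_sketch(double x) {
  const double magic = 0x0018000000000000;  // 6755399441055744.0
  double shifted = x + magic;
  int64_t shifted_bits, magic_bits;
  std::memcpy(&shifted_bits, &shifted, sizeof(shifted_bits));
  std::memcpy(&magic_bits, &magic, sizeof(magic_bits));
  return shifted_bits - magic_bits;
}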
139
+
140
+ template<>
141
+ Vectorized<int32_t>
142
+ inline convert_to_int_of_same_size<float>(const Vectorized<float> &src) {
143
+ return _mm256_cvttps_epi32(src);
144
+ }
145
+
146
+ // Only works for inputs in the range: [-2^51, 2^51]
147
+ // From: https://stackoverflow.com/a/41148578
148
+ template<>
149
+ Vectorized<double>
150
+ inline convert_to_fp_of_same_size<double>(const Vectorized<int64_t> &src) {
151
+ auto x = _mm256_add_epi64(src, _mm256_castpd_si256(_mm256_set1_pd(0x0018000000000000)));
152
+ return _mm256_sub_pd(
153
+ _mm256_castsi256_pd(x),
154
+ _mm256_set1_pd(0x0018000000000000)
155
+ );
156
+ }
157
+
158
+ template<>
159
+ Vectorized<float>
160
+ inline convert_to_fp_of_same_size<float>(const Vectorized<int32_t> &src) {
161
+ return _mm256_cvtepi32_ps(src);
162
+ }
163
+
164
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ INTERLEAVE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
165
+
166
+ template <>
167
+ std::pair<Vectorized<double>, Vectorized<double>>
168
+ inline interleave2<double>(const Vectorized<double>& a, const Vectorized<double>& b) {
169
+ // inputs:
170
+ // a = {a0, a1, a2, a3}
171
+ // b = {b0, b1, b2, b3}
172
+
173
+ // swap lanes:
174
+ // a_swapped = {a0, a1, b0, b1}
175
+ // b_swapped = {a2, a3, b2, b3}
176
+ auto a_swapped = _mm256_permute2f128_pd(a, b, 0b0100000); // 0, 2. 4 bits apart
177
+ auto b_swapped = _mm256_permute2f128_pd(a, b, 0b0110001); // 1, 3. 4 bits apart
178
+
179
+ // group cols crossing lanes:
180
+ // return {a0, b0, a1, b1}
181
+ // {a2, b2, a3, b3}
182
+ return std::make_pair(_mm256_permute4x64_pd(a_swapped, 0b11011000), // 0, 2, 1, 3
183
+ _mm256_permute4x64_pd(b_swapped, 0b11011000)); // 0, 2, 1, 3
184
+ }
185
+
186
+ template <>
187
+ std::pair<Vectorized<float>, Vectorized<float>>
188
+ inline interleave2<float>(const Vectorized<float>& a, const Vectorized<float>& b) {
189
+ // inputs:
190
+ // a = {a0, a1, a2, a3, a4, a5, a6, a7}
191
+ // b = {b0, b1, b2, b3, b4, b5, b6, b7}
192
+
193
+ // swap lanes:
194
+ // a_swapped = {a0, a1, a2, a3, b0, b1, b2, b3}
195
+ // b_swapped = {a4, a5, a6, a7, b4, b5, b6, b7}
196
+ // TODO: can we support caching this?
197
+ auto a_swapped = _mm256_permute2f128_ps(a, b, 0b0100000); // 0, 2. 4 bits apart
198
+ auto b_swapped = _mm256_permute2f128_ps(a, b, 0b0110001); // 1, 3. 4 bits apart
199
+
200
+ // group cols crossing lanes:
201
+ // return {a0, b0, a1, b1, a2, b2, a3, b3}
202
+ // {a4, b4, a5, b5, a6, b6, a7, b7}
203
+ const __m256i group_ctrl = _mm256_setr_epi32(0, 4, 1, 5, 2, 6, 3, 7);
204
+ return std::make_pair(_mm256_permutevar8x32_ps(a_swapped, group_ctrl),
205
+ _mm256_permutevar8x32_ps(b_swapped, group_ctrl));
206
+ }
207
+
208
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ DEINTERLEAVE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
209
+
210
+ template <>
211
+ std::pair<Vectorized<double>, Vectorized<double>>
212
+ inline deinterleave2<double>(const Vectorized<double>& a, const Vectorized<double>& b) {
213
+ // inputs:
214
+ // a = {a0, b0, a1, b1}
215
+ // b = {a2, b2, a3, b3}
216
+
217
+ // group cols crossing lanes:
218
+ // a_grouped = {a0, a1, b0, b1}
219
+ // b_grouped = {a2, a3, b2, b3}
220
+ auto a_grouped = _mm256_permute4x64_pd(a, 0b11011000); // 0, 2, 1, 3
221
+ auto b_grouped = _mm256_permute4x64_pd(b, 0b11011000); // 0, 2, 1, 3
222
+
223
+ // swap lanes:
224
+ // return {a0, a1, a2, a3}
225
+ // {b0, b1, b2, b3}
226
+ return std::make_pair(_mm256_permute2f128_pd(a_grouped, b_grouped, 0b0100000), // 0, 2. 4 bits apart
227
+ _mm256_permute2f128_pd(a_grouped, b_grouped, 0b0110001)); // 1, 3. 4 bits apart
228
+ }
229
+
230
+ template <>
231
+ std::pair<Vectorized<float>, Vectorized<float>>
232
+ inline deinterleave2<float>(const Vectorized<float>& a, const Vectorized<float>& b) {
233
+ // inputs:
234
+ // a = {a0, b0, a1, b1, a2, b2, a3, b3}
235
+ // b = {a4, b4, a5, b5, a6, b6, a7, b7}
236
+
237
+ // group cols crossing lanes:
238
+ // a_grouped = {a0, a1, a2, a3, b0, b1, b2, b3}
239
+ // b_grouped = {a4, a5, a6, a7, b4, b5, b6, b7}
240
+ // TODO: can we support caching this?
241
+ const __m256i group_ctrl = _mm256_setr_epi32(0, 2, 4, 6, 1, 3, 5, 7);
242
+ auto a_grouped = _mm256_permutevar8x32_ps(a, group_ctrl);
243
+ auto b_grouped = _mm256_permutevar8x32_ps(b, group_ctrl);
244
+
245
+ // swap lanes:
246
+ // return {a0, a1, a2, a3, a4, a5, a6, a7}
247
+ // {b0, b1, b2, b3, b4, b5, b6, b7}
248
+ return std::make_pair(_mm256_permute2f128_ps(a_grouped, b_grouped, 0b0100000), // 0, 2. 4 bits apart
249
+ _mm256_permute2f128_ps(a_grouped, b_grouped, 0b0110001)); // 1, 3. 4 bits apart
250
+ }
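For reference, the element-level contract of interleave2/deinterleave2 can be stated as a plain scalar loop (a sketch only; the vector versions above realize the same permutation with two shuffles per output):

#include <array>
#include <cstddef>
#include <utility>

// interleave2: ({a0..a3}, {b0..b3}) -> ({a0,b0,a1,b1}, {a2,b2,a3,b3});
// deinterleave2 is the inverse mapping.
template <typename T, std::size_t N>
std::pair<std::array<T, N>, std::array<T, N>>
interleave2_ref(const std::array<T, N>& a, const std::array<T, N>& b) {
  std::array<T, N> lo{}, hi{};
  for (std::size_t i = 0; i < N / 2; ++i) {
    lo[2 * i] = a[i];          lo[2 * i + 1] = b[i];
    hi[2 * i] = a[N / 2 + i];  hi[2 * i + 1] = b[N / 2 + i];
  }
  return {lo, hi};
}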
251
+
252
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ FLIP ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
253
+
254
+ template<>
255
+ inline Vectorized<float> flip(const Vectorized<float> & v) {
256
+ const __m256i mask_float = _mm256_set_epi32(0, 1, 2, 3, 4, 5, 6, 7);
257
+ return _mm256_permutevar8x32_ps(v, mask_float);
258
+ }
259
+
260
+ template<>
261
+ inline Vectorized<double> flip(const Vectorized<double> & v) {
262
+ return _mm256_permute4x64_pd(v, 27); // 27 == _MM_SHUFFLE(0, 1, 2, 3)
263
+ }
264
+
265
+ template<>
266
+ inline Vectorized<int64_t> flip(const Vectorized<int64_t> & v) {
267
+ return _mm256_permute4x64_epi64(v, 27); // 27 == _MM_SHUFFLE(0, 1, 2, 3)
268
+ }
269
+
270
+ template<>
271
+ inline Vectorized<int32_t> flip(const Vectorized<int32_t> & v) {
272
+ const __m256i mask_int32 = _mm256_set_epi32(0, 1, 2, 3, 4, 5, 6, 7);
273
+ return _mm256_permutevar8x32_epi32(v, mask_int32);
274
+ }
275
+
276
+ template<>
277
+ inline Vectorized<int16_t> flip(const Vectorized<int16_t> & v) {
278
+ const __m256i mask = _mm256_set_epi8(
279
+ 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
280
+ 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14
281
+ );
282
+ auto reversed = _mm256_shuffle_epi8(v, mask);
283
+ return _mm256_permute2x128_si256(reversed, reversed, 1);
284
+ }
285
+
286
+ inline __m256i flip8(const __m256i & v) {
287
+ const __m256i mask_int8 = _mm256_set_epi8(
288
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
289
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
290
+ );
291
+ auto reversed = _mm256_shuffle_epi8(v, mask_int8);
292
+ return _mm256_permute2x128_si256(reversed, reversed, 1);
293
+ }
294
+
295
+ template<>
296
+ inline Vectorized<int8_t> flip(const Vectorized<int8_t> & v) {
297
+ return flip8(v);
298
+ }
299
+
300
+ template<>
301
+ inline Vectorized<uint8_t> flip(const Vectorized<uint8_t> & v) {
302
+ return flip8(v);
303
+ }
304
+
305
+ #endif // defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
306
+
307
+ }} // namespace at::vec::CPU_CAPABILITY
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_double.h ADDED
@@ -0,0 +1,442 @@
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+
6
+ #include <ATen/cpu/vec/intrinsics.h>
7
+ #include <ATen/cpu/vec/vec_base.h>
8
+ #include <c10/util/irange.h>
9
+ #if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
10
+ #include <sleef.h>
11
+ #endif
12
+
13
+ namespace at::vec {
14
+ // See Note [CPU_CAPABILITY namespace]
15
+ inline namespace CPU_CAPABILITY {
16
+
17
+
18
+ #if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
19
+
20
+ template <> class Vectorized<double> {
21
+ private:
22
+ __m256d values;
23
+ public:
24
+ using value_type = double;
25
+ using size_type = int;
26
+ static constexpr size_type size() {
27
+ return 4;
28
+ }
29
+ Vectorized() {}
30
+ Vectorized(__m256d v) : values(v) {}
31
+ Vectorized(double val) {
32
+ values = _mm256_set1_pd(val);
33
+ }
34
+ Vectorized(double val1, double val2, double val3, double val4) {
35
+ values = _mm256_setr_pd(val1, val2, val3, val4);
36
+ }
37
+ operator __m256d() const {
38
+ return values;
39
+ }
40
+ template <int64_t mask>
41
+ static Vectorized<double> blend(const Vectorized<double>& a, const Vectorized<double>& b) {
42
+ return _mm256_blend_pd(a.values, b.values, mask);
43
+ }
44
+ static Vectorized<double> blendv(const Vectorized<double>& a, const Vectorized<double>& b,
45
+ const Vectorized<double>& mask) {
46
+ return _mm256_blendv_pd(a.values, b.values, mask.values);
47
+ }
48
+ template<typename step_t>
49
+ static Vectorized<double> arange(double base = 0., step_t step = static_cast<step_t>(1)) {
50
+ return Vectorized<double>(base, base + step, base + 2 * step, base + 3 * step);
51
+ }
52
+ static Vectorized<double> set(const Vectorized<double>& a, const Vectorized<double>& b,
53
+ int64_t count = size()) {
54
+ switch (count) {
55
+ case 0:
56
+ return a;
57
+ case 1:
58
+ return blend<1>(a, b);
59
+ case 2:
60
+ return blend<3>(a, b);
61
+ case 3:
62
+ return blend<7>(a, b);
63
+ }
64
+ return b;
65
+ }
66
+ static Vectorized<double> loadu(const void* ptr, int64_t count = size()) {
67
+ if (count == size())
68
+ return _mm256_loadu_pd(reinterpret_cast<const double*>(ptr));
69
+
70
+
71
+ __at_align__ double tmp_values[size()];
72
+ // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
73
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
74
+ // instructions while a loop would be compiled to one instruction.
75
+ for (const auto i : c10::irange(size())) {
76
+ tmp_values[i] = 0.0;
77
+ }
78
+ std::memcpy(
79
+ tmp_values,
80
+ reinterpret_cast<const double*>(ptr),
81
+ count * sizeof(double));
82
+ return _mm256_load_pd(tmp_values);
83
+ }
84
+ void store(void* ptr, int count = size()) const {
85
+ if (count == size()) {
86
+ _mm256_storeu_pd(reinterpret_cast<double*>(ptr), values);
87
+ } else if (count > 0) {
88
+ double tmp_values[size()];
89
+ _mm256_storeu_pd(reinterpret_cast<double*>(tmp_values), values);
90
+ std::memcpy(ptr, tmp_values, count * sizeof(double));
91
+ }
92
+ }
93
+ const double& operator[](int idx) const = delete;
94
+ double& operator[](int idx) = delete;
95
+ int zero_mask() const {
96
+ // returns an integer mask where all zero elements are translated to 1-bit and others are translated to 0-bit
97
+ __m256d cmp = _mm256_cmp_pd(values, _mm256_set1_pd(0.0), _CMP_EQ_OQ);
98
+ return _mm256_movemask_pd(cmp);
99
+ }
100
+ Vectorized<double> isnan() const {
101
+ return _mm256_cmp_pd(values, _mm256_set1_pd(0.0), _CMP_UNORD_Q);
102
+ }
103
+ bool has_inf_nan() const {
104
+ __m256d self_sub = _mm256_sub_pd(values, values);
105
+ return (_mm256_movemask_epi8(_mm256_castpd_si256(self_sub)) & 0x77777777) != 0;
106
+ }
107
+ Vectorized<double> map(double (*const f)(double)) const {
108
+ __at_align__ double tmp[size()];
109
+ store(tmp);
110
+ for (const auto i : c10::irange(size())) {
111
+ tmp[i] = f(tmp[i]);
112
+ }
113
+ return loadu(tmp);
114
+ }
115
+ Vectorized<double> abs() const {
116
+ auto mask = _mm256_set1_pd(-0.f);
117
+ return _mm256_andnot_pd(mask, values);
118
+ }
119
+ Vectorized<double> angle() const {
120
+ const auto zero_vec = _mm256_set1_pd(0.f);
121
+ const auto nan_vec = _mm256_set1_pd(NAN);
122
+ const auto not_nan_mask = _mm256_cmp_pd(values, values, _CMP_EQ_OQ);
123
+ const auto nan_mask = _mm256_cmp_pd(not_nan_mask, zero_vec, _CMP_EQ_OQ);
124
+ const auto pi = _mm256_set1_pd(c10::pi<double>);
125
+
126
+ const auto neg_mask = _mm256_cmp_pd(values, zero_vec, _CMP_LT_OQ);
127
+ auto angle = _mm256_blendv_pd(zero_vec, pi, neg_mask);
128
+ angle = _mm256_blendv_pd(angle, nan_vec, nan_mask);
129
+ return angle;
130
+ }
131
+ Vectorized<double> real() const {
132
+ return *this;
133
+ }
134
+ Vectorized<double> imag() const {
135
+ return _mm256_set1_pd(0);
136
+ }
137
+ Vectorized<double> conj() const {
138
+ return *this;
139
+ }
140
+ Vectorized<double> acos() const {
141
+ return Vectorized<double>(Sleef_acosd4_u10(values));
142
+ }
143
+ Vectorized<double> acosh() const {
144
+ return Vectorized<double>(Sleef_acoshd4_u10(values));
145
+ }
146
+ Vectorized<double> asin() const {
147
+ return Vectorized<double>(Sleef_asind4_u10(values));
148
+ }
149
+ Vectorized<double> atan() const {
150
+ return Vectorized<double>(Sleef_atand4_u10(values));
151
+ }
152
+ Vectorized<double> atanh() const {
153
+ return Vectorized<double>(Sleef_atanhd4_u10(values));
154
+ }
155
+ Vectorized<double> atan2(const Vectorized<double> &b) const {
156
+ return Vectorized<double>(Sleef_atan2d4_u10(values, b));
157
+ }
158
+ Vectorized<double> copysign(const Vectorized<double> &sign) const {
159
+ return Vectorized<double>(Sleef_copysignd4(values, sign));
160
+ }
161
+ Vectorized<double> erf() const {
162
+ return Vectorized<double>(Sleef_erfd4_u10(values));
163
+ }
164
+ Vectorized<double> erfc() const {
165
+ return Vectorized<double>(Sleef_erfcd4_u15(values));
166
+ }
167
+ Vectorized<double> erfinv() const {
168
+ return map(calc_erfinv);
169
+ }
170
+ Vectorized<double> exp() const {
171
+ return Vectorized<double>(Sleef_expd4_u10(values));
172
+ }
173
+ Vectorized<double> exp2() const {
174
+ return Vectorized<double>(Sleef_exp2d4_u10(values));
175
+ }
176
+ Vectorized<double> expm1() const {
177
+ return Vectorized<double>(Sleef_expm1d4_u10(values));
178
+ }
179
+ Vectorized<double> exp_u20() const {
180
+ return exp();
181
+ }
182
+ Vectorized<double> fmod(const Vectorized<double>& q) const {
183
+ return Vectorized<double>(Sleef_fmodd4(values, q));
184
+ }
185
+ Vectorized<double> hypot(const Vectorized<double> &b) const {
186
+ return Vectorized<double>(Sleef_hypotd4_u05(values, b));
187
+ }
188
+ Vectorized<double> i0() const {
189
+ return map(calc_i0);
190
+ }
191
+ Vectorized<double> i0e() const {
192
+ return map(calc_i0e);
193
+ }
194
+ Vectorized<double> digamma() const {
195
+ return map(calc_digamma);
196
+ }
197
+ Vectorized<double> igamma(const Vectorized<double> &x) const {
198
+ __at_align__ double tmp[size()];
199
+ __at_align__ double tmp_x[size()];
200
+ store(tmp);
201
+ x.store(tmp_x);
202
+ for (const auto i : c10::irange(size())) {
203
+ tmp[i] = calc_igamma(tmp[i], tmp_x[i]);
204
+ }
205
+ return loadu(tmp);
206
+ }
207
+ Vectorized<double> igammac(const Vectorized<double> &x) const {
208
+ __at_align__ double tmp[size()];
209
+ __at_align__ double tmp_x[size()];
210
+ store(tmp);
211
+ x.store(tmp_x);
212
+ for (const auto i : c10::irange(size())) {
213
+ tmp[i] = calc_igammac(tmp[i], tmp_x[i]);
214
+ }
215
+ return loadu(tmp);
216
+ }
217
+ Vectorized<double> log() const {
218
+ return Vectorized<double>(Sleef_logd4_u10(values));
219
+ }
220
+ Vectorized<double> log2() const {
221
+ return Vectorized<double>(Sleef_log2d4_u10(values));
222
+ }
223
+ Vectorized<double> log10() const {
224
+ return Vectorized<double>(Sleef_log10d4_u10(values));
225
+ }
226
+ Vectorized<double> log1p() const {
227
+ return Vectorized<double>(Sleef_log1pd4_u10(values));
228
+ }
229
+ Vectorized<double> sin() const {
230
+ return Vectorized<double>(Sleef_sind4_u10(values));
231
+ }
232
+ Vectorized<double> sinh() const {
233
+ return Vectorized<double>(Sleef_sinhd4_u10(values));
234
+ }
235
+ Vectorized<double> cos() const {
236
+ return Vectorized<double>(Sleef_cosd4_u10(values));
237
+ }
238
+ Vectorized<double> cosh() const {
239
+ return Vectorized<double>(Sleef_coshd4_u10(values));
240
+ }
241
+ Vectorized<double> ceil() const {
242
+ return _mm256_ceil_pd(values);
243
+ }
244
+ Vectorized<double> floor() const {
245
+ return _mm256_floor_pd(values);
246
+ }
247
+ Vectorized<double> frac() const;
248
+ Vectorized<double> neg() const {
249
+ return _mm256_xor_pd(_mm256_set1_pd(-0.), values);
250
+ }
251
+ Vectorized<double> nextafter(const Vectorized<double> &b) const {
252
+ return Vectorized<double>(Sleef_nextafterd4(values, b));
253
+ }
254
+ Vectorized<double> round() const {
255
+ return _mm256_round_pd(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
256
+ }
257
+ Vectorized<double> tan() const {
258
+ return Vectorized<double>(Sleef_tand4_u10(values));
259
+ }
260
+ Vectorized<double> tanh() const {
261
+ return Vectorized<double>(Sleef_tanhd4_u10(values));
262
+ }
263
+ Vectorized<double> trunc() const {
264
+ return _mm256_round_pd(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
265
+ }
266
+ Vectorized<double> lgamma() const {
267
+ return Vectorized<double>(Sleef_lgammad4_u10(values));
268
+ }
269
+ Vectorized<double> sqrt() const {
270
+ return _mm256_sqrt_pd(values);
271
+ }
272
+ Vectorized<double> reciprocal() const {
273
+ return _mm256_div_pd(_mm256_set1_pd(1), values);
274
+ }
275
+ Vectorized<double> rsqrt() const {
276
+ return _mm256_div_pd(_mm256_set1_pd(1), _mm256_sqrt_pd(values));
277
+ }
278
+ Vectorized<double> pow(const Vectorized<double> &b) const {
279
+ return Vectorized<double>(Sleef_powd4_u10(values, b));
280
+ }
281
+ // Comparison using the _CMP_**_OQ predicate.
282
+ // `O`: get false if an operand is NaN
283
+ // `Q`: do not raise if an operand is NaN
284
+ Vectorized<double> operator==(const Vectorized<double>& other) const {
285
+ return _mm256_cmp_pd(values, other.values, _CMP_EQ_OQ);
286
+ }
287
+
288
+ Vectorized<double> operator!=(const Vectorized<double>& other) const {
289
+ return _mm256_cmp_pd(values, other.values, _CMP_NEQ_UQ);
290
+ }
291
+
292
+ Vectorized<double> operator<(const Vectorized<double>& other) const {
293
+ return _mm256_cmp_pd(values, other.values, _CMP_LT_OQ);
294
+ }
295
+
296
+ Vectorized<double> operator<=(const Vectorized<double>& other) const {
297
+ return _mm256_cmp_pd(values, other.values, _CMP_LE_OQ);
298
+ }
299
+
300
+ Vectorized<double> operator>(const Vectorized<double>& other) const {
301
+ return _mm256_cmp_pd(values, other.values, _CMP_GT_OQ);
302
+ }
303
+
304
+ Vectorized<double> operator>=(const Vectorized<double>& other) const {
305
+ return _mm256_cmp_pd(values, other.values, _CMP_GE_OQ);
306
+ }
307
+
308
+ Vectorized<double> eq(const Vectorized<double>& other) const;
309
+ Vectorized<double> ne(const Vectorized<double>& other) const;
310
+ Vectorized<double> lt(const Vectorized<double>& other) const;
311
+ Vectorized<double> le(const Vectorized<double>& other) const;
312
+ Vectorized<double> gt(const Vectorized<double>& other) const;
313
+ Vectorized<double> ge(const Vectorized<double>& other) const;
314
+ };
315
+
316
+ template <>
317
+ Vectorized<double> inline operator+(const Vectorized<double>& a, const Vectorized<double>& b) {
318
+ return _mm256_add_pd(a, b);
319
+ }
320
+
321
+ template <>
322
+ Vectorized<double> inline operator-(const Vectorized<double>& a, const Vectorized<double>& b) {
323
+ return _mm256_sub_pd(a, b);
324
+ }
325
+
326
+ template <>
327
+ Vectorized<double> inline operator*(const Vectorized<double>& a, const Vectorized<double>& b) {
328
+ return _mm256_mul_pd(a, b);
329
+ }
330
+
331
+ template <>
332
+ Vectorized<double> inline operator/(const Vectorized<double>& a, const Vectorized<double>& b) {
333
+ return _mm256_div_pd(a, b);
334
+ }
335
+
336
+ // frac. Implement this here so we can use subtraction.
337
+ inline Vectorized<double> Vectorized<double>::frac() const {
338
+ return *this - this->trunc();
339
+ }
340
+
341
+ // Implements the IEEE 754 201X `maximum` operation, which propagates NaN if
342
+ // either input is a NaN.
343
+ template <>
344
+ Vectorized<double> inline maximum(const Vectorized<double>& a, const Vectorized<double>& b) {
345
+ Vectorized<double> max = _mm256_max_pd(a, b);
346
+ Vectorized<double> isnan = _mm256_cmp_pd(a, b, _CMP_UNORD_Q);
347
+ // Exploit the fact that all-ones is a NaN.
348
+ return _mm256_or_pd(max, isnan);
349
+ }
350
+
351
+ // Implements the IEEE 754 201X `minimum` operation, which propagates NaN if
352
+ // either input is a NaN.
353
+ template <>
354
+ Vectorized<double> inline minimum(const Vectorized<double>& a, const Vectorized<double>& b) {
355
+ Vectorized<double> min = _mm256_min_pd(a, b);
356
+ Vectorized<double> isnan = _mm256_cmp_pd(a, b, _CMP_UNORD_Q);
357
+ // Exploit the fact that all-ones is a NaN.
358
+ return _mm256_or_pd(min, isnan);
359
+ }
360
+
361
+ template <>
362
+ Vectorized<double> inline clamp(const Vectorized<double>& a, const Vectorized<double>& min, const Vectorized<double>& max) {
363
+ return _mm256_min_pd(max, _mm256_max_pd(min, a));
364
+ }
365
+
366
+ template <>
367
+ Vectorized<double> inline clamp_min(const Vectorized<double>& a, const Vectorized<double>& min) {
368
+ return _mm256_max_pd(min, a);
369
+ }
370
+
371
+ template <>
372
+ Vectorized<double> inline clamp_max(const Vectorized<double>& a, const Vectorized<double>& max) {
373
+ return _mm256_min_pd(max, a);
374
+ }
375
+
376
+ template <>
377
+ Vectorized<double> inline operator&(const Vectorized<double>& a, const Vectorized<double>& b) {
378
+ return _mm256_and_pd(a, b);
379
+ }
380
+
381
+ template <>
382
+ Vectorized<double> inline operator|(const Vectorized<double>& a, const Vectorized<double>& b) {
383
+ return _mm256_or_pd(a, b);
384
+ }
385
+
386
+ template <>
387
+ Vectorized<double> inline operator^(const Vectorized<double>& a, const Vectorized<double>& b) {
388
+ return _mm256_xor_pd(a, b);
389
+ }
390
+
391
+ inline Vectorized<double> Vectorized<double>::eq(const Vectorized<double>& other) const {
392
+ return (*this == other) & Vectorized<double>(1.0);
393
+ }
394
+
395
+ inline Vectorized<double> Vectorized<double>::ne(const Vectorized<double>& other) const {
396
+ return (*this != other) & Vectorized<double>(1.0);
397
+ }
398
+
399
+ inline Vectorized<double> Vectorized<double>::gt(const Vectorized<double>& other) const {
400
+ return (*this > other) & Vectorized<double>(1.0);
401
+ }
402
+
403
+ inline Vectorized<double> Vectorized<double>::ge(const Vectorized<double>& other) const {
404
+ return (*this >= other) & Vectorized<double>(1.0);
405
+ }
406
+
407
+ inline Vectorized<double> Vectorized<double>::lt(const Vectorized<double>& other) const {
408
+ return (*this < other) & Vectorized<double>(1.0);
409
+ }
410
+
411
+ inline Vectorized<double> Vectorized<double>::le(const Vectorized<double>& other) const {
412
+ return (*this <= other) & Vectorized<double>(1.0);
413
+ }
414
+
415
+ template <>
416
+ inline void convert(const double* src, double* dst, int64_t n) {
417
+ int64_t i;
418
+ #pragma unroll
419
+ for (i = 0; i <= (n - Vectorized<double>::size()); i += Vectorized<double>::size()) {
420
+ _mm256_storeu_pd(dst + i, _mm256_loadu_pd(src + i));
421
+ }
422
+ #pragma unroll
423
+ for (; i < n; i++) {
424
+ dst[i] = src[i];
425
+ }
426
+ }
427
+
428
+ #ifdef CPU_CAPABILITY_AVX2
429
+ template <>
430
+ Vectorized<double> inline fmadd(const Vectorized<double>& a, const Vectorized<double>& b, const Vectorized<double>& c) {
431
+ return _mm256_fmadd_pd(a, b, c);
432
+ }
433
+
434
+ template <>
435
+ Vectorized<double> inline fmsub(const Vectorized<double>& a, const Vectorized<double>& b, const Vectorized<double>& c) {
436
+ return _mm256_fmsub_pd(a, b, c);
437
+ }
438
+ #endif
439
+
440
+ #endif
441
+
442
+ }} // namespace at::vec::CPU_CAPABILITY
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_float.h ADDED
@@ -0,0 +1,636 @@
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+
6
+ #include <ATen/cpu/vec/intrinsics.h>
7
+ #include <ATen/cpu/vec/vec_base.h>
8
+ #include <c10/util/irange.h>
9
+ #if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
10
+ #include <sleef.h>
11
+ #endif
12
+
13
+ namespace at::vec {
14
+ // See Note [CPU_CAPABILITY namespace]
15
+ inline namespace CPU_CAPABILITY {
16
+
17
+ #if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
18
+
19
+ template <> class Vectorized<float> {
20
+ private:
21
+ __m256 values;
22
+ public:
23
+ using value_type = float;
24
+ using size_type = int;
25
+ static constexpr size_type size() {
26
+ return 8;
27
+ }
28
+ Vectorized() {}
29
+ Vectorized(__m256 v) : values(v) {}
30
+ Vectorized(float val) {
31
+ values = _mm256_set1_ps(val);
32
+ }
33
+ Vectorized(float val1, float val2, float val3, float val4,
34
+ float val5, float val6, float val7, float val8) {
35
+ values = _mm256_setr_ps(val1, val2, val3, val4, val5, val6, val7, val8);
36
+ }
37
+ operator __m256() const {
38
+ return values;
39
+ }
40
+ template <int64_t mask>
41
+ static Vectorized<float> blend(const Vectorized<float>& a, const Vectorized<float>& b) {
42
+ return _mm256_blend_ps(a.values, b.values, mask);
43
+ }
44
+ static Vectorized<float> blendv(const Vectorized<float>& a, const Vectorized<float>& b,
45
+ const Vectorized<float>& mask) {
46
+ return _mm256_blendv_ps(a.values, b.values, mask.values);
47
+ }
48
+ template<typename step_t>
49
+ static Vectorized<float> arange(float base = 0.f, step_t step = static_cast<step_t>(1)) {
50
+ return Vectorized<float>(
51
+ base, base + step, base + 2 * step, base + 3 * step,
52
+ base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step);
53
+ }
54
+ static Vectorized<float> set(const Vectorized<float>& a, const Vectorized<float>& b,
55
+ int64_t count = size()) {
56
+ switch (count) {
57
+ case 0:
58
+ return a;
59
+ case 1:
60
+ return blend<1>(a, b);
61
+ case 2:
62
+ return blend<3>(a, b);
63
+ case 3:
64
+ return blend<7>(a, b);
65
+ case 4:
66
+ return blend<15>(a, b);
67
+ case 5:
68
+ return blend<31>(a, b);
69
+ case 6:
70
+ return blend<63>(a, b);
71
+ case 7:
72
+ return blend<127>(a, b);
73
+ }
74
+ return b;
75
+ }
76
+ static Vectorized<float> loadu(const void* ptr, int64_t count = size()) {
77
+ if (count == size())
78
+ return _mm256_loadu_ps(reinterpret_cast<const float*>(ptr));
79
+ __at_align__ float tmp_values[size()];
80
+ // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
81
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
82
+ // instructions while a loop would be compiled to one instruction.
83
+ for (const auto i : c10::irange(size())) {
84
+ tmp_values[i] = 0.0;
85
+ }
86
+ std::memcpy(
87
+ tmp_values, reinterpret_cast<const float*>(ptr), count * sizeof(float));
88
+ return _mm256_loadu_ps(tmp_values);
89
+ }
90
+ void store(void* ptr, int64_t count = size()) const {
91
+ if (count == size()) {
92
+ _mm256_storeu_ps(reinterpret_cast<float*>(ptr), values);
93
+ } else if (count > 0) {
94
+ float tmp_values[size()];
95
+ _mm256_storeu_ps(reinterpret_cast<float*>(tmp_values), values);
96
+ std::memcpy(ptr, tmp_values, count * sizeof(float));
97
+ }
98
+ }
99
+ const float& operator[](int idx) const = delete;
100
+ float& operator[](int idx) = delete;
101
+ int zero_mask() const {
102
+ // returns an integer mask where all zero elements are translated to 1-bit and others are translated to 0-bit
103
+ __m256 cmp = _mm256_cmp_ps(values, _mm256_set1_ps(0.0f), _CMP_EQ_OQ);
104
+ return _mm256_movemask_ps(cmp);
105
+ }
106
+ Vectorized<float> isnan() const {
107
+ return _mm256_cmp_ps(values, _mm256_set1_ps(0.0f), _CMP_UNORD_Q);
108
+ }
109
+
110
+ bool has_inf_nan() const {
111
+ __m256 self_sub = _mm256_sub_ps(values, values);
112
+ return (_mm256_movemask_epi8(_mm256_castps_si256(self_sub)) & 0x77777777) != 0;
113
+ }
114
+
115
+ Vectorized<float> map(float (*const f)(float)) const {
116
+ __at_align__ float tmp[size()];
117
+ store(tmp);
118
+ for (const auto i : c10::irange(size())) {
119
+ tmp[i] = f(tmp[i]);
120
+ }
121
+ return loadu(tmp);
122
+ }
123
+ Vectorized<float> abs() const {
124
+ auto mask = _mm256_set1_ps(-0.f);
125
+ return _mm256_andnot_ps(mask, values);
126
+ }
127
+ Vectorized<float> angle() const {
128
+ const auto zero_vec = _mm256_set1_ps(0.f);
129
+ const auto nan_vec = _mm256_set1_ps(NAN);
130
+ const auto not_nan_mask = _mm256_cmp_ps(values, values, _CMP_EQ_OQ);
131
+ const auto nan_mask = _mm256_cmp_ps(not_nan_mask, zero_vec, _CMP_EQ_OQ);
132
+ const auto pi = _mm256_set1_ps(c10::pi<float>);
133
+
134
+ const auto neg_mask = _mm256_cmp_ps(values, zero_vec, _CMP_LT_OQ);
135
+ auto angle = _mm256_blendv_ps(zero_vec, pi, neg_mask);
136
+ angle = _mm256_blendv_ps(angle, nan_vec, nan_mask);
137
+ return angle;
138
+ }
139
+ Vectorized<float> real() const {
140
+ return *this;
141
+ }
142
+ Vectorized<float> imag() const {
143
+ return _mm256_set1_ps(0);
144
+ }
145
+ Vectorized<float> conj() const {
146
+ return *this;
147
+ }
148
+ Vectorized<float> acos() const {
149
+ return Vectorized<float>(Sleef_acosf8_u10(values));
150
+ }
151
+ Vectorized<float> acosh() const {
152
+ return Vectorized<float>(Sleef_acoshf8_u10(values));
153
+ }
154
+ Vectorized<float> asin() const {
155
+ return Vectorized<float>(Sleef_asinf8_u10(values));
156
+ }
157
+ Vectorized<float> atan() const {
158
+ return Vectorized<float>(Sleef_atanf8_u10(values));
159
+ }
160
+ Vectorized<float> atanh() const {
161
+ return Vectorized<float>(Sleef_atanhf8_u10(values));
162
+ }
163
+ Vectorized<float> atan2(const Vectorized<float> &b) const {
164
+ return Vectorized<float>(Sleef_atan2f8_u10(values, b));
165
+ }
166
+ Vectorized<float> copysign(const Vectorized<float> &sign) const {
167
+ return Vectorized<float>(Sleef_copysignf8(values, sign));
168
+ }
169
+ Vectorized<float> erf() const {
170
+ // constants
171
+ const auto neg_zero_vec = _mm256_set1_ps(-0.f);
172
+ const auto one_vec = _mm256_set1_ps(1.0f);
173
+ const auto p = _mm256_set1_ps(0.3275911f);
174
+ const auto p1 = _mm256_set1_ps(0.254829592f);
175
+ const auto p2 = _mm256_set1_ps(-0.284496736f);
176
+ const auto p3 = _mm256_set1_ps(1.421413741f);
177
+ const auto p4 = _mm256_set1_ps(-1.453152027f);
178
+ const auto p5 = _mm256_set1_ps(1.061405429f);
179
+ // sign(x)
180
+ auto sign_mask = _mm256_and_ps(neg_zero_vec, values);
181
+ auto abs_vec = _mm256_xor_ps(sign_mask, values);
182
+ // t = 1 / (p * abs(x) + 1)
183
+ auto tmp0 = _mm256_fmadd_ps(p, abs_vec, one_vec);
184
+ auto t = _mm256_div_ps(one_vec, tmp0);
185
+ // r = p5 * t ^ 4 + p4 * t ^ 3 + p3 * t ^ 2 + p2 * t + p1
186
+ auto tmp1 = _mm256_fmadd_ps(p5, t, p4);
187
+ auto tmp2 = _mm256_fmadd_ps(tmp1, t, p3);
188
+ auto tmp3 = _mm256_fmadd_ps(tmp2, t, p2);
189
+ auto r = _mm256_fmadd_ps(tmp3, t, p1);
190
+ // - exp(- x * x)
191
+ auto pow_2 = _mm256_mul_ps(values, values);
192
+ auto neg_pow_2 = _mm256_xor_ps(neg_zero_vec, pow_2);
193
+ // auto tmp4 = exp(neg_pow_2);
194
+ auto tmp4 = Vectorized<float>(Sleef_expf8_u10(neg_pow_2));
195
+ auto tmp5 = _mm256_xor_ps(neg_zero_vec, tmp4);
196
+ // erf(x) = sign(x) * (1 - r * t * exp(- x * x))
197
+ auto tmp6 = _mm256_mul_ps(tmp5, t);
198
+ auto tmp7 = _mm256_fmadd_ps(tmp6, r, one_vec);
199
+ return _mm256_xor_ps(sign_mask, tmp7);
200
+ }
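The body above is the familiar Abramowitz–Stegun style polynomial approximation; a scalar sketch of the same formula (helper name invented) may be easier to read:

#include <cmath>

// erf(x) ~= sign(x) * (1 - r(t) * t * exp(-x*x)), with t = 1 / (1 + p*|x|)
// and the same coefficients as the vector code above.
inline float erf_approx_sketch(float x) {
  const float p  = 0.3275911f;
  const float p1 = 0.254829592f, p2 = -0.284496736f, p3 = 1.421413741f,
              p4 = -1.453152027f, p5 = 1.061405429f;
  const float sign = std::copysign(1.0f, x);
  const float ax = std::fabs(x);
  const float t = 1.0f / (1.0f + p * ax);
  const float r = (((p5 * t + p4) * t + p3) * t + p2) * t + p1;
  return sign * (1.0f - r * t * std::exp(-ax * ax));
}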
201
+ Vectorized<float> erfc() const {
202
+ return Vectorized<float>(Sleef_erfcf8_u15(values));
203
+ }
204
+ Vectorized<float> erfinv() const {
205
+ return map(calc_erfinv);
206
+ }
207
+ Vectorized<float> exp() const {
208
+ return Vectorized<float>(Sleef_expf8_u10(values));
209
+ }
210
+ Vectorized<float> exp2() const {
211
+ return Vectorized<float>(Sleef_exp2f8_u10(values));
212
+ }
213
+ Vectorized<float> expm1() const {
214
+ return Vectorized<float>(Sleef_expm1f8_u10(values));
215
+ }
216
+ Vectorized<float> exp_u20() const {
217
+ // A faster version of exp with ULP=20
218
+ static __m256 vec_factorial_1 =
219
+ _mm256_set1_ps(0.999999701f); // 1/factorial(1)
220
+ static __m256 vec_factorial_2 =
221
+ _mm256_set1_ps(0.499991506f); // 1/factorial(2)
222
+ static __m256 vec_factorial_3 =
223
+ _mm256_set1_ps(0.166676521f); // 1/factorial(3)
224
+ static __m256 vec_factorial_4 =
225
+ _mm256_set1_ps(0.0418978221f); // 1/factorial(4)
226
+ static __m256 vec_factorial_5 =
227
+ _mm256_set1_ps(0.00828929059f); // 1/factorial(5)
228
+ static __m256 vec_exp_log2ef =
229
+ (__m256)_mm256_set1_epi32(0x3fb8aa3b); // log2(e)
230
+ static __m256 vec_half = _mm256_set1_ps(0.5f);
231
+ static __m256 vec_one = _mm256_set1_ps(1.f);
232
+ static __m256 vec_zero = _mm256_set1_ps(0.f);
233
+ static __m256 vec_two = _mm256_set1_ps(2.f);
234
+ static __m256 vec_ln2f = (__m256)_mm256_set1_epi32(0x3f317218); // ln(2)
235
+ static __m256 vec_ln_flt_min = (__m256)_mm256_set1_epi32(0xc2aeac50);
236
+ static __m256 vec_ln_flt_max = (__m256)_mm256_set1_epi32(0x42b17218);
237
+ static __m256i vec_127 = _mm256_set1_epi32(0x0000007f);
238
+ static int n_mantissa_bits = 23;
239
+
240
+ // exp(x) =
241
+ // = exp(n * ln(2) + r) // divide x by ln(2) and get quot and rem
242
+ // = 2^n * exp(r) // simplify the exp(n*ln(2)) expression
243
+
244
+ auto less_ln_flt_min_mask =
245
+ _mm256_cmp_ps(values, vec_ln_flt_min, 1 /*_CMP_LT_OS*/);
246
+ auto vec_src = _mm256_min_ps(values, vec_ln_flt_max);
247
+ vec_src = _mm256_max_ps(vec_src, vec_ln_flt_min);
248
+
249
+ // fx = floorf(x * log2ef + 0.5)
250
+ auto vec_fx = _mm256_fmadd_ps(vec_src, vec_exp_log2ef, vec_half);
251
+ vec_fx = _mm256_floor_ps(vec_fx);
252
+
253
+ // x = x - fx * ln2
254
+ auto vec_exp_poly = _mm256_fnmadd_ps(vec_fx, vec_ln2f, vec_src);
255
+
256
+ // compute polynomial
257
+ auto vec_res =
258
+ _mm256_fmadd_ps(vec_exp_poly, vec_factorial_5, vec_factorial_4);
259
+ vec_res = _mm256_fmadd_ps(vec_exp_poly, vec_res, vec_factorial_3);
260
+ vec_res = _mm256_fmadd_ps(vec_exp_poly, vec_res, vec_factorial_2);
261
+ vec_res = _mm256_fmadd_ps(vec_exp_poly, vec_res, vec_factorial_1);
262
+ vec_res = _mm256_fmadd_ps(vec_exp_poly, vec_res, vec_one);
263
+
264
+ // compute 2^(n-1)
265
+ auto vec_exp_number = _mm256_sub_ps(vec_fx, vec_one);
266
+ auto vec_exp_number_i = _mm256_cvtps_epi32(vec_exp_number);
267
+ auto vec_two_pow_n_i = _mm256_add_epi32(vec_exp_number_i, vec_127);
268
+ vec_two_pow_n_i = _mm256_slli_epi32(vec_two_pow_n_i, n_mantissa_bits);
269
+ auto vec_two_pow_n = (__m256)vec_two_pow_n_i;
270
+ vec_two_pow_n =
271
+ _mm256_blendv_ps(vec_two_pow_n, vec_zero, less_ln_flt_min_mask);
272
+
273
+ // y = y * 2^n
274
+ vec_res = _mm256_mul_ps(vec_res, vec_two_pow_n);
275
+ vec_res = _mm256_mul_ps(vec_res, vec_two);
276
+ return vec_res;
277
+ }
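The range reduction the comments describe, written out in scalar form (a sketch; the vector version additionally clamps the input to the representable range, builds 2^(n-1) by exponent-field arithmetic, and multiplies by 2 at the end):

#include <cmath>

// exp(x) = 2^n * exp(r), n = round(x / ln 2), r = x - n * ln 2,
// with exp(r) approximated by the same degree-5 polynomial as above.
inline float exp_u20_sketch(float x) {
  const float log2e = 1.442695041f;
  const float ln2   = 0.6931471806f;
  const float n = std::floor(x * log2e + 0.5f);  // matches fx = floor(x*log2(e) + 0.5)
  const float r = x - n * ln2;
  const float p = 1.f + r * (0.999999701f + r * (0.499991506f +
                  r * (0.166676521f + r * (0.0418978221f + r * 0.00828929059f))));
  return std::ldexp(p, static_cast<int>(n));     // scale by 2^n
}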
278
+ Vectorized<float> fmod(const Vectorized<float>& q) const {
279
+ return Vectorized<float>(Sleef_fmodf8(values, q));
280
+ }
281
+ Vectorized<float> log() const {
282
+ return Vectorized<float>(Sleef_logf8_u10(values));
283
+ }
284
+ Vectorized<float> log2() const {
285
+ return Vectorized<float>(Sleef_log2f8_u10(values));
286
+ }
287
+ Vectorized<float> log10() const {
288
+ return Vectorized<float>(Sleef_log10f8_u10(values));
289
+ }
290
+ Vectorized<float> log1p() const {
291
+ return Vectorized<float>(Sleef_log1pf8_u10(values));
292
+ }
293
+ Vectorized<float> frac() const;
294
+ Vectorized<float> sin() const {
295
+ return Vectorized<float>(Sleef_sinf8_u35(values));
296
+ }
297
+ Vectorized<float> sinh() const {
298
+ return Vectorized<float>(Sleef_sinhf8_u10(values));
299
+ }
300
+ Vectorized<float> cos() const {
301
+ return Vectorized<float>(Sleef_cosf8_u35(values));
302
+ }
303
+ Vectorized<float> cosh() const {
304
+ return Vectorized<float>(Sleef_coshf8_u10(values));
305
+ }
306
+ Vectorized<float> ceil() const {
307
+ return _mm256_ceil_ps(values);
308
+ }
309
+ Vectorized<float> floor() const {
310
+ return _mm256_floor_ps(values);
311
+ }
312
+ Vectorized<float> hypot(const Vectorized<float> &b) const {
313
+ return Vectorized<float>(Sleef_hypotf8_u05(values, b));
314
+ }
315
+ Vectorized<float> i0() const {
316
+ return map(calc_i0);
317
+ }
318
+ Vectorized<float> i0e() const {
319
+ return map(calc_i0e);
320
+ }
321
+ Vectorized<float> digamma() const {
322
+ return map(calc_digamma);
323
+ }
324
+ Vectorized<float> igamma(const Vectorized<float> &x) const {
325
+ __at_align__ float tmp[size()];
326
+ __at_align__ float tmp_x[size()];
327
+ store(tmp);
328
+ x.store(tmp_x);
329
+ for (const auto i : c10::irange(size())) {
330
+ tmp[i] = calc_igamma(tmp[i], tmp_x[i]);
331
+ }
332
+ return loadu(tmp);
333
+ }
334
+ Vectorized<float> igammac(const Vectorized<float> &x) const {
335
+ __at_align__ float tmp[size()];
336
+ __at_align__ float tmp_x[size()];
337
+ store(tmp);
338
+ x.store(tmp_x);
339
+ for (const auto i : c10::irange(size())) {
340
+ tmp[i] = calc_igammac(tmp[i], tmp_x[i]);
341
+ }
342
+ return loadu(tmp);
343
+ }
344
+ Vectorized<float> neg() const {
345
+ return _mm256_xor_ps(_mm256_set1_ps(-0.f), values);
346
+ }
347
+ Vectorized<float> nextafter(const Vectorized<float> &b) const {
348
+ return Vectorized<float>(Sleef_nextafterf8(values, b));
349
+ }
350
+ Vectorized<float> round() const {
351
+ return _mm256_round_ps(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
352
+ }
353
+ Vectorized<float> tan() const {
354
+ return Vectorized<float>(Sleef_tanf8_u10(values));
355
+ }
356
+ Vectorized<float> tanh() const {
357
+ return Vectorized<float>(Sleef_tanhf8_u10(values));
358
+ }
359
+ Vectorized<float> trunc() const {
360
+ return _mm256_round_ps(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
361
+ }
362
+ Vectorized<float> lgamma() const {
363
+ return Vectorized<float>(Sleef_lgammaf8_u10(values));
364
+ }
365
+ Vectorized<float> sqrt() const {
366
+ return _mm256_sqrt_ps(values);
367
+ }
368
+ Vectorized<float> reciprocal() const {
369
+ return _mm256_div_ps(_mm256_set1_ps(1), values);
370
+ }
371
+ Vectorized<float> rsqrt() const {
372
+ return _mm256_div_ps(_mm256_set1_ps(1), _mm256_sqrt_ps(values));
373
+ }
374
+ Vectorized<float> pow(const Vectorized<float> &b) const {
375
+ return Vectorized<float>(Sleef_powf8_u10(values, b));
376
+ }
377
+ // Comparison using the _CMP_**_OQ predicate.
378
+ // `O` (ordered): the comparison returns false if either operand is NaN
379
+ // `Q` (quiet): no floating-point exception is raised if an operand is NaN
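+ // For example, a lane holding NaN compares false under _CMP_EQ_OQ (so
+ // operator== yields an all-zero mask in that lane), while operator!= below
+ // uses _CMP_NEQ_UQ (unordered, quiet), so NaN != NaN yields an all-ones mask,
+ // matching IEEE 754 semantics.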
380
+ Vectorized<float> operator==(const Vectorized<float>& other) const {
381
+ return _mm256_cmp_ps(values, other.values, _CMP_EQ_OQ);
382
+ }
383
+
384
+ Vectorized<float> operator!=(const Vectorized<float>& other) const {
385
+ return _mm256_cmp_ps(values, other.values, _CMP_NEQ_UQ);
386
+ }
387
+
388
+ Vectorized<float> operator<(const Vectorized<float>& other) const {
389
+ return _mm256_cmp_ps(values, other.values, _CMP_LT_OQ);
390
+ }
391
+
392
+ Vectorized<float> operator<=(const Vectorized<float>& other) const {
393
+ return _mm256_cmp_ps(values, other.values, _CMP_LE_OQ);
394
+ }
395
+
396
+ Vectorized<float> operator>(const Vectorized<float>& other) const {
397
+ return _mm256_cmp_ps(values, other.values, _CMP_GT_OQ);
398
+ }
399
+
400
+ Vectorized<float> operator>=(const Vectorized<float>& other) const {
401
+ return _mm256_cmp_ps(values, other.values, _CMP_GE_OQ);
402
+ }
403
+
404
+ Vectorized<float> eq(const Vectorized<float>& other) const;
405
+ Vectorized<float> ne(const Vectorized<float>& other) const;
406
+ Vectorized<float> gt(const Vectorized<float>& other) const;
407
+ Vectorized<float> ge(const Vectorized<float>& other) const;
408
+ Vectorized<float> lt(const Vectorized<float>& other) const;
409
+ Vectorized<float> le(const Vectorized<float>& other) const;
410
+ };
411
+
412
+ template <>
413
+ Vectorized<float> inline operator+(const Vectorized<float>& a, const Vectorized<float>& b) {
414
+ return _mm256_add_ps(a, b);
415
+ }
416
+
417
+ template <>
418
+ Vectorized<float> inline operator-(const Vectorized<float>& a, const Vectorized<float>& b) {
419
+ return _mm256_sub_ps(a, b);
420
+ }
421
+
422
+ template <>
423
+ Vectorized<float> inline operator*(const Vectorized<float>& a, const Vectorized<float>& b) {
424
+ return _mm256_mul_ps(a, b);
425
+ }
426
+
427
+ template <>
428
+ Vectorized<float> inline operator/(const Vectorized<float>& a, const Vectorized<float>& b) {
429
+ return _mm256_div_ps(a, b);
430
+ }
431
+
432
+ // frac. Implement this here so we can use subtraction
433
+ inline Vectorized<float> Vectorized<float>::frac() const {
434
+ return *this - this->trunc();
435
+ }
436
+
437
+ // Implements the IEEE 754 201X `maximum` operation, which propagates NaN if
438
+ // either input is a NaN.
439
+ template <>
440
+ Vectorized<float> inline maximum(const Vectorized<float>& a, const Vectorized<float>& b) {
441
+ Vectorized<float> max = _mm256_max_ps(a, b);
442
+ Vectorized<float> isnan = _mm256_cmp_ps(a, b, _CMP_UNORD_Q);
443
+ // Exploit the fact that all-ones is a NaN.
444
+ return _mm256_or_ps(max, isnan);
445
+ }
446
+
447
+ // Implements the IEEE 754 201X `minimum` operation, which propagates NaN if
448
+ // either input is a NaN.
449
+ template <>
450
+ Vectorized<float> inline minimum(const Vectorized<float>& a, const Vectorized<float>& b) {
451
+ Vectorized<float> min = _mm256_min_ps(a, b);
452
+ Vectorized<float> isnan = _mm256_cmp_ps(a, b, _CMP_UNORD_Q);
453
+ // Exploit the fact that all-ones is a NaN.
454
+ return _mm256_or_ps(min, isnan);
455
+ }
456
+
457
+ template <>
458
+ Vectorized<float> inline clamp(const Vectorized<float>& a, const Vectorized<float>& min, const Vectorized<float>& max) {
459
+ return _mm256_min_ps(max, _mm256_max_ps(min, a));
460
+ }
461
+
462
+ template <>
463
+ Vectorized<float> inline clamp_max(const Vectorized<float>& a, const Vectorized<float>& max) {
464
+ return _mm256_min_ps(max, a);
465
+ }
466
+
467
+ template <>
468
+ Vectorized<float> inline clamp_min(const Vectorized<float>& a, const Vectorized<float>& min) {
469
+ return _mm256_max_ps(min, a);
470
+ }
471
+
472
+ template <>
473
+ Vectorized<float> inline operator&(const Vectorized<float>& a, const Vectorized<float>& b) {
474
+ return _mm256_and_ps(a, b);
475
+ }
476
+
477
+ template <>
478
+ Vectorized<float> inline operator|(const Vectorized<float>& a, const Vectorized<float>& b) {
479
+ return _mm256_or_ps(a, b);
480
+ }
481
+
482
+ template <>
483
+ Vectorized<float> inline operator^(const Vectorized<float>& a, const Vectorized<float>& b) {
484
+ return _mm256_xor_ps(a, b);
485
+ }
486
+
487
+ inline Vectorized<float> Vectorized<float>::eq(const Vectorized<float>& other) const {
488
+ return (*this == other) & Vectorized<float>(1.0f);
489
+ }
490
+
491
+ inline Vectorized<float> Vectorized<float>::ne(const Vectorized<float>& other) const {
492
+ return (*this != other) & Vectorized<float>(1.0f);
493
+ }
494
+
495
+ inline Vectorized<float> Vectorized<float>::gt(const Vectorized<float>& other) const {
496
+ return (*this > other) & Vectorized<float>(1.0f);
497
+ }
498
+
499
+ inline Vectorized<float> Vectorized<float>::ge(const Vectorized<float>& other) const {
500
+ return (*this >= other) & Vectorized<float>(1.0f);
501
+ }
502
+
503
+ inline Vectorized<float> Vectorized<float>::lt(const Vectorized<float>& other) const {
504
+ return (*this < other) & Vectorized<float>(1.0f);
505
+ }
506
+
507
+ inline Vectorized<float> Vectorized<float>::le(const Vectorized<float>& other) const {
508
+ return (*this <= other) & Vectorized<float>(1.0f);
509
+ }
510
+
511
+ template <>
512
+ inline void convert(const float* src, float* dst, int64_t n) {
513
+ int64_t i;
514
+ #pragma unroll
515
+ for (i = 0; i <= (n - Vectorized<float>::size()); i += Vectorized<float>::size()) {
516
+ _mm256_storeu_ps(dst + i, _mm256_loadu_ps(src + i));
517
+ }
518
+ #pragma unroll
519
+ for (; i < n; i++) {
520
+ dst[i] = src[i];
521
+ }
522
+ }
523
+
524
+
525
+ template <>
526
+ Vectorized<float> inline fmadd(const Vectorized<float>& a, const Vectorized<float>& b, const Vectorized<float>& c) {
527
+ return _mm256_fmadd_ps(a, b, c);
528
+ }
529
+
530
+ template <>
531
+ Vectorized<float> inline fmsub(const Vectorized<float>& a, const Vectorized<float>& b, const Vectorized<float>& c) {
532
+ return _mm256_fmsub_ps(a, b, c);
533
+ }
534
+
535
+ // Used by Inductor CPP codegen
536
+ template<>
537
+ inline void transpose_mxn<float, 8, 8>(
538
+ const float* src,
539
+ int64_t ld_src,
540
+ float* dst,
541
+ int64_t ld_dst) {
542
+ // load from src to registers
543
+ // a: a0 a1 a2 a3 a4 a5 a6 a7
544
+ // b: b0 b1 b2 b3 b4 b5 b6 b7
545
+ // c: c0 c1 c2 c3 c4 c5 c6 c7
546
+ // d: d0 d1 d2 d3 d4 d5 d6 d7
547
+ // e: e0 e1 e2 e3 e4 e5 e6 e7
548
+ // f: f0 f1 f2 f3 f4 f5 f6 f7
549
+ // g: g0 g1 g2 g3 g4 g5 g6 g7
550
+ // h: h0 h1 h2 h3 h4 h5 h6 h7
551
+ __m256 a = _mm256_loadu_ps(&src[0 * ld_src]);
552
+ __m256 b = _mm256_loadu_ps(&src[1 * ld_src]);
553
+ __m256 c = _mm256_loadu_ps(&src[2 * ld_src]);
554
+ __m256 d = _mm256_loadu_ps(&src[3 * ld_src]);
555
+ __m256 e = _mm256_loadu_ps(&src[4 * ld_src]);
556
+ __m256 f = _mm256_loadu_ps(&src[5 * ld_src]);
557
+ __m256 g = _mm256_loadu_ps(&src[6 * ld_src]);
558
+ __m256 h = _mm256_loadu_ps(&src[7 * ld_src]);
559
+
560
+ __m256 ta, tb, tc, td, te, tf, tg, th;
561
+ // unpacking and interleaving 32-bit elements
562
+ // a0 b0 a1 b1 a4 b4 a5 b5
563
+ // a2 b2 a3 b3 a6 b6 a7 b7
564
+ // c0 d0 c1 d1 ...
565
+ // c2 d2 c3 d3 ...
566
+ // e0 f0 e1 f1 ...
567
+ // e2 f2 e3 f3 ...
568
+ // g0 h0 g1 h1 ...
569
+ // g2 h2 g3 h3 ...
570
+ ta = _mm256_unpacklo_ps(a, b);
571
+ tb = _mm256_unpackhi_ps(a, b);
572
+ tc = _mm256_unpacklo_ps(c, d);
573
+ td = _mm256_unpackhi_ps(c, d);
574
+ te = _mm256_unpacklo_ps(e, f);
575
+ tf = _mm256_unpackhi_ps(e, f);
576
+ tg = _mm256_unpacklo_ps(g, h);
577
+ th = _mm256_unpackhi_ps(g, h);
578
+
579
+ // unpacking and interleaving 64-bit elements
580
+ // a0 b0 c0 d0 a4 b4 c4 d4
581
+ // a1 b1 c1 d1 ...
582
+ // a2 b2 c2 d2 ...
583
+ // a3 b3 c3 d3 ...
584
+ // e0 f0 g0 h0 e4 f4 g4 h4
585
+ // e1 f1 g1 h1 ...
586
+ // e2 f2 g2 h2 ...
587
+ // e3 f3 g3 h3 ...
588
+ a = _mm256_castpd_ps(
589
+ _mm256_unpacklo_pd(_mm256_castps_pd(ta), _mm256_castps_pd(tc)));
590
+ b = _mm256_castpd_ps(
591
+ _mm256_unpackhi_pd(_mm256_castps_pd(ta), _mm256_castps_pd(tc)));
592
+ c = _mm256_castpd_ps(
593
+ _mm256_unpacklo_pd(_mm256_castps_pd(tb), _mm256_castps_pd(td)));
594
+ d = _mm256_castpd_ps(
595
+ _mm256_unpackhi_pd(_mm256_castps_pd(tb), _mm256_castps_pd(td)));
596
+ e = _mm256_castpd_ps(
597
+ _mm256_unpacklo_pd(_mm256_castps_pd(te), _mm256_castps_pd(tg)));
598
+ f = _mm256_castpd_ps(
599
+ _mm256_unpackhi_pd(_mm256_castps_pd(te), _mm256_castps_pd(tg)));
600
+ g = _mm256_castpd_ps(
601
+ _mm256_unpacklo_pd(_mm256_castps_pd(tf), _mm256_castps_pd(th)));
602
+ h = _mm256_castpd_ps(
603
+ _mm256_unpackhi_pd(_mm256_castps_pd(tf), _mm256_castps_pd(th)));
604
+
605
+ // shuffle 128-bits (composed of 4 32-bit elements)
606
+ // a0 b0 c0 d0 e0 f0 g0 h0
607
+ // a1 b1 c1 d1 ...
608
+ // a2 b2 c2 d2 ...
609
+ // a3 b3 c3 d3 ...
610
+ // a4 b4 c4 d4 ...
611
+ // a5 b5 c5 d5 ...
612
+ // a6 b6 c6 d6 ...
613
+ // a7 b7 c7 d7 ...
614
+ ta = _mm256_permute2f128_ps(a, e, 0x20);
615
+ tb = _mm256_permute2f128_ps(b, f, 0x20);
616
+ tc = _mm256_permute2f128_ps(c, g, 0x20);
617
+ td = _mm256_permute2f128_ps(d, h, 0x20);
618
+ te = _mm256_permute2f128_ps(a, e, 0x31);
619
+ tf = _mm256_permute2f128_ps(b, f, 0x31);
620
+ tg = _mm256_permute2f128_ps(c, g, 0x31);
621
+ th = _mm256_permute2f128_ps(d, h, 0x31);
622
+
623
+ // store from registers to dst
624
+ _mm256_storeu_ps(&dst[0 * ld_dst], ta);
625
+ _mm256_storeu_ps(&dst[1 * ld_dst], tb);
626
+ _mm256_storeu_ps(&dst[2 * ld_dst], tc);
627
+ _mm256_storeu_ps(&dst[3 * ld_dst], td);
628
+ _mm256_storeu_ps(&dst[4 * ld_dst], te);
629
+ _mm256_storeu_ps(&dst[5 * ld_dst], tf);
630
+ _mm256_storeu_ps(&dst[6 * ld_dst], tg);
631
+ _mm256_storeu_ps(&dst[7 * ld_dst], th);
632
+ }
633
+
634
+ #endif
635
+
636
+ }} // namespace at::vec::CPU_CAPABILITY
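The AVX2 Vectorized<float> specialization above is normally consumed through the generic at::vec API rather than used directly. The following is a minimal, hypothetical usage sketch, not part of these headers (the function name and buffers are invented for illustration): it loads eight floats at a time, applies the fmadd defined above, and stores the result, with a scalar tail for the remainder.

#include <ATen/cpu/vec/vec.h>
#include <cstdint>

// Hypothetical axpy-style loop: out[i] = a * x[i] + y[i].
void axpy_sketch(float a, const float* x, const float* y, float* out, int64_t n) {
  using Vec = at::vec::Vectorized<float>;
  const Vec a_vec(a);
  int64_t i = 0;
  for (; i + Vec::size() <= n; i += Vec::size()) {
    Vec x_vec = Vec::loadu(x + i);
    Vec y_vec = Vec::loadu(y + i);
    // fmadd(a, b, c) computes a * b + c; on AVX2 builds it lowers to _mm256_fmadd_ps.
    at::vec::fmadd(x_vec, a_vec, y_vec).store(out + i);
  }
  for (; i < n; i++) {
    out[i] = a * x[i] + y[i];  // scalar tail
  }
}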
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_float_neon.h ADDED
@@ -0,0 +1,892 @@
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+
6
+ #include <ATen/cpu/vec/intrinsics.h>
7
+ #include <ATen/cpu/vec/vec_base.h>
8
+ #include <c10/util/irange.h>
9
+
10
+ #if defined(__aarch64__) && defined(AT_BUILD_ARM_VEC256_WITH_SLEEF)
11
+ #include <sleef.h>
12
+ #endif
13
+
14
+ // Sleef offers vectorized versions of some transcendentals
15
+ // such as sin, cos, tan, etc.
16
+ // However, for now we opt for the STL, since we are not yet building
17
+ // with Sleef for mobile.
18
+
19
+ namespace at::vec {
20
+ // See Note [CPU_CAPABILITY namespace]
21
+ inline namespace CPU_CAPABILITY {
22
+
23
+ // Right now this contains only the aarch64 implementation.
24
+ // aarch32 is not currently supported, for the following two reasons:
25
+ // 1. Due to differences in the ISA between aarch32 and aarch64, intrinsics
26
+ // that work for aarch64 don't work for aarch32.
27
+ // 2. Android NDK r21 has problems with compiling aarch32.
28
+ // Clang seg faults.
29
+ // https://github.com/android/ndk/issues/1248
30
+ // https://bugs.llvm.org/show_bug.cgi?id=45824
31
+ // Most likely we will do aarch32 support with inline asm.
32
+ #if defined(__aarch64__)
33
+
34
+ #ifdef __BIG_ENDIAN__
35
+ #error "Big endian is not supported."
36
+ #endif
37
+
38
+ #if defined(AT_BUILD_ARM_VEC256_WITH_SLEEF)
39
+ #define USE_SLEEF(sleef_code, non_sleef_code) sleef_code
40
+ #else
41
+ #define USE_SLEEF(sleef_code, non_sleef_code) non_sleef_code
42
+ #endif
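+ // For example, sin() below is written as
+ //   USE_SLEEF(Vectorized<float>(Sleef_sinf4_u10(...)), map(std::sin))
+ // which compiles to the Sleef path when AT_BUILD_ARM_VEC256_WITH_SLEEF is
+ // defined and to the scalar std::sin fallback otherwise.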
43
+
44
+ template<int index, bool mask_val>
45
+ struct BlendRegs {
46
+ static float32x4_t impl(
47
+ const float32x4_t& a, const float32x4_t& b, float32x4_t& res);
48
+ };
49
+
50
+ template<int index>
51
+ struct BlendRegs<index, true>{
52
+ static float32x4_t impl(
53
+ const float32x4_t& a, const float32x4_t& b, float32x4_t& res) {
54
+ return vsetq_lane_f32(vgetq_lane_f32(b, index), res, index);
55
+ }
56
+ };
57
+
58
+ template<int index>
59
+ struct BlendRegs<index, false>{
60
+ static float32x4_t impl(
61
+ const float32x4_t& a, const float32x4_t& b, float32x4_t& res) {
62
+ return vsetq_lane_f32(vgetq_lane_f32(a, index), res, index);
63
+ }
64
+ };
65
+
66
+ template <> class Vectorized<float> {
67
+ private:
68
+ float32x4x2_t values;
69
+ public:
70
+ using value_type = float;
71
+ using size_type = int;
72
+ static constexpr size_type size() {
73
+ return 8;
74
+ }
75
+ Vectorized() {}
76
+ Vectorized(float32x4x2_t v) : values(v) {}
77
+ Vectorized(float val) : values{vdupq_n_f32(val), vdupq_n_f32(val) } {}
78
+ Vectorized(float val0, float val1, float val2, float val3,
79
+ float val4, float val5, float val6, float val7) :
80
+ values{val0, val1, val2, val3, val4, val5, val6, val7} {}
81
+ Vectorized(float32x4_t val0, float32x4_t val1) : values{val0, val1} {}
82
+ operator float32x4x2_t() const {
83
+ return values;
84
+ }
85
+ template <int64_t mask>
86
+ static Vectorized<float> blend(const Vectorized<float>& a, const Vectorized<float>& b) {
87
+ Vectorized<float> vec;
88
+ // 0.
89
+ vec.values.val[0] =
90
+ BlendRegs<0, (mask & 0x01)!=0>::impl(
91
+ a.values.val[0], b.values.val[0], vec.values.val[0]);
92
+ vec.values.val[0] =
93
+ BlendRegs<1, (mask & 0x02)!=0>::impl(
94
+ a.values.val[0], b.values.val[0], vec.values.val[0]);
95
+ vec.values.val[0] =
96
+ BlendRegs<2, (mask & 0x04)!=0>::impl(
97
+ a.values.val[0], b.values.val[0], vec.values.val[0]);
98
+ vec.values.val[0] =
99
+ BlendRegs<3, (mask & 0x08)!=0>::impl(
100
+ a.values.val[0], b.values.val[0], vec.values.val[0]);
101
+ // 1.
102
+ vec.values.val[1] =
103
+ BlendRegs<0, (mask & 0x10)!=0>::impl(
104
+ a.values.val[1], b.values.val[1], vec.values.val[1]);
105
+ vec.values.val[1] =
106
+ BlendRegs<1, (mask & 0x20)!=0>::impl(
107
+ a.values.val[1], b.values.val[1], vec.values.val[1]);
108
+ vec.values.val[1] =
109
+ BlendRegs<2, (mask & 0x40)!=0>::impl(
110
+ a.values.val[1], b.values.val[1], vec.values.val[1]);
111
+ vec.values.val[1] =
112
+ BlendRegs<3, (mask & 0x80)!=0>::impl(
113
+ a.values.val[1], b.values.val[1], vec.values.val[1]);
114
+ return vec;
115
+ }
116
+ static Vectorized<float> blendv(const Vectorized<float>& a, const Vectorized<float>& b,
117
+ const Vectorized<float>& mask) {
118
+ // TODO
119
+ // NB: This requires that each 32-bit lane of the mask be either
120
+ // all zeros or all ones.
121
+ // We could perhaps add an assert for this,
122
+ // but that would affect performance.
123
+ Vectorized<float> vec(mask.values);
124
+ vec.values.val[0] = vbslq_f32(
125
+ vreinterpretq_u32_f32(vec.values.val[0]),
126
+ b.values.val[0],
127
+ a.values.val[0]);
128
+ vec.values.val[1] = vbslq_f32(
129
+ vreinterpretq_u32_f32(vec.values.val[1]),
130
+ b.values.val[1],
131
+ a.values.val[1]);
132
+ return vec;
133
+ }
134
+ template<typename step_t>
135
+ static Vectorized<float> arange(float base = 0.f, step_t step = static_cast<step_t>(1)) {
136
+ const Vectorized<float> base_vec(base);
137
+ const Vectorized<float> step_vec(step);
138
+ const Vectorized<float> step_sizes(0, 1, 2, 3, 4, 5, 6, 7);
139
+ return fmadd(step_sizes, step_vec, base_vec);
140
+ }
141
+ static Vectorized<float> set(const Vectorized<float>& a, const Vectorized<float>& b,
142
+ int64_t count = size()) {
143
+ switch (count) {
144
+ case 0:
145
+ return a;
146
+ case 1:
147
+ {
148
+ Vectorized<float> vec;
149
+ static uint32x4_t mask_low = {0xFFFFFFFF, 0x0, 0x0, 0x0};
150
+ vec.values.val[0] = vreinterpretq_f32_u32(mask_low);
151
+ vec.values.val[1] = a.values.val[1];
152
+ vec.values.val[0] = vbslq_f32(
153
+ vreinterpretq_u32_f32(vec.values.val[0]),
154
+ b.values.val[0],
155
+ a.values.val[0]);
156
+ return vec;
157
+ }
158
+ case 2:
159
+ {
160
+ Vectorized<float> vec;
161
+ static uint32x4_t mask_low = {0xFFFFFFFF, 0xFFFFFFFF, 0x0, 0x0};
162
+ vec.values.val[0] = vreinterpretq_f32_u32(mask_low);
163
+ vec.values.val[1] = a.values.val[1];
164
+ vec.values.val[0] = vbslq_f32(
165
+ vreinterpretq_u32_f32(vec.values.val[0]),
166
+ b.values.val[0],
167
+ a.values.val[0]);
168
+ return vec;
169
+ }
170
+ case 3:
171
+ {
172
+ Vectorized<float> vec;
173
+ static uint32x4_t mask_low = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x0};
174
+ vec.values.val[0] = vreinterpretq_f32_u32(mask_low);
175
+ vec.values.val[1] = a.values.val[1];
176
+ vec.values.val[0] = vbslq_f32(
177
+ vreinterpretq_u32_f32(vec.values.val[0]),
178
+ b.values.val[0],
179
+ a.values.val[0]);
180
+ return vec;
181
+ }
182
+ case 4:
183
+ return Vectorized<float>(b.values.val[0], a.values.val[1]);
184
+ case 5:
185
+ {
186
+ Vectorized<float> vec;
187
+ static uint32x4_t mask_high = {0xFFFFFFFF, 0x0, 0x0, 0x0};
188
+ vec.values.val[0] = b.values.val[0];
189
+ vec.values.val[1] = vreinterpretq_f32_u32(mask_high);
190
+ vec.values.val[1] = vbslq_f32(
191
+ vreinterpretq_u32_f32(vec.values.val[1]),
192
+ b.values.val[1],
193
+ a.values.val[1]);
194
+ return vec;
195
+ }
196
+ case 6:
197
+ {
198
+ Vectorized<float> vec;
199
+ static uint32x4_t mask_high = {0xFFFFFFFF, 0xFFFFFFFF, 0x0, 0x0};
200
+ vec.values.val[0] = b.values.val[0];
201
+ vec.values.val[1] = vreinterpretq_f32_u32(mask_high);
202
+ vec.values.val[1] = vbslq_f32(
203
+ vreinterpretq_u32_f32(vec.values.val[1]),
204
+ b.values.val[1],
205
+ a.values.val[1]);
206
+ return vec;
207
+ }
208
+ case 7:
209
+ {
210
+ Vectorized<float> vec;
211
+ static uint32x4_t mask_high = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x0};
212
+ vec.values.val[0] = b.values.val[0];
213
+ vec.values.val[1] = vreinterpretq_f32_u32(mask_high);
214
+ vec.values.val[1] = vbslq_f32(
215
+ vreinterpretq_u32_f32(vec.values.val[1]),
216
+ b.values.val[1],
217
+ a.values.val[1]);
218
+ return vec;
219
+ }
220
+ }
221
+ return b;
222
+ }
223
+ static Vectorized<float> loadu(const void* ptr, int64_t count = size()) {
224
+ if (count == size()) {
225
+ return vld1q_f32_x2(reinterpret_cast<const float*>(ptr));
226
+ }
227
+ else if (count == (size() >> 1)) {
228
+ Vectorized<float> res;
229
+ res.values.val[0] = vld1q_f32(reinterpret_cast<const float*>(ptr));
230
+ res.values.val[1] = vdupq_n_f32(0.f);
231
+ return res;
232
+ }
233
+ else {
234
+ __at_align__ float tmp_values[size()];
235
+ for (const auto i : c10::irange(size())) {
236
+ tmp_values[i] = 0.0;
237
+ }
238
+ std::memcpy(
239
+ tmp_values,
240
+ reinterpret_cast<const float*>(ptr),
241
+ count * sizeof(float));
242
+ return vld1q_f32_x2(reinterpret_cast<const float*>(tmp_values));
243
+ }
244
+ }
245
+ void store(void* ptr, int64_t count = size()) const {
246
+ if (count == size()) {
247
+ vst1q_f32_x2(reinterpret_cast<float*>(ptr), values);
248
+ }
249
+ else if (count == (size() >> 1)) {
250
+ vst1q_f32(reinterpret_cast<float*>(ptr), values.val[0]);
251
+ }
252
+ else {
253
+ float tmp_values[size()];
254
+ vst1q_f32_x2(reinterpret_cast<float*>(tmp_values), values);
255
+ std::memcpy(ptr, tmp_values, count * sizeof(float));
256
+ }
257
+ }
258
+ inline const float32x4_t& get_low() const {
259
+ return values.val[0];
260
+ }
261
+ inline float32x4_t& get_low() {
262
+ return values.val[0];
263
+ }
264
+ inline const float32x4_t& get_high() const {
265
+ return values.val[1];
266
+ }
267
+ inline float32x4_t& get_high() {
268
+ return values.val[1];
269
+ }
270
+ // Very slow implementation of indexing.
271
+ // Only required because vec256_qint refers to this.
272
+ // Once we specialize that implementation for ARM
273
+ // this should be removed. TODO (kimishpatel)
274
+ float operator[](int idx) const {
275
+ __at_align__ float tmp[size()];
276
+ store(tmp);
277
+ return tmp[idx];
278
+ }
279
+ float operator[](int idx) {
280
+ __at_align__ float tmp[size()];
281
+ store(tmp);
282
+ return tmp[idx];
283
+ }
284
+ // For the boolean version, where we want to check for "any lane set" /
285
+ // "all lanes zero" etc., this can be done faster in a different way.
286
+ int zero_mask() const {
287
+ __at_align__ float tmp[size()];
288
+ store(tmp);
289
+ int mask = 0;
290
+ for (int i = 0; i < size(); ++ i) {
291
+ if (tmp[i] == 0.f) {
292
+ mask |= (1 << i);
293
+ }
294
+ }
295
+ return mask;
296
+ }
297
+ Vectorized<float> isnan() const {
298
+ __at_align__ float tmp[size()];
299
+ __at_align__ float res[size()];
300
+ store(tmp);
301
+ for (const auto i : c10::irange(size())) {
302
+ if (_isnan(tmp[i])) {
303
+ std::memset(static_cast<void*>(&res[i]), 0xFF, sizeof(float));
304
+ } else {
305
+ std::memset(static_cast<void*>(&res[i]), 0, sizeof(float));
306
+ }
307
+ }
308
+ return loadu(res);
309
+ };
310
+ bool has_inf_nan() const {
311
+ __at_align__ float tmp[size()];
312
+ store(tmp);
313
+ for (const auto i : c10::irange(size())) {
314
+ if(_isnan(tmp[i]) || _isinf(tmp[i])) {
315
+ return true;
316
+ }
317
+ }
318
+ return false;
319
+ }
320
+ Vectorized<float> map(float (*const f)(float)) const {
321
+ __at_align__ float tmp[size()];
322
+ store(tmp);
323
+ for (const auto i : c10::irange(size())) {
324
+ tmp[i] = f(tmp[i]);
325
+ }
326
+ return loadu(tmp);
327
+ }
328
+ Vectorized<float> abs() const {
329
+ return Vectorized<float>(vabsq_f32(values.val[0]), vabsq_f32(values.val[1]));
330
+ }
331
+ Vectorized<float> angle() const {
332
+ auto zero = Vectorized<float>(0);
333
+ auto pi = Vectorized<float>(c10::pi<float>);
334
+ auto tmp = blendv(zero, pi, *this < zero);
335
+ return blendv(tmp, *this, isnan());
336
+ }
337
+ Vectorized<float> real() const {
338
+ return *this;
339
+ }
340
+ Vectorized<float> imag() const {
341
+ return Vectorized<float>(0.f);
342
+ }
343
+ Vectorized<float> conj() const {
344
+ return *this;
345
+ }
346
+ Vectorized<float> acos() const {
347
+ return USE_SLEEF(
348
+ Vectorized<float>(Sleef_acosf4_u10(values.val[0]), Sleef_acosf4_u10(values.val[1])),
349
+ map(std::acos)
350
+ );
351
+ }
352
+ Vectorized<float> asin() const {
353
+ return USE_SLEEF(
354
+ Vectorized<float>(Sleef_asinf4_u10(values.val[0]), Sleef_asinf4_u10(values.val[1])),
355
+ map(std::asin)
356
+ );
357
+ }
358
+ Vectorized<float> atan() const {
359
+ return USE_SLEEF(
360
+ Vectorized<float>(Sleef_atanf4_u10(values.val[0]), Sleef_atanf4_u10(values.val[1])),
361
+ map(std::atan)
362
+ );
363
+ }
364
+ Vectorized<float> atanh() const {
365
+ return USE_SLEEF(
366
+ Vectorized<float>(Sleef_atanhf4_u10(values.val[0]), Sleef_atanhf4_u10(values.val[1])),
367
+ map(std::atanh)
368
+ );
369
+ }
370
+ Vectorized<float> atan2(const Vectorized<float> &exp) const {
371
+ USE_SLEEF(
372
+ {
373
+ return Vectorized<float>(Sleef_atan2f4_u10(values.val[0], exp.values.val[0]),
374
+ Sleef_atan2f4_u10(values.val[1], exp.values.val[1]));
375
+ },
376
+ {
377
+ __at_align__ float tmp[size()];
378
+ __at_align__ float tmp_exp[size()];
379
+ store(tmp);
380
+ exp.store(tmp_exp);
381
+ for (const auto i : c10::irange(size())) {
382
+ tmp[i] = std::atan2(tmp[i], tmp_exp[i]);
383
+ }
384
+ return loadu(tmp);
385
+ }
386
+ )
387
+ }
388
+ Vectorized<float> copysign(const Vectorized<float> &sign) const {
389
+ USE_SLEEF(
390
+ {
391
+ return Vectorized<float>(Sleef_copysignf4(values.val[0], sign.values.val[0]),
392
+ Sleef_copysignf4(values.val[1], sign.values.val[1]));
393
+ },
394
+ {
395
+ __at_align__ float tmp[size()];
396
+ __at_align__ float tmp_sign[size()];
397
+ store(tmp);
398
+ sign.store(tmp_sign);
399
+ for (size_type i = 0; i < size(); i++) {
400
+ tmp[i] = std::copysign(tmp[i], tmp_sign[i]);
401
+ }
402
+ return loadu(tmp);
403
+ }
404
+ )
405
+ }
406
+ Vectorized<float> erf() const;
407
+ Vectorized<float> erfc() const {
408
+ return USE_SLEEF(
409
+ Vectorized<float>(Sleef_erfcf4_u15(values.val[0]), Sleef_erfcf4_u15(values.val[1])),
410
+ map(std::erfc)
411
+ );
412
+ }
413
+ Vectorized<float> erfinv() const {
414
+ return map(calc_erfinv);
415
+ }
416
+ Vectorized<float> exp() const {
417
+ return USE_SLEEF(
418
+ Vectorized<float>(Sleef_expf4_u10(values.val[0]), Sleef_expf4_u10(values.val[1])),
419
+ map(std::exp)
420
+ );
421
+ }
422
+ Vectorized<float> exp2() const {
423
+ return USE_SLEEF(
424
+ Vectorized<float>(Sleef_exp2f4_u10(values.val[0]), Sleef_exp2f4_u10(values.val[1])),
425
+ map(std::exp2)
426
+ );
427
+ }
428
+ Vectorized<float> expm1() const {
429
+ return USE_SLEEF(
430
+ Vectorized<float>(Sleef_expm1f4_u10(values.val[0]), Sleef_expm1f4_u10(values.val[1])),
431
+ map(std::expm1)
432
+ );
433
+ }
434
+ Vectorized<float> exp_u20() const {
435
+ return exp();
436
+ }
437
+ Vectorized<float> fmod(const Vectorized<float>& q) const {
438
+ USE_SLEEF(
439
+ {
440
+ return Vectorized<float>(Sleef_fmodf4(values.val[0], q.values.val[0]),
441
+ Sleef_fmodf4(values.val[1], q.values.val[1]));
442
+ },
443
+ {
444
+ __at_align__ float tmp[size()];
445
+ __at_align__ float tmp_q[size()];
446
+ store(tmp);
447
+ q.store(tmp_q);
448
+ for (const auto i : c10::irange(size())) {
449
+ tmp[i] = std::fmod(tmp[i], tmp_q[i]);
450
+ }
451
+ return loadu(tmp);
452
+ }
453
+ )
454
+ }
455
+ Vectorized<float> hypot(const Vectorized<float> &b) const {
456
+ USE_SLEEF(
457
+ {
458
+ return Vectorized<float>(Sleef_hypotf4_u05(values.val[0], b.values.val[0]),
459
+ Sleef_hypotf4_u05(values.val[1], b.values.val[1]));
460
+ },
461
+ {
462
+ __at_align__ float tmp[size()];
463
+ __at_align__ float tmp_b[size()];
464
+ store(tmp);
465
+ b.store(tmp_b);
466
+ for (const auto i : c10::irange(size())) {
467
+ tmp[i] = std::hypot(tmp[i], tmp_b[i]);
468
+ }
469
+ return loadu(tmp);
470
+ }
471
+ )
472
+ }
473
+ Vectorized<float> i0() const {
474
+ return map(calc_i0);
475
+ }
476
+ Vectorized<float> i0e() const {
477
+ return map(calc_i0e);
478
+ }
479
+ Vectorized<float> digamma() const {
480
+ return map(calc_digamma);
481
+ }
482
+ Vectorized<float> igamma(const Vectorized<float> &x) const {
483
+ __at_align__ float tmp[size()];
484
+ __at_align__ float tmp_x[size()];
485
+ store(tmp);
486
+ x.store(tmp_x);
487
+ for (const auto i : c10::irange(size())) {
488
+ tmp[i] = calc_igamma(tmp[i], tmp_x[i]);
489
+ }
490
+ return loadu(tmp);
491
+ }
492
+ Vectorized<float> igammac(const Vectorized<float> &x) const {
493
+ __at_align__ float tmp[size()];
494
+ __at_align__ float tmp_x[size()];
495
+ store(tmp);
496
+ x.store(tmp_x);
497
+ for (const auto i : c10::irange(size())) {
498
+ tmp[i] = calc_igammac(tmp[i], tmp_x[i]);
499
+ }
500
+ return loadu(tmp);
501
+ }
502
+ Vectorized<float> log() const {
503
+ return USE_SLEEF(
504
+ Vectorized<float>(Sleef_logf4_u10(values.val[0]), Sleef_logf4_u10(values.val[1])),
505
+ map(std::log)
506
+ );
507
+ }
508
+ Vectorized<float> log10() const {
509
+ return USE_SLEEF(
510
+ Vectorized<float>(Sleef_log10f4_u10(values.val[0]), Sleef_log10f4_u10(values.val[1])),
511
+ map(std::log10)
512
+ );
513
+ }
514
+ Vectorized<float> log1p() const {
515
+ return USE_SLEEF(
516
+ Vectorized<float>(Sleef_log1pf4_u10(values.val[0]), Sleef_log1pf4_u10(values.val[1])),
517
+ map(std::log1p)
518
+ );
519
+ }
520
+ Vectorized<float> log2() const {
521
+ return USE_SLEEF(
522
+ Vectorized<float>(Sleef_log2f4_u10(values.val[0]), Sleef_log2f4_u10(values.val[1])),
523
+ map(std::log2)
524
+ );
525
+ }
526
+ Vectorized<float> nextafter(const Vectorized<float> &b) const {
527
+ USE_SLEEF(
528
+ {
529
+ return Vectorized<float>(Sleef_nextafterf4(values.val[0], b.values.val[0]),
530
+ Sleef_nextafterf4(values.val[1], b.values.val[1]));
531
+ },
532
+ {
533
+ __at_align__ float tmp[size()];
534
+ __at_align__ float tmp_b[size()];
535
+ store(tmp);
536
+ b.store(tmp_b);
537
+ for (const auto i : c10::irange(size())) {
538
+ tmp[i] = std::nextafter(tmp[i], tmp_b[i]);
539
+ }
540
+ return loadu(tmp);
541
+ }
542
+ )
543
+ }
544
+ Vectorized<float> frac() const;
545
+ Vectorized<float> sin() const {
546
+ return USE_SLEEF(
547
+ Vectorized<float>(Sleef_sinf4_u10(values.val[0]), Sleef_sinf4_u10(values.val[1])),
548
+ map(std::sin)
549
+ );
550
+ }
551
+ Vectorized<float> sinh() const {
552
+ return USE_SLEEF(
553
+ Vectorized<float>(Sleef_sinhf4_u10(values.val[0]), Sleef_sinhf4_u10(values.val[1])),
554
+ map(std::sinh)
555
+ );
556
+ }
557
+ Vectorized<float> cos() const {
558
+ return USE_SLEEF(
559
+ Vectorized<float>(Sleef_cosf4_u10(values.val[0]), Sleef_cosf4_u10(values.val[1])),
560
+ map(std::cos)
561
+ );
562
+ }
563
+ Vectorized<float> cosh() const {
564
+ return USE_SLEEF(
565
+ Vectorized<float>(Sleef_coshf4_u10(values.val[0]), Sleef_coshf4_u10(values.val[1])),
566
+ map(std::cosh)
567
+ );
568
+ }
569
+ Vectorized<float> ceil() const {
570
+ return map(at::native::ceil_impl);
571
+ }
572
+ Vectorized<float> floor() const {
573
+ return map(at::native::floor_impl);
574
+ }
575
+ Vectorized<float> neg() const {
576
+ return Vectorized<float>(
577
+ vnegq_f32(values.val[0]),
578
+ vnegq_f32(values.val[1]));
579
+ }
580
+ Vectorized<float> round() const {
581
+ // We do not use std::round because we would like to round midway numbers to the nearest even integer.
582
+ return map(at::native::round_impl);
583
+ }
584
+ Vectorized<float> tan() const {
585
+ return USE_SLEEF(
586
+ Vectorized<float>(Sleef_tanf4_u10(values.val[0]), Sleef_tanf4_u10(values.val[1])),
587
+ map(std::tan)
588
+ );
589
+ }
590
+ Vectorized<float> tanh() const {
591
+ return USE_SLEEF(
592
+ Vectorized<float>(Sleef_tanhf4_u10(values.val[0]), Sleef_tanhf4_u10(values.val[1])),
593
+ map(std::tanh)
594
+ );
595
+ }
596
+ Vectorized<float> trunc() const {
597
+ float32x4_t r0 = vrndq_f32(values.val[0]);
598
+ float32x4_t r1 = vrndq_f32(values.val[1]);
599
+ return Vectorized<float>(r0, r1);
600
+ }
601
+ Vectorized<float> lgamma() const {
602
+ return USE_SLEEF(
603
+ Vectorized<float>(Sleef_lgammaf4_u10(values.val[0]), Sleef_lgammaf4_u10(values.val[1])),
604
+ map(std::lgamma)
605
+ );
606
+ }
607
+ Vectorized<float> sqrt() const {
608
+ return Vectorized<float>(
609
+ vsqrtq_f32(values.val[0]),
610
+ vsqrtq_f32(values.val[1]));
611
+ }
612
+ Vectorized<float> reciprocal() const {
613
+ auto r0 = vdivq_f32(vdupq_n_f32(1.0f), values.val[0]);
614
+ auto r1 = vdivq_f32(vdupq_n_f32(1.0f), values.val[1]);
615
+ return Vectorized<float>(r0, r1);
616
+ }
617
+ Vectorized<float> rsqrt() const {
618
+ return this->sqrt().reciprocal();
619
+ }
620
+ Vectorized<float> pow(const Vectorized<float> &exp) const {
621
+ USE_SLEEF(
622
+ {
623
+ return Vectorized<float>(Sleef_powf4_u10(values.val[0], exp.values.val[0]),
624
+ Sleef_powf4_u10(values.val[1], exp.values.val[1]));
625
+ },
626
+ {
627
+ __at_align__ float tmp[size()];
628
+ __at_align__ float tmp_exp[size()];
629
+ store(tmp);
630
+ exp.store(tmp_exp);
631
+ for (const auto i : c10::irange(size())) {
632
+ tmp[i] = std::pow(tmp[i], tmp_exp[i]);
633
+ }
634
+ return loadu(tmp);
635
+ }
636
+ )
637
+ }
638
+ Vectorized<float> operator==(const Vectorized<float>& other) const {
639
+ float32x4_t r0 =
640
+ vreinterpretq_f32_u32(vceqq_f32(values.val[0], other.values.val[0]));
641
+ float32x4_t r1 =
642
+ vreinterpretq_f32_u32(vceqq_f32(values.val[1], other.values.val[1]));
643
+ return Vectorized<float>(r0, r1);
644
+ }
645
+
646
+ Vectorized<float> operator!=(const Vectorized<float>& other) const {
647
+ float32x4_t r0 = vreinterpretq_f32_u32(
648
+ vmvnq_u32(vceqq_f32(values.val[0], other.values.val[0])));
649
+ float32x4_t r1 = vreinterpretq_f32_u32(
650
+ vmvnq_u32(vceqq_f32(values.val[1], other.values.val[1])));
651
+ return Vectorized<float>(r0, r1);
652
+ }
653
+
654
+ Vectorized<float> operator<(const Vectorized<float>& other) const {
655
+ float32x4_t r0 =
656
+ vreinterpretq_f32_u32(vcltq_f32(values.val[0], other.values.val[0]));
657
+ float32x4_t r1 =
658
+ vreinterpretq_f32_u32(vcltq_f32(values.val[1], other.values.val[1]));
659
+ return Vectorized<float>(r0, r1);
660
+ }
661
+
662
+ Vectorized<float> operator<=(const Vectorized<float>& other) const {
663
+ float32x4_t r0 =
664
+ vreinterpretq_f32_u32(vcleq_f32(values.val[0], other.values.val[0]));
665
+ float32x4_t r1 =
666
+ vreinterpretq_f32_u32(vcleq_f32(values.val[1], other.values.val[1]));
667
+ return Vectorized<float>(r0, r1);
668
+ }
669
+
670
+ Vectorized<float> operator>(const Vectorized<float>& other) const {
671
+ float32x4_t r0 =
672
+ vreinterpretq_f32_u32(vcgtq_f32(values.val[0], other.values.val[0]));
673
+ float32x4_t r1 =
674
+ vreinterpretq_f32_u32(vcgtq_f32(values.val[1], other.values.val[1]));
675
+ return Vectorized<float>(r0, r1);
676
+ }
677
+
678
+ Vectorized<float> operator>=(const Vectorized<float>& other) const {
679
+ float32x4_t r0 =
680
+ vreinterpretq_f32_u32(vcgeq_f32(values.val[0], other.values.val[0]));
681
+ float32x4_t r1 =
682
+ vreinterpretq_f32_u32(vcgeq_f32(values.val[1], other.values.val[1]));
683
+ return Vectorized<float>(r0, r1);
684
+ }
685
+
686
+ Vectorized<float> eq(const Vectorized<float>& other) const;
687
+ Vectorized<float> ne(const Vectorized<float>& other) const;
688
+ Vectorized<float> gt(const Vectorized<float>& other) const;
689
+ Vectorized<float> ge(const Vectorized<float>& other) const;
690
+ Vectorized<float> lt(const Vectorized<float>& other) const;
691
+ Vectorized<float> le(const Vectorized<float>& other) const;
692
+ };
693
+
694
+ template <>
695
+ Vectorized<float> inline operator+(const Vectorized<float>& a, const Vectorized<float>& b) {
696
+ float32x4_t r0 = vaddq_f32(a.get_low(), b.get_low());
697
+ float32x4_t r1 = vaddq_f32(a.get_high(), b.get_high());
698
+ return Vectorized<float>(r0, r1);
699
+ }
700
+
701
+ template <>
702
+ Vectorized<float> inline operator-(const Vectorized<float>& a, const Vectorized<float>& b) {
703
+ float32x4_t r0 = vsubq_f32(a.get_low(), b.get_low());
704
+ float32x4_t r1 = vsubq_f32(a.get_high(), b.get_high());
705
+ return Vectorized<float>(r0, r1);
706
+ }
707
+
708
+ template <>
709
+ Vectorized<float> inline operator*(const Vectorized<float>& a, const Vectorized<float>& b) {
710
+ float32x4_t r0 = vmulq_f32(a.get_low(), b.get_low());
711
+ float32x4_t r1 = vmulq_f32(a.get_high(), b.get_high());
712
+ return Vectorized<float>(r0, r1);
713
+ }
714
+
715
+ template <>
716
+ Vectorized<float> inline operator/(const Vectorized<float>& a, const Vectorized<float>& b) {
717
+ float32x4_t r0 = vdivq_f32(a.get_low(), b.get_low());
718
+ float32x4_t r1 = vdivq_f32(a.get_high(), b.get_high());
719
+ return Vectorized<float>(r0, r1);
720
+ }
721
+
722
+ // frac. Implement this here so we can use subtraction
723
+ inline Vectorized<float> Vectorized<float>::frac() const {
724
+ return *this - this->trunc();
725
+ }
726
+
727
+ // Implements the IEEE 754 201X `maximum` operation, which propagates NaN if
728
+ // either input is a NaN.
729
+ template <>
730
+ Vectorized<float> inline maximum(const Vectorized<float>& a, const Vectorized<float>& b) {
731
+ float32x4_t r0 = vmaxq_f32(a.get_low(), b.get_low());
732
+ float32x4_t r1 = vmaxq_f32(a.get_high(), b.get_high());
733
+ return Vectorized<float>(r0, r1);
734
+ }
735
+
736
+ // Implements the IEEE 754 201X `minimum` operation, which propagates NaN if
737
+ // either input is a NaN.
738
+ template <>
739
+ Vectorized<float> inline minimum(const Vectorized<float>& a, const Vectorized<float>& b) {
740
+ float32x4_t r0 = vminq_f32(a.get_low(), b.get_low());
741
+ float32x4_t r1 = vminq_f32(a.get_high(), b.get_high());
742
+ return Vectorized<float>(r0, r1);
743
+ }
744
+
745
+ template <>
746
+ Vectorized<float> inline clamp(const Vectorized<float>& a, const Vectorized<float>& min, const Vectorized<float>& max) {
747
+ return minimum(max, maximum(min, a));
748
+ }
749
+
750
+ template <>
751
+ Vectorized<float> inline clamp_max(const Vectorized<float>& a, const Vectorized<float>& max) {
752
+ return minimum(max, a);
753
+ }
754
+
755
+ template <>
756
+ Vectorized<float> inline clamp_min(const Vectorized<float>& a, const Vectorized<float>& min) {
757
+ return maximum(min, a);
758
+ }
759
+
760
+ template <>
761
+ Vectorized<float> inline operator&(const Vectorized<float>& a, const Vectorized<float>& b) {
762
+ float32x4_t r0 = vreinterpretq_f32_u32(vandq_u32(
763
+ vreinterpretq_u32_f32(a.get_low()),
764
+ vreinterpretq_u32_f32(b.get_low())));
765
+ float32x4_t r1 = vreinterpretq_f32_u32(vandq_u32(
766
+ vreinterpretq_u32_f32(a.get_high()),
767
+ vreinterpretq_u32_f32(b.get_high())));
768
+ return Vectorized<float>(r0, r1);
769
+ }
770
+
771
+ template <>
772
+ Vectorized<float> inline operator|(const Vectorized<float>& a, const Vectorized<float>& b) {
773
+ float32x4_t r0 = vreinterpretq_f32_u32(vorrq_u32(
774
+ vreinterpretq_u32_f32(a.get_low()),
775
+ vreinterpretq_u32_f32(b.get_low())));
776
+ float32x4_t r1 = vreinterpretq_f32_u32(vorrq_u32(
777
+ vreinterpretq_u32_f32(a.get_high()),
778
+ vreinterpretq_u32_f32(b.get_high())));
779
+ return Vectorized<float>(r0, r1);
780
+ }
781
+
782
+ template <>
783
+ Vectorized<float> inline operator^(const Vectorized<float>& a, const Vectorized<float>& b) {
784
+ float32x4_t r0 = vreinterpretq_f32_u32(veorq_u32(
785
+ vreinterpretq_u32_f32(a.get_low()),
786
+ vreinterpretq_u32_f32(b.get_low())));
787
+ float32x4_t r1 = vreinterpretq_f32_u32(veorq_u32(
788
+ vreinterpretq_u32_f32(a.get_high()),
789
+ vreinterpretq_u32_f32(b.get_high())));
790
+ return Vectorized<float>(r0, r1);
791
+ }
792
+
793
+ inline Vectorized<float> Vectorized<float>::eq(const Vectorized<float>& other) const {
794
+ return (*this == other) & Vectorized<float>(1.0f);
795
+ }
796
+
797
+ inline Vectorized<float> Vectorized<float>::ne(const Vectorized<float>& other) const {
798
+ return (*this != other) & Vectorized<float>(1.0f);
799
+ }
800
+
801
+ inline Vectorized<float> Vectorized<float>::gt(const Vectorized<float>& other) const {
802
+ return (*this > other) & Vectorized<float>(1.0f);
803
+ }
804
+
805
+ inline Vectorized<float> Vectorized<float>::ge(const Vectorized<float>& other) const {
806
+ return (*this >= other) & Vectorized<float>(1.0f);
807
+ }
808
+
809
+ inline Vectorized<float> Vectorized<float>::lt(const Vectorized<float>& other) const {
810
+ return (*this < other) & Vectorized<float>(1.0f);
811
+ }
812
+
813
+ inline Vectorized<float> Vectorized<float>::le(const Vectorized<float>& other) const {
814
+ return (*this <= other) & Vectorized<float>(1.0f);
815
+ }
816
+
817
+ template <>
818
+ inline void convert(const float* src, int32_t* dst, int64_t n) {
819
+ int64_t i;
820
+ #pragma unroll
821
+ for (i = 0; i <= (n - Vectorized<float>::size()); i += Vectorized<float>::size()) {
822
+ vst1q_s32(dst + i, vcvtq_s32_f32(vld1q_f32(src + i)));
823
+ vst1q_s32(dst + i + 4, vcvtq_s32_f32(vld1q_f32(src + i + 4)));
824
+ }
825
+ #pragma unroll
826
+ for (; i < n; i++) {
827
+ dst[i] = static_cast<int32_t>(src[i]);
828
+ }
829
+ }
830
+
831
+ template <>
832
+ inline void convert(const int32_t* src, float* dst, int64_t n) {
833
+ int64_t i;
834
+ #pragma unroll
835
+ for (i = 0; i <= (n - Vectorized<float>::size()); i += Vectorized<float>::size()) {
836
+ vst1q_f32(dst + i, vcvtq_f32_s32(vld1q_s32(src + i)));
837
+ vst1q_f32(dst + i + 4, vcvtq_f32_s32(vld1q_s32(src + i + 4)));
838
+ }
839
+ #pragma unroll
840
+ for (; i < n; i++) {
841
+ dst[i] = static_cast<float>(src[i]);
842
+ }
843
+ }
844
+
845
+ template <>
846
+ Vectorized<float> inline fmadd(const Vectorized<float>& a, const Vectorized<float>& b, const Vectorized<float>& c) {
847
+ float32x4_t r0 = vfmaq_f32(c.get_low(), a.get_low(), b.get_low());
848
+ float32x4_t r1 = vfmaq_f32(c.get_high(), a.get_high(), b.get_high());
849
+ return Vectorized<float>(r0, r1);
850
+ }
851
+
852
+ template <>
853
+ Vectorized<float> inline fmsub(const Vectorized<float>& a, const Vectorized<float>& b, const Vectorized<float>& c) {
854
+ float32x4_t r0 = vfmsq_f32(c.get_low(), a.get_low(), b.get_low());
855
+ float32x4_t r1 = vfmsq_f32(c.get_high(), a.get_high(), b.get_high());
856
+ return Vectorized<float>(r0, r1);
857
+ }
858
+
859
+ inline Vectorized<float> Vectorized<float>::erf() const {
860
+ // constants
861
+ const Vectorized<float> neg_zero_vec(-0.f);
862
+ const Vectorized<float> one_vec(1.0f);
863
+ const Vectorized<float> p(0.3275911f);
864
+ const Vectorized<float> p1(0.254829592f);
865
+ const Vectorized<float> p2(-0.284496736f);
866
+ const Vectorized<float> p3(1.421413741f);
867
+ const Vectorized<float> p4(-1.453152027f);
868
+ const Vectorized<float> p5(1.061405429f);
869
+ // sign(x)
870
+ auto sign_mask = neg_zero_vec & *this;
871
+ auto abs_vec = this->abs();
872
+ // t = 1 / (p * abs(x) + 1)
873
+ auto tmp0 = fmadd(p, abs_vec, one_vec);
874
+ auto t = one_vec / tmp0;
875
+ // r = p5 * t ^ 4 + p4 * t ^ 3 + p3 * t ^ 2 + p2 * t + p1
876
+ auto tmp1 = fmadd(p5, t, p4);
877
+ auto tmp2 = fmadd(tmp1, t, p3);
878
+ auto tmp3 = fmadd(tmp2, t, p2);
879
+ auto r = fmadd(tmp3, t, p1);
880
+ // - exp(- x * x)
881
+ auto pow_2 = (*this) * (*this);
882
+ auto neg_pow_2 = pow_2 ^ neg_zero_vec;
883
+ auto tmp4 = neg_pow_2.map(std::exp); // This can be swapped for a faster implementation of exp.
884
+ auto tmp5 = tmp4 ^ neg_zero_vec;
885
+ // erf(x) = sign(x) * (1 - r * t * exp(- x * x))
886
+ auto tmp6 = t * tmp5;
887
+ auto tmp7 = fmadd(tmp6, r, one_vec);
888
+ return tmp7 ^ sign_mask;
889
+ }
890
+ #endif /* defined(__aarch64__) */
891
+
892
+ }} // namespace at::vec::CPU_CAPABILITY
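For reference, the erf() specialization above follows the classic Abramowitz-Stegun style polynomial approximation, erf(x) ~= sign(x) * (1 - r(t) * t * exp(-x^2)) with t = 1 / (1 + p * |x|) and r(t) a degree-4 polynomial; the maximum absolute error commonly quoted for this approximation is about 1.5e-7. Below is a scalar sketch with the same constants (the helper name is invented for illustration and is not part of the header).

#include <cmath>

// Scalar equivalent of the NEON Vectorized<float>::erf() above, same constants.
float erf_scalar_sketch(float x) {
  const float p  = 0.3275911f;
  const float p1 = 0.254829592f;
  const float p2 = -0.284496736f;
  const float p3 = 1.421413741f;
  const float p4 = -1.453152027f;
  const float p5 = 1.061405429f;
  const float sign = std::signbit(x) ? -1.0f : 1.0f;
  const float ax = std::fabs(x);
  const float t = 1.0f / (p * ax + 1.0f);
  // Horner form of r = p5*t^4 + p4*t^3 + p3*t^2 + p2*t + p1,
  // mirroring the fmadd chain in the vectorized code.
  const float r = (((p5 * t + p4) * t + p3) * t + p2) * t + p1;
  return sign * (1.0f - r * t * std::exp(-ax * ax));
}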
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_int.h ADDED
@@ -0,0 +1,1586 @@
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+
6
+ #include <ATen/cpu/vec/intrinsics.h>
7
+ #include <ATen/cpu/vec/vec_base.h>
8
+ #include <c10/macros/Macros.h>
9
+ #include <c10/util/irange.h>
10
+
11
+ namespace at::vec {
12
+ inline namespace CPU_CAPABILITY {
13
+
14
+ #ifdef CPU_CAPABILITY_AVX2
15
+
16
+ struct Vectorizedi {
17
+ protected:
18
+ __m256i values;
19
+
20
+ static inline __m256i invert(const __m256i& v) {
21
+ const auto ones = _mm256_set1_epi64x(-1);
22
+ return _mm256_xor_si256(ones, v);
23
+ }
24
+ public:
25
+ Vectorizedi() {}
26
+ Vectorizedi(__m256i v) : values(v) {}
27
+ operator __m256i() const {
28
+ return values;
29
+ }
30
+ };
31
+
32
+ #else
33
+
34
+ struct Vectorizedi {}; // dummy definition to make Vectorizedi always defined
35
+
36
+ #endif // CPU_CAPABILITY_AVX2
37
+
38
+ #ifdef CPU_CAPABILITY_AVX2
39
+
40
+ template <>
41
+ class Vectorized<int64_t> : public Vectorizedi {
42
+ private:
43
+ static const Vectorized<int64_t> ones;
44
+ public:
45
+ using value_type = int64_t;
46
+ using size_type = int;
47
+ static constexpr size_type size() {
48
+ return 4;
49
+ }
50
+ using Vectorizedi::Vectorizedi;
51
+ Vectorized() {}
52
+ Vectorized(int64_t v) { values = _mm256_set1_epi64x(v); }
53
+ Vectorized(int64_t val1, int64_t val2, int64_t val3, int64_t val4) {
54
+ values = _mm256_setr_epi64x(val1, val2, val3, val4);
55
+ }
56
+ template <int64_t mask>
57
+ static Vectorized<int64_t> blend(Vectorized<int64_t> a, Vectorized<int64_t> b) {
58
+ __at_align__ int64_t tmp_values[size()];
59
+ a.store(tmp_values);
60
+ if (mask & 0x01)
61
+ tmp_values[0] = _mm256_extract_epi64(b.values, 0);
62
+ if (mask & 0x02)
63
+ tmp_values[1] = _mm256_extract_epi64(b.values, 1);
64
+ if (mask & 0x04)
65
+ tmp_values[2] = _mm256_extract_epi64(b.values, 2);
66
+ if (mask & 0x08)
67
+ tmp_values[3] = _mm256_extract_epi64(b.values, 3);
68
+ return loadu(tmp_values);
69
+ }
70
+ static Vectorized<int64_t> blendv(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b,
71
+ const Vectorized<int64_t>& mask) {
72
+ return _mm256_blendv_epi8(a.values, b.values, mask.values);
73
+ }
74
+ template <typename step_t>
75
+ static Vectorized<int64_t> arange(int64_t base = 0, step_t step = static_cast<step_t>(1)) {
76
+ return Vectorized<int64_t>(base, base + step, base + 2 * step, base + 3 * step);
77
+ }
78
+ static Vectorized<int64_t>
79
+ set(Vectorized<int64_t> a, Vectorized<int64_t> b, int64_t count = size()) {
80
+ switch (count) {
81
+ case 0:
82
+ return a;
83
+ case 1:
84
+ return blend<1>(a, b);
85
+ case 2:
86
+ return blend<3>(a, b);
87
+ case 3:
88
+ return blend<7>(a, b);
89
+ }
90
+ return b;
91
+ }
92
+ static Vectorized<int64_t> loadu(const void* ptr) {
93
+ return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(ptr));
94
+ }
95
+ static Vectorized<int64_t> loadu(const void* ptr, int64_t count) {
96
+ __at_align__ int64_t tmp_values[size()];
97
+ // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
98
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
99
+ // instructions while a loop would be compiled to one instruction.
100
+ for (const auto i : c10::irange(size())) {
101
+ tmp_values[i] = 0;
102
+ }
103
+ std::memcpy(tmp_values, ptr, count * sizeof(int64_t));
104
+ return loadu(tmp_values);
105
+ }
106
+ void store(void* ptr, int count = size()) const {
107
+ if (count == size()) {
108
+ // ptr need not be aligned here. See
109
+ // https://software.intel.com/content/www/us/en/develop/documentation/cpp-compiler-developer-guide-and-reference/top/compiler-reference/intrinsics/intrinsics-for-intel-advanced-vector-extensions/intrinsics-for-load-and-store-operations-1/mm256-storeu-si256.html
110
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>(ptr), values);
111
+ } else if (count > 0) {
112
+ __at_align__ int64_t tmp_values[size()];
113
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>(tmp_values), values);
114
+ std::memcpy(ptr, tmp_values, count * sizeof(int64_t));
115
+ }
116
+ }
117
+ const int64_t& operator[](int idx) const = delete;
118
+ int64_t& operator[](int idx) = delete;
119
+ Vectorized<int64_t> abs() const {
120
+ auto zero = _mm256_set1_epi64x(0);
121
+ auto is_larger = _mm256_cmpgt_epi64(zero, values);
122
+ auto inverse = _mm256_xor_si256(values, is_larger);
123
+ return _mm256_sub_epi64(inverse, is_larger);
124
+ }
125
+ Vectorized<int64_t> real() const {
126
+ return *this;
127
+ }
128
+ Vectorized<int64_t> imag() const {
129
+ return _mm256_set1_epi64x(0);
130
+ }
131
+ Vectorized<int64_t> conj() const {
132
+ return *this;
133
+ }
134
+ Vectorized<int64_t> neg() const;
135
+ Vectorized<int64_t> operator==(const Vectorized<int64_t>& other) const {
136
+ return _mm256_cmpeq_epi64(values, other.values);
137
+ }
138
+ Vectorized<int64_t> operator!=(const Vectorized<int64_t>& other) const {
139
+ return invert(_mm256_cmpeq_epi64(values, other.values));
140
+ }
141
+ Vectorized<int64_t> operator<(const Vectorized<int64_t>& other) const {
142
+ return _mm256_cmpgt_epi64(other.values, values);
143
+ }
144
+ Vectorized<int64_t> operator<=(const Vectorized<int64_t>& other) const {
145
+ return invert(_mm256_cmpgt_epi64(values, other.values));
146
+ }
147
+ Vectorized<int64_t> operator>(const Vectorized<int64_t>& other) const {
148
+ return _mm256_cmpgt_epi64(values, other.values);
149
+ }
150
+ Vectorized<int64_t> operator>=(const Vectorized<int64_t>& other) const {
151
+ return invert(_mm256_cmpgt_epi64(other.values, values));
152
+ }
153
+
154
+ Vectorized<int64_t> eq(const Vectorized<int64_t>& other) const;
155
+ Vectorized<int64_t> ne(const Vectorized<int64_t>& other) const;
156
+ Vectorized<int64_t> gt(const Vectorized<int64_t>& other) const;
157
+ Vectorized<int64_t> ge(const Vectorized<int64_t>& other) const;
158
+ Vectorized<int64_t> lt(const Vectorized<int64_t>& other) const;
159
+ Vectorized<int64_t> le(const Vectorized<int64_t>& other) const;
160
+ };
161
+
162
+ template <>
163
+ class Vectorized<int32_t> : public Vectorizedi {
164
+ private:
165
+ static const Vectorized<int32_t> ones;
166
+ public:
167
+ using value_type = int32_t;
168
+ static constexpr int size() {
169
+ return 8;
170
+ }
171
+ using Vectorizedi::Vectorizedi;
172
+ Vectorized() {}
173
+ Vectorized(int32_t v) { values = _mm256_set1_epi32(v); }
174
+ Vectorized(int32_t val1, int32_t val2, int32_t val3, int32_t val4,
175
+ int32_t val5, int32_t val6, int32_t val7, int32_t val8) {
176
+ values = _mm256_setr_epi32(val1, val2, val3, val4, val5, val6, val7, val8);
177
+ }
178
+ template <int64_t mask>
179
+ static Vectorized<int32_t> blend(Vectorized<int32_t> a, Vectorized<int32_t> b) {
180
+ return _mm256_blend_epi32(a, b, mask);
181
+ }
182
+ static Vectorized<int32_t> blendv(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b,
183
+ const Vectorized<int32_t>& mask) {
184
+ return _mm256_blendv_epi8(a.values, b.values, mask.values);
185
+ }
186
+ template <typename step_t>
187
+ static Vectorized<int32_t> arange(int32_t base = 0, step_t step = static_cast<step_t>(1)) {
188
+ return Vectorized<int32_t>(
189
+ base, base + step, base + 2 * step, base + 3 * step,
190
+ base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step);
191
+ }
192
+ static Vectorized<int32_t>
193
+ set(Vectorized<int32_t> a, Vectorized<int32_t> b, int32_t count = size()) {
194
+ switch (count) {
195
+ case 0:
196
+ return a;
197
+ case 1:
198
+ return blend<1>(a, b);
199
+ case 2:
200
+ return blend<3>(a, b);
201
+ case 3:
202
+ return blend<7>(a, b);
203
+ case 4:
204
+ return blend<15>(a, b);
205
+ case 5:
206
+ return blend<31>(a, b);
207
+ case 6:
208
+ return blend<63>(a, b);
209
+ case 7:
210
+ return blend<127>(a, b);
211
+ }
212
+ return b;
213
+ }
214
+ static Vectorized<int32_t> loadu(const void* ptr) {
215
+ return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(ptr));
216
+ }
217
+ static Vectorized<int32_t> loadu(const void* ptr, int32_t count) {
218
+ __at_align__ int32_t tmp_values[size()];
219
+ // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
220
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
221
+ // instructions while a loop would be compiled to one instruction.
222
+ for (const auto i : c10::irange(size())) {
223
+ tmp_values[i] = 0;
224
+ }
225
+ std::memcpy(tmp_values, ptr, count * sizeof(int32_t));
226
+ return loadu(tmp_values);
227
+ }
228
+ void store(void* ptr, int count = size()) const {
229
+ if (count == size()) {
230
+ // ptr need not be aligned here. See
231
+ // https://software.intel.com/content/www/us/en/develop/documentation/cpp-compiler-developer-guide-and-reference/top/compiler-reference/intrinsics/intrinsics-for-intel-advanced-vector-extensions/intrinsics-for-load-and-store-operations-1/mm256-storeu-si256.html
232
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>(ptr), values);
233
+ } else if (count > 0) {
234
+ __at_align__ int32_t tmp_values[size()];
235
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>(tmp_values), values);
236
+ std::memcpy(ptr, tmp_values, count * sizeof(int32_t));
237
+ }
238
+ }
239
+ const int32_t& operator[](int idx) const = delete;
240
+ int32_t& operator[](int idx) = delete;
241
+ Vectorized<int32_t> abs() const {
242
+ return _mm256_abs_epi32(values);
243
+ }
244
+ Vectorized<int32_t> real() const {
245
+ return *this;
246
+ }
247
+ Vectorized<int32_t> imag() const {
248
+ return _mm256_set1_epi32(0);
249
+ }
250
+ Vectorized<int32_t> conj() const {
251
+ return *this;
252
+ }
253
+ Vectorized<int32_t> neg() const;
254
+ Vectorized<int32_t> operator==(const Vectorized<int32_t>& other) const {
255
+ return _mm256_cmpeq_epi32(values, other.values);
256
+ }
257
+ Vectorized<int32_t> operator!=(const Vectorized<int32_t>& other) const {
258
+ return invert(_mm256_cmpeq_epi32(values, other.values));
259
+ }
260
+ Vectorized<int32_t> operator<(const Vectorized<int32_t>& other) const {
261
+ return _mm256_cmpgt_epi32(other.values, values);
262
+ }
263
+ Vectorized<int32_t> operator<=(const Vectorized<int32_t>& other) const {
264
+ return invert(_mm256_cmpgt_epi32(values, other.values));
265
+ }
266
+ Vectorized<int32_t> operator>(const Vectorized<int32_t>& other) const {
267
+ return _mm256_cmpgt_epi32(values, other.values);
268
+ }
269
+ Vectorized<int32_t> operator>=(const Vectorized<int32_t>& other) const {
270
+ return invert(_mm256_cmpgt_epi32(other.values, values));
271
+ }
272
+ Vectorized<int32_t> eq(const Vectorized<int32_t>& other) const;
273
+ Vectorized<int32_t> ne(const Vectorized<int32_t>& other) const;
274
+ Vectorized<int32_t> gt(const Vectorized<int32_t>& other) const;
275
+ Vectorized<int32_t> ge(const Vectorized<int32_t>& other) const;
276
+ Vectorized<int32_t> lt(const Vectorized<int32_t>& other) const;
277
+ Vectorized<int32_t> le(const Vectorized<int32_t>& other) const;
278
+ };
279
+
280
+ template <>
281
+ inline void convert(const int32_t *src, float *dst, int64_t n) {
282
+ int64_t i;
283
+ // int32_t and float have the same size
284
+ #ifndef _MSC_VER
285
+ # pragma unroll
286
+ #endif
287
+ for (i = 0; i <= (n - Vectorized<int32_t>::size()); i += Vectorized<int32_t>::size()) {
288
+ auto input_vec = _mm256_loadu_si256(reinterpret_cast<const __m256i*>(src + i));
289
+ auto output_vec = _mm256_cvtepi32_ps(input_vec);
290
+ _mm256_storeu_ps(reinterpret_cast<float*>(dst + i), output_vec);
291
+ }
292
+ #ifndef _MSC_VER
293
+ # pragma unroll
294
+ #endif
295
+ for (; i < n; i++) {
296
+ dst[i] = static_cast<float>(src[i]);
297
+ }
298
+ }
299
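// Informal gloss (not upstream commentary): the convert() specializations all
// follow the same shape -- a vectorized main loop that handles size() elements
// per iteration, followed by a scalar tail loop for the remaining n % size()
// elements.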
+
300
+ template <>
301
+ inline void convert(const int32_t *src, double *dst, int64_t n) {
302
+ int64_t i;
303
+ // int32_t has half the size of double
304
+ #ifndef _MSC_VER
305
+ # pragma unroll
306
+ #endif
307
+ for (i = 0; i <= (n - Vectorized<double>::size()); i += Vectorized<double>::size()) {
308
+ auto input_128_vec = _mm_loadu_si128(reinterpret_cast<const __m128i*>(src + i));
309
+ auto output_vec = _mm256_cvtepi32_pd(input_128_vec);
310
+ _mm256_storeu_pd(reinterpret_cast<double*>(dst + i), output_vec);
311
+ }
312
+ #ifndef _MSC_VER
313
+ # pragma unroll
314
+ #endif
315
+ for (; i < n; i++) {
316
+ dst[i] = static_cast<double>(src[i]);
317
+ }
318
+ }
319
+
320
+ template <>
321
+ class Vectorized<int16_t> : public Vectorizedi {
322
+ private:
323
+ static const Vectorized<int16_t> ones;
324
+ public:
325
+ using value_type = int16_t;
326
+ static constexpr int size() {
327
+ return 16;
328
+ }
329
+ using Vectorizedi::Vectorizedi;
330
+ Vectorized() {}
331
+ Vectorized(int16_t v) { values = _mm256_set1_epi16(v); }
332
+ Vectorized(int16_t val1, int16_t val2, int16_t val3, int16_t val4,
333
+ int16_t val5, int16_t val6, int16_t val7, int16_t val8,
334
+ int16_t val9, int16_t val10, int16_t val11, int16_t val12,
335
+ int16_t val13, int16_t val14, int16_t val15, int16_t val16) {
336
+ values = _mm256_setr_epi16(val1, val2, val3, val4, val5, val6, val7, val8,
337
+ val9, val10, val11, val12, val13, val14, val15, val16);
338
+ }
339
+ template <int64_t mask>
340
+ static Vectorized<int16_t> blend(Vectorized<int16_t> a, Vectorized<int16_t> b) {
341
+ __at_align__ int16_t tmp_values[size()];
342
+ a.store(tmp_values);
343
+ if (mask & 0x01)
344
+ tmp_values[0] = _mm256_extract_epi16(b.values, 0);
345
+ if (mask & 0x02)
346
+ tmp_values[1] = _mm256_extract_epi16(b.values, 1);
347
+ if (mask & 0x04)
348
+ tmp_values[2] = _mm256_extract_epi16(b.values, 2);
349
+ if (mask & 0x08)
350
+ tmp_values[3] = _mm256_extract_epi16(b.values, 3);
351
+ if (mask & 0x10)
352
+ tmp_values[4] = _mm256_extract_epi16(b.values, 4);
353
+ if (mask & 0x20)
354
+ tmp_values[5] = _mm256_extract_epi16(b.values, 5);
355
+ if (mask & 0x40)
356
+ tmp_values[6] = _mm256_extract_epi16(b.values, 6);
357
+ if (mask & 0x80)
358
+ tmp_values[7] = _mm256_extract_epi16(b.values, 7);
359
+ if (mask & 0x100)
360
+ tmp_values[8] = _mm256_extract_epi16(b.values, 8);
361
+ if (mask & 0x200)
362
+ tmp_values[9] = _mm256_extract_epi16(b.values, 9);
363
+ if (mask & 0x400)
364
+ tmp_values[10] = _mm256_extract_epi16(b.values, 10);
365
+ if (mask & 0x800)
366
+ tmp_values[11] = _mm256_extract_epi16(b.values, 11);
367
+ if (mask & 0x1000)
368
+ tmp_values[12] = _mm256_extract_epi16(b.values, 12);
369
+ if (mask & 0x2000)
370
+ tmp_values[13] = _mm256_extract_epi16(b.values, 13);
371
+ if (mask & 0x4000)
372
+ tmp_values[14] = _mm256_extract_epi16(b.values, 14);
373
+ if (mask & 0x8000)
374
+ tmp_values[15] = _mm256_extract_epi16(b.values, 15);
375
+ return loadu(tmp_values);
376
+ }
377
+ static Vectorized<int16_t> blendv(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b,
378
+ const Vectorized<int16_t>& mask) {
379
+ return _mm256_blendv_epi8(a.values, b.values, mask.values);
380
+ }
381
+ template <typename step_t>
382
+ static Vectorized<int16_t> arange(int16_t base = 0, step_t step = static_cast<step_t>(1)) {
383
+ return Vectorized<int16_t>(
384
+ base, base + step, base + 2 * step, base + 3 * step,
385
+ base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step,
386
+ base + 8 * step, base + 9 * step, base + 10 * step, base + 11 * step,
387
+ base + 12 * step, base + 13 * step, base + 14 * step, base + 15 * step);
388
+ }
389
+ static Vectorized<int16_t>
390
+ set(Vectorized<int16_t> a, Vectorized<int16_t> b, int16_t count = size()) {
391
+ switch (count) {
392
+ case 0:
393
+ return a;
394
+ case 1:
395
+ return blend<1>(a, b);
396
+ case 2:
397
+ return blend<3>(a, b);
398
+ case 3:
399
+ return blend<7>(a, b);
400
+ case 4:
401
+ return blend<15>(a, b);
402
+ case 5:
403
+ return blend<31>(a, b);
404
+ case 6:
405
+ return blend<63>(a, b);
406
+ case 7:
407
+ return blend<127>(a, b);
408
+ case 8:
409
+ return blend<255>(a, b);
410
+ case 9:
411
+ return blend<511>(a, b);
412
+ case 10:
413
+ return blend<1023>(a, b);
414
+ case 11:
415
+ return blend<2047>(a, b);
416
+ case 12:
417
+ return blend<4095>(a, b);
418
+ case 13:
419
+ return blend<8191>(a, b);
420
+ case 14:
421
+ return blend<16383>(a, b);
422
+ case 15:
423
+ return blend<32767>(a, b);
424
+ }
425
+ return b;
426
+ }
427
+ static Vectorized<int16_t> loadu(const void* ptr) {
428
+ return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(ptr));
429
+ }
430
+ static Vectorized<int16_t> loadu(const void* ptr, int16_t count) {
431
+ __at_align__ int16_t tmp_values[size()];
432
+ // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
433
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
434
+ // instructions while a loop would be compiled to one instruction.
435
+ for (const auto i : c10::irange(size())) {
436
+ tmp_values[i] = 0;
437
+ }
438
+ std::memcpy(tmp_values, ptr, count * sizeof(int16_t));
439
+ return loadu(tmp_values);
440
+ }
441
+ void store(void* ptr, int count = size()) const {
442
+ if (count == size()) {
443
+ // ptr need not be aligned here. See
444
+ // https://software.intel.com/content/www/us/en/develop/documentation/cpp-compiler-developer-guide-and-reference/top/compiler-reference/intrinsics/intrinsics-for-intel-advanced-vector-extensions/intrinsics-for-load-and-store-operations-1/mm256-storeu-si256.html
445
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>(ptr), values);
446
+ } else if (count > 0) {
447
+ __at_align__ int16_t tmp_values[size()];
448
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>(tmp_values), values);
449
+ std::memcpy(ptr, tmp_values, count * sizeof(int16_t));
450
+ }
451
+ }
452
+ const int16_t& operator[](int idx) const = delete;
453
+ int16_t& operator[](int idx) = delete;
454
+ Vectorized<int16_t> abs() const {
455
+ return _mm256_abs_epi16(values);
456
+ }
457
+ Vectorized<int16_t> real() const {
458
+ return *this;
459
+ }
460
+ Vectorized<int16_t> imag() const {
461
+ return _mm256_set1_epi16(0);
462
+ }
463
+ Vectorized<int16_t> conj() const {
464
+ return *this;
465
+ }
466
+ Vectorized<int16_t> neg() const;
467
+ Vectorized<int16_t> operator==(const Vectorized<int16_t>& other) const {
468
+ return _mm256_cmpeq_epi16(values, other.values);
469
+ }
470
+ Vectorized<int16_t> operator!=(const Vectorized<int16_t>& other) const {
471
+ return invert(_mm256_cmpeq_epi16(values, other.values));
472
+ }
473
+ Vectorized<int16_t> operator<(const Vectorized<int16_t>& other) const {
474
+ return _mm256_cmpgt_epi16(other.values, values);
475
+ }
476
+ Vectorized<int16_t> operator<=(const Vectorized<int16_t>& other) const {
477
+ return invert(_mm256_cmpgt_epi16(values, other.values));
478
+ }
479
+ Vectorized<int16_t> operator>(const Vectorized<int16_t>& other) const {
480
+ return _mm256_cmpgt_epi16(values, other.values);
481
+ }
482
+ Vectorized<int16_t> operator>=(const Vectorized<int16_t>& other) const {
483
+ return invert(_mm256_cmpgt_epi16(other.values, values));
484
+ }
485
+
486
+ Vectorized<int16_t> eq(const Vectorized<int16_t>& other) const;
487
+ Vectorized<int16_t> ne(const Vectorized<int16_t>& other) const;
488
+ Vectorized<int16_t> gt(const Vectorized<int16_t>& other) const;
489
+ Vectorized<int16_t> ge(const Vectorized<int16_t>& other) const;
490
+ Vectorized<int16_t> lt(const Vectorized<int16_t>& other) const;
491
+ Vectorized<int16_t> le(const Vectorized<int16_t>& other) const;
492
+ };
493
+
494
+ template <typename T>
495
+ class Vectorized8 : public Vectorizedi {
496
+ static_assert(
497
+ std::is_same<T, int8_t>::value || std::is_same<T, uint8_t>::value,
498
+ "Only int8_t/uint8_t are supported");
499
+ protected:
500
+ static const Vectorized<T> ones;
501
+ public:
502
+ using value_type = T;
503
+ static constexpr int size() {
504
+ return 32;
505
+ }
506
+ using Vectorizedi::Vectorizedi;
507
+ Vectorized8() {}
508
+ Vectorized8(T v) { values = _mm256_set1_epi8(v); }
509
+ Vectorized8(T val1, T val2, T val3, T val4,
510
+ T val5, T val6, T val7, T val8,
511
+ T val9, T val10, T val11, T val12,
512
+ T val13, T val14, T val15, T val16,
513
+ T val17, T val18, T val19, T val20,
514
+ T val21, T val22, T val23, T val24,
515
+ T val25, T val26, T val27, T val28,
516
+ T val29, T val30, T val31, T val32) {
517
+ values = _mm256_setr_epi8(val1, val2, val3, val4, val5, val6, val7, val8,
518
+ val9, val10, val11, val12, val13, val14, val15, val16,
519
+ val17, val18, val19, val20, val21, val22, val23, val24,
520
+ val25, val26, val27, val28, val29, val30, val31, val32);
521
+ }
522
+ template <int64_t mask>
523
+ static Vectorized<T> blend(Vectorized<T> a, Vectorized<T> b) {
524
+ __at_align__ T tmp_values[size()];
525
+ a.store(tmp_values);
526
+ if (mask & 0x01)
527
+ tmp_values[0] = _mm256_extract_epi8(b.values, 0);
528
+ if (mask & 0x02)
529
+ tmp_values[1] = _mm256_extract_epi8(b.values, 1);
530
+ if (mask & 0x04)
531
+ tmp_values[2] = _mm256_extract_epi8(b.values, 2);
532
+ if (mask & 0x08)
533
+ tmp_values[3] = _mm256_extract_epi8(b.values, 3);
534
+ if (mask & 0x10)
535
+ tmp_values[4] = _mm256_extract_epi8(b.values, 4);
536
+ if (mask & 0x20)
537
+ tmp_values[5] = _mm256_extract_epi8(b.values, 5);
538
+ if (mask & 0x40)
539
+ tmp_values[6] = _mm256_extract_epi8(b.values, 6);
540
+ if (mask & 0x80)
541
+ tmp_values[7] = _mm256_extract_epi8(b.values, 7);
542
+ if (mask & 0x100)
543
+ tmp_values[8] = _mm256_extract_epi8(b.values, 8);
544
+ if (mask & 0x200)
545
+ tmp_values[9] = _mm256_extract_epi8(b.values, 9);
546
+ if (mask & 0x400)
547
+ tmp_values[10] = _mm256_extract_epi8(b.values, 10);
548
+ if (mask & 0x800)
549
+ tmp_values[11] = _mm256_extract_epi8(b.values, 11);
550
+ if (mask & 0x1000)
551
+ tmp_values[12] = _mm256_extract_epi8(b.values, 12);
552
+ if (mask & 0x2000)
553
+ tmp_values[13] = _mm256_extract_epi8(b.values, 13);
554
+ if (mask & 0x4000)
555
+ tmp_values[14] = _mm256_extract_epi8(b.values, 14);
556
+ if (mask & 0x8000)
557
+ tmp_values[15] = _mm256_extract_epi8(b.values, 15);
558
+ if (mask & 0x010000)
559
+ tmp_values[16] = _mm256_extract_epi8(b.values, 16);
560
+ if (mask & 0x020000)
561
+ tmp_values[17] = _mm256_extract_epi8(b.values, 17);
562
+ if (mask & 0x040000)
563
+ tmp_values[18] = _mm256_extract_epi8(b.values, 18);
564
+ if (mask & 0x080000)
565
+ tmp_values[19] = _mm256_extract_epi8(b.values, 19);
566
+ if (mask & 0x100000)
567
+ tmp_values[20] = _mm256_extract_epi8(b.values, 20);
568
+ if (mask & 0x200000)
569
+ tmp_values[21] = _mm256_extract_epi8(b.values, 21);
570
+ if (mask & 0x400000)
571
+ tmp_values[22] = _mm256_extract_epi8(b.values, 22);
572
+ if (mask & 0x800000)
573
+ tmp_values[23] = _mm256_extract_epi8(b.values, 23);
574
+ if (mask & 0x1000000)
575
+ tmp_values[24] = _mm256_extract_epi8(b.values, 24);
576
+ if (mask & 0x2000000)
577
+ tmp_values[25] = _mm256_extract_epi8(b.values, 25);
578
+ if (mask & 0x4000000)
579
+ tmp_values[26] = _mm256_extract_epi8(b.values, 26);
580
+ if (mask & 0x8000000)
581
+ tmp_values[27] = _mm256_extract_epi8(b.values, 27);
582
+ if (mask & 0x10000000)
583
+ tmp_values[28] = _mm256_extract_epi8(b.values, 28);
584
+ if (mask & 0x20000000)
585
+ tmp_values[29] = _mm256_extract_epi8(b.values, 29);
586
+ if (mask & 0x40000000)
587
+ tmp_values[30] = _mm256_extract_epi8(b.values, 30);
588
+ if (mask & 0x80000000)
589
+ tmp_values[31] = _mm256_extract_epi8(b.values, 31);
590
+ return loadu(tmp_values);
591
+ }
592
+ static Vectorized<T> blendv(const Vectorized<T>& a, const Vectorized<T>& b,
593
+ const Vectorized<T>& mask) {
594
+ return _mm256_blendv_epi8(a.values, b.values, mask.values);
595
+ }
596
+ template <typename step_t>
597
+ static Vectorized<T> arange(T base = 0, step_t step = static_cast<step_t>(1)) {
598
+ return Vectorized<T>(
599
+ base, base + step, base + 2 * step, base + 3 * step,
600
+ base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step,
601
+ base + 8 * step, base + 9 * step, base + 10 * step, base + 11 * step,
602
+ base + 12 * step, base + 13 * step, base + 14 * step, base + 15 * step,
603
+ base + 16 * step, base + 17 * step, base + 18 * step, base + 19 * step,
604
+ base + 20 * step, base + 21 * step, base + 22 * step, base + 23 * step,
605
+ base + 24 * step, base + 25 * step, base + 26 * step, base + 27 * step,
606
+ base + 28 * step, base + 29 * step, base + 30 * step, base + 31 * step);
607
+ }
608
+ static Vectorized<T>
609
+ set(Vectorized<T> a, Vectorized<T> b, T count = size()) {
610
+ switch (count) {
611
+ case 0:
612
+ return a;
613
+ case 1:
614
+ return blend<0x1>(a, b);
615
+ case 2:
616
+ return blend<0x3>(a, b);
617
+ case 3:
618
+ return blend<0x7>(a, b);
619
+ case 4:
620
+ return blend<0xF>(a, b);
621
+ case 5:
622
+ return blend<0x1F>(a, b);
623
+ case 6:
624
+ return blend<0x3F>(a, b);
625
+ case 7:
626
+ return blend<0x7F>(a, b);
627
+ case 8:
628
+ return blend<0xFF>(a, b);
629
+ case 9:
630
+ return blend<0x1FF>(a, b);
631
+ case 10:
632
+ return blend<0x3FF>(a, b);
633
+ case 11:
634
+ return blend<0x7FF>(a, b);
635
+ case 12:
636
+ return blend<0xFFF>(a, b);
637
+ case 13:
638
+ return blend<0x1FFF>(a, b);
639
+ case 14:
640
+ return blend<0x3FFF>(a, b);
641
+ case 15:
642
+ return blend<0x7FFF>(a, b);
643
+ case 16:
644
+ return blend<0xFFFF>(a, b);
645
+ case 17:
646
+ return blend<0x1FFFF>(a, b);
647
+ case 18:
648
+ return blend<0x3FFFF>(a, b);
649
+ case 19:
650
+ return blend<0x7FFFF>(a, b);
651
+ case 20:
652
+ return blend<0xFFFFF>(a, b);
653
+ case 21:
654
+ return blend<0x1FFFFF>(a, b);
655
+ case 22:
656
+ return blend<0x3FFFFF>(a, b);
657
+ case 23:
658
+ return blend<0x7FFFFF>(a, b);
659
+ case 24:
660
+ return blend<0xFFFFFF>(a, b);
661
+ case 25:
662
+ return blend<0x1FFFFFF>(a, b);
663
+ case 26:
664
+ return blend<0x3FFFFFF>(a, b);
665
+ case 27:
666
+ return blend<0x7FFFFFF>(a, b);
667
+ case 28:
668
+ return blend<0xFFFFFFF>(a, b);
669
+ case 29:
670
+ return blend<0x1FFFFFFF>(a, b);
671
+ case 30:
672
+ return blend<0x3FFFFFFF>(a, b);
673
+ case 31:
674
+ return blend<0x7FFFFFFF>(a, b);
675
+ }
676
+ return b;
677
+ }
678
+ static Vectorized<T> loadu(const void* ptr) {
679
+ return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(ptr));
680
+ }
681
+ static Vectorized<T> loadu_one_fourth(const void* ptr) {
682
+ // Fast path when only 8 elements are loaded.
683
+ // Note: this is not merged into loadu(const void* ptr, T count) as a fast path,
684
+ // because loadu(const void* ptr, T count) requires zero-initialization of the upper 128 bits.
685
+ // However, by using _mm256_castsi128_si256, the upper 128 bits of the result are undefined.
686
+ // TODO<leslie> We can use _mm256_zextsi128_si256 in the future,
687
+ // since gcc 9.3 doesn't support it now.
688
+ __m128i input_128 = _mm_loadl_epi64(reinterpret_cast<const __m128i*>(ptr));
689
+ return _mm256_castsi128_si256(input_128);
690
+ }
691
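// Hedged reading of loadu_one_fourth above (illustrative, not authoritative):
// _mm_loadl_epi64 reads exactly 8 bytes -- one fourth of the 32 byte lanes --
// into the low 64 bits and zeroes bits 64..127, while _mm256_castsi128_si256
// leaves bits 128..255 unspecified; callers should therefore only rely on the
// first 8 lanes of the result.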
+ static Vectorized<T> loadu(const void* ptr, T count) {
692
+ __at_align__ T tmp_values[size()];
693
+ // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
694
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
695
+ // instructions while a loop would be compiled to one instruction.
696
+ for (const auto i : c10::irange(size())) {
697
+ tmp_values[i] = 0;
698
+ }
699
+ std::memcpy(tmp_values, ptr, count * sizeof(T));
700
+ return loadu(tmp_values);
701
+ }
702
+ void store(void* ptr, int count = size()) const {
703
+ if (count == size()) {
704
+ // ptr need not be aligned here. See
705
+ // https://software.intel.com/content/www/us/en/develop/documentation/cpp-compiler-developer-guide-and-reference/top/compiler-reference/intrinsics/intrinsics-for-intel-advanced-vector-extensions/intrinsics-for-load-and-store-operations-1/mm256-storeu-si256.html
706
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>(ptr), values);
707
+ } else if (count > 0) {
708
+ if (count == 8) {
709
+ // Fast path when only 8 elements are stored
710
+ _mm_storel_epi64(reinterpret_cast<__m128i*>(ptr), _mm256_castsi256_si128(values));
711
+ } else {
712
+ __at_align__ T tmp_values[size()];
713
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>(tmp_values), values);
714
+ std::memcpy(ptr, tmp_values, count * sizeof(T));
715
+ }
716
+ }
717
+ }
718
+ const T& operator[](int idx) const = delete;
719
+ T& operator[](int idx) = delete;
720
+ Vectorized<T> real() const {
721
+ return *this;
722
+ }
723
+ Vectorized<T> imag() const {
724
+ return _mm256_set1_epi8(0);
725
+ }
726
+ Vectorized<T> conj() const {
727
+ return *this;
728
+ }
729
+ };
730
+
731
+ template<>
732
+ class Vectorized<int8_t>: public Vectorized8<int8_t> {
733
+ public:
734
+ using Vectorized8::Vectorized8;
735
+
736
+ Vectorized<int8_t> neg() const;
737
+
738
+ Vectorized<int8_t> abs() const {
739
+ return _mm256_abs_epi8(values);
740
+ }
741
+
742
+ Vectorized<int8_t> operator==(const Vectorized<int8_t>& other) const {
743
+ return _mm256_cmpeq_epi8(values, other.values);
744
+ }
745
+ Vectorized<int8_t> operator!=(const Vectorized<int8_t>& other) const {
746
+ return invert(_mm256_cmpeq_epi8(values, other.values));
747
+ }
748
+ Vectorized<int8_t> operator<(const Vectorized<int8_t>& other) const {
749
+ return _mm256_cmpgt_epi8(other.values, values);
750
+ }
751
+ Vectorized<int8_t> operator<=(const Vectorized<int8_t>& other) const {
752
+ return invert(_mm256_cmpgt_epi8(values, other.values));
753
+ }
754
+ Vectorized<int8_t> operator>(const Vectorized<int8_t>& other) const {
755
+ return other < *this;
756
+ }
757
+ Vectorized<int8_t> operator>=(const Vectorized<int8_t>& other) const {
758
+ return other <= *this;
759
+ }
760
+
761
+ Vectorized<int8_t> eq(const Vectorized<int8_t>& other) const;
762
+ Vectorized<int8_t> ne(const Vectorized<int8_t>& other) const;
763
+ Vectorized<int8_t> gt(const Vectorized<int8_t>& other) const;
764
+ Vectorized<int8_t> ge(const Vectorized<int8_t>& other) const;
765
+ Vectorized<int8_t> lt(const Vectorized<int8_t>& other) const;
766
+ Vectorized<int8_t> le(const Vectorized<int8_t>& other) const;
767
+ };
768
+
769
+ template<>
770
+ class Vectorized<uint8_t>: public Vectorized8<uint8_t> {
771
+ public:
772
+ using Vectorized8::Vectorized8;
773
+
774
+ Vectorized<uint8_t> neg() const;
775
+
776
+ Vectorized<uint8_t> abs() const {
777
+ return *this;
778
+ }
779
+
780
+ Vectorized<uint8_t> operator==(const Vectorized<uint8_t>& other) const {
781
+ return _mm256_cmpeq_epi8(values, other.values);
782
+ }
783
+ Vectorized<uint8_t> operator!=(const Vectorized<uint8_t>& other) const {
784
+ return invert(_mm256_cmpeq_epi8(values, other.values));
785
+ }
786
+ Vectorized<uint8_t> operator<(const Vectorized<uint8_t>& other) const {
787
+ __m256i max = _mm256_max_epu8(values, other.values);
788
+ return invert(_mm256_cmpeq_epi8(max, values));
789
+ }
790
+ Vectorized<uint8_t> operator<=(const Vectorized<uint8_t>& other) const {
791
+ __m256i max = _mm256_max_epu8(values, other.values);
792
+ return _mm256_cmpeq_epi8(max, other.values);
793
+ }
794
+ Vectorized<uint8_t> operator>(const Vectorized<uint8_t>& other) const {
795
+ return other < *this;
796
+ }
797
+ Vectorized<uint8_t> operator>=(const Vectorized<uint8_t>& other) const {
798
+ return other <= *this;
799
+ }
800
+
801
+ Vectorized<uint8_t> eq(const Vectorized<uint8_t>& other) const;
802
+ Vectorized<uint8_t> ne(const Vectorized<uint8_t>& other) const;
803
+ Vectorized<uint8_t> gt(const Vectorized<uint8_t>& other) const;
804
+ Vectorized<uint8_t> ge(const Vectorized<uint8_t>& other) const;
805
+ Vectorized<uint8_t> lt(const Vectorized<uint8_t>& other) const;
806
+ Vectorized<uint8_t> le(const Vectorized<uint8_t>& other) const;
807
+ };
808
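// The unsigned comparisons above use a max/equality identity instead of a
// direct unsigned byte compare (which AVX2 does not provide). A scalar sketch
// of the identity, offered only as an illustration:
//   a <  b  <=>  max(a, b) != a
//   a <= b  <=>  max(a, b) == b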
+
809
+ template <>
810
+ Vectorized<int64_t> inline operator+(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
811
+ return _mm256_add_epi64(a, b);
812
+ }
813
+
814
+ template <>
815
+ Vectorized<int32_t> inline operator+(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
816
+ return _mm256_add_epi32(a, b);
817
+ }
818
+
819
+ template <>
820
+ Vectorized<int16_t> inline operator+(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
821
+ return _mm256_add_epi16(a, b);
822
+ }
823
+
824
+ template <>
825
+ Vectorized<int8_t> inline operator+(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
826
+ return _mm256_add_epi8(a, b);
827
+ }
828
+
829
+ template <>
830
+ Vectorized<uint8_t> inline operator+(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
831
+ return _mm256_add_epi8(a, b);
832
+ }
833
+
834
+ template <>
835
+ Vectorized<int64_t> inline operator-(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
836
+ return _mm256_sub_epi64(a, b);
837
+ }
838
+
839
+ template <>
840
+ Vectorized<int32_t> inline operator-(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
841
+ return _mm256_sub_epi32(a, b);
842
+ }
843
+
844
+ template <>
845
+ Vectorized<int16_t> inline operator-(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
846
+ return _mm256_sub_epi16(a, b);
847
+ }
848
+
849
+ template <>
850
+ Vectorized<int8_t> inline operator-(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
851
+ return _mm256_sub_epi8(a, b);
852
+ }
853
+
854
+ template <>
855
+ Vectorized<uint8_t> inline operator-(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
856
+ return _mm256_sub_epi8(a, b);
857
+ }
858
+
859
+ // Negation. Defined here so we can utilize operator-
860
+ inline Vectorized<int64_t> Vectorized<int64_t>::neg() const {
861
+ return Vectorized<int64_t>(0) - *this;
862
+ }
863
+
864
+ inline Vectorized<int32_t> Vectorized<int32_t>::neg() const {
865
+ return Vectorized<int32_t>(0) - *this;
866
+ }
867
+
868
+ inline Vectorized<int16_t> Vectorized<int16_t>::neg() const {
869
+ return Vectorized<int16_t>(0) - *this;
870
+ }
871
+
872
+ inline Vectorized<int8_t> Vectorized<int8_t>::neg() const {
873
+ return Vectorized<int8_t>(0) - *this;
874
+ }
875
+
876
+ inline Vectorized<uint8_t> Vectorized<uint8_t>::neg() const {
877
+ return Vectorized<uint8_t>(0) - *this;
878
+ }
879
+
880
+ // Emulate operations with no native 64-bit support in AVX,
881
+ // by extracting each element, performing the operation pointwise,
882
+ // then combining the results into a vector.
883
+ template <typename op_t>
884
+ Vectorized<int64_t> inline emulate(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b, const op_t& op) {
885
+ int64_t a0 = _mm256_extract_epi64(a, 0);
886
+ int64_t a1 = _mm256_extract_epi64(a, 1);
887
+ int64_t a2 = _mm256_extract_epi64(a, 2);
888
+ int64_t a3 = _mm256_extract_epi64(a, 3);
889
+
890
+ int64_t b0 = _mm256_extract_epi64(b, 0);
891
+ int64_t b1 = _mm256_extract_epi64(b, 1);
892
+ int64_t b2 = _mm256_extract_epi64(b, 2);
893
+ int64_t b3 = _mm256_extract_epi64(b, 3);
894
+
895
+ int64_t c0 = op(a0, b0);
896
+ int64_t c1 = op(a1, b1);
897
+ int64_t c2 = op(a2, b2);
898
+ int64_t c3 = op(a3, b3);
899
+
900
+ return _mm256_set_epi64x(c3, c2, c1, c0);
901
+ }
902
+
903
+ template <typename op_t>
904
+ Vectorized<int64_t> inline emulate(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b, const Vectorized<int64_t>& c, const op_t& op) {
905
+ int64_t a0 = _mm256_extract_epi64(a, 0);
906
+ int64_t a1 = _mm256_extract_epi64(a, 1);
907
+ int64_t a2 = _mm256_extract_epi64(a, 2);
908
+ int64_t a3 = _mm256_extract_epi64(a, 3);
909
+
910
+ int64_t b0 = _mm256_extract_epi64(b, 0);
911
+ int64_t b1 = _mm256_extract_epi64(b, 1);
912
+ int64_t b2 = _mm256_extract_epi64(b, 2);
913
+ int64_t b3 = _mm256_extract_epi64(b, 3);
914
+
915
+ int64_t c0 = _mm256_extract_epi64(c, 0);
916
+ int64_t c1 = _mm256_extract_epi64(c, 1);
917
+ int64_t c2 = _mm256_extract_epi64(c, 2);
918
+ int64_t c3 = _mm256_extract_epi64(c, 3);
919
+
920
+ int64_t d0 = op(a0, b0, c0);
921
+ int64_t d1 = op(a1, b1, c1);
922
+ int64_t d2 = op(a2, b2, c2);
923
+ int64_t d3 = op(a3, b3, c3);
924
+
925
+ return _mm256_set_epi64x(d3, d2, d1, d0);
926
+ }
927
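// Sketch of how the emulate() helpers above work (an informal gloss, not
// upstream documentation): each 64-bit lane is pulled out with
// _mm256_extract_epi64, the scalar op is applied lane-wise, and the results
// are repacked with _mm256_set_epi64x, whose arguments run from the highest
// lane down to the lowest -- hence the reversed (c3, c2, c1, c0) and
// (d3, d2, d1, d0) argument order.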
+
928
+ // AVX2 has no intrinsic for int64_t multiply, so it needs to be emulated.
929
+ // This could be implemented more efficiently using epi32 instructions
930
+ // This is also technically AVX compatible, but then we'll need AVX
931
+ // code for add as well.
932
+ // Note: intentionally ignores undefined behavior like (-lowest * -1).
933
+ template <>
934
+ Vectorized<int64_t> inline operator*(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
935
+ return emulate(a, b, [](int64_t a_point, int64_t b_point) __ubsan_ignore_undefined__ {return a_point * b_point;});
936
+ }
937
+
938
+ template <>
939
+ Vectorized<int32_t> inline operator*(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
940
+ return _mm256_mullo_epi32(a, b);
941
+ }
942
+
943
+ template <>
944
+ Vectorized<int16_t> inline operator*(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
945
+ return _mm256_mullo_epi16(a, b);
946
+ }
947
+
948
+ template <typename T, typename Op>
949
+ Vectorized<T> inline int_elementwise_binary_256(const Vectorized<T>& a, const Vectorized<T>& b, Op op) {
950
+ T values_a[Vectorized<T>::size()];
951
+ T values_b[Vectorized<T>::size()];
952
+ a.store(values_a);
953
+ b.store(values_b);
954
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
955
+ values_a[i] = op(values_a[i], values_b[i]);
956
+ }
957
+ return Vectorized<T>::loadu(values_a);
958
+ }
959
+
960
+ template <>
961
+ Vectorized<int8_t> inline operator*(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
962
+ // We don't have an instruction for multiplying int8_t
963
+ #ifndef CPU_CAPABILITY_AVX2
964
+ return int_elementwise_binary_256(a, b, std::multiplies<int8_t>());
965
+ #else
966
+ __m256i mask00FF = _mm256_set1_epi16(0x00FF);
967
+ __m256i a_lo = _mm256_srai_epi16(_mm256_slli_epi16(a, 8), 8);
968
+ __m256i b_lo = _mm256_srai_epi16(_mm256_slli_epi16(b, 8), 8);
969
+ __m256i a_hi = _mm256_srai_epi16(a, 8);
970
+ __m256i b_hi = _mm256_srai_epi16(b, 8);
971
+ __m256i res_lo = _mm256_and_si256(_mm256_mullo_epi16(a_lo, b_lo), mask00FF);
972
+ __m256i res_hi = _mm256_slli_epi16(_mm256_mullo_epi16(a_hi, b_hi), 8);
973
+ __m256i res = _mm256_or_si256(res_hi, res_lo);
974
+ return res;
975
+ #endif
976
+ }
977
+
978
+ template <>
979
+ Vectorized<uint8_t> inline operator*(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
980
+ // We don't have an instruction for multiplying uint8_t
981
+ #ifndef CPU_CAPABILITY_AVX2
982
+ return int_elementwise_binary_256(a, b, std::multiplies<uint8_t>());
983
+ #else
984
+ __m256i mask00FF = _mm256_set1_epi16(0x00FF);
985
+ __m256i a_lo = _mm256_and_si256 (a, mask00FF);
986
+ __m256i b_lo = _mm256_and_si256 (b, mask00FF);
987
+ __m256i a_hi = _mm256_srli_epi16(a, 8);
988
+ __m256i b_hi = _mm256_srli_epi16(b, 8);
989
+ __m256i res_lo = _mm256_and_si256(_mm256_mullo_epi16(a_lo, b_lo), mask00FF);
990
+ __m256i res_hi = _mm256_slli_epi16(_mm256_mullo_epi16(a_hi, b_hi), 8);
991
+ __m256i res = _mm256_or_si256(res_hi, res_lo);
992
+ return res;
993
+ #endif
994
+ }
995
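// Hedged scalar model of the 8-bit multiply emulations above (illustrative
// only): the even-indexed and odd-indexed bytes are each widened to 16-bit
// lanes (sign-extended for int8_t, zero-extended for uint8_t), multiplied
// with _mm256_mullo_epi16, and the low byte of every 16-bit product is masked
// or shifted back into its original byte position. Per lane this computes
//   res[i] = static_cast<T>(a[i] * b[i]);   // product truncated to 8 bits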
+
996
+ template <>
997
+ Vectorized<int64_t> inline minimum(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
998
+ #ifndef CPU_CAPABILITY_AVX2
999
+ return emulate(a, b, [](int64_t a_point, int64_t b_point) {return std::min(a_point, b_point);});
1000
+ #else
1001
+ __m256i cmp = _mm256_cmpgt_epi64(a, b);
1002
+ return _mm256_blendv_epi8(a, b, cmp);
1003
+ #endif
1004
+ }
1005
+
1006
+ template <>
1007
+ Vectorized<int32_t> inline minimum(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
1008
+ return _mm256_min_epi32(a, b);
1009
+ }
1010
+
1011
+ template <>
1012
+ Vectorized<int16_t> inline minimum(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
1013
+ return _mm256_min_epi16(a, b);
1014
+ }
1015
+
1016
+ template <>
1017
+ Vectorized<int8_t> inline minimum(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
1018
+ return _mm256_min_epi8(a, b);
1019
+ }
1020
+
1021
+ template <>
1022
+ Vectorized<uint8_t> inline minimum(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
1023
+ return _mm256_min_epu8(a, b);
1024
+ }
1025
+
1026
+ template <>
1027
+ Vectorized<int64_t> inline maximum(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
1028
+ #ifndef CPU_CAPABILITY_AVX2
1029
+ return emulate(a, b, [](int64_t a_point, int64_t b_point) {return std::max(a_point, b_point);});
1030
+ #else
1031
+ __m256i cmp = _mm256_cmpgt_epi64(a, b);
1032
+ return _mm256_blendv_epi8(b, a, cmp);
1033
+ #endif
1034
+ }
1035
+
1036
+ template <>
1037
+ Vectorized<int32_t> inline maximum(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
1038
+ return _mm256_max_epi32(a, b);
1039
+ }
1040
+
1041
+ template <>
1042
+ Vectorized<int16_t> inline maximum(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
1043
+ return _mm256_max_epi16(a, b);
1044
+ }
1045
+
1046
+ template <>
1047
+ Vectorized<int8_t> inline maximum(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
1048
+ return _mm256_max_epi8(a, b);
1049
+ }
1050
+
1051
+ template <>
1052
+ Vectorized<uint8_t> inline maximum(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
1053
+ return _mm256_max_epu8(a, b);
1054
+ }
1055
+
1056
+ template <>
1057
+ Vectorized<int64_t> inline clamp(const Vectorized<int64_t>& a, const Vectorized<int64_t>& min_val, const Vectorized<int64_t>& max_val) {
1058
+ #ifndef CPU_CAPABILITY_AVX2
1059
+ return emulate(a, min_val, max_val, [](int64_t a_point, int64_t min_point, int64_t max_point) {return std::min(max_point, std::max(a_point, min_point));});
1060
+ #else
1061
+ return minimum(maximum(a, min_val), max_val);
1062
+ #endif
1063
+ }
1064
+
1065
+ template <>
1066
+ Vectorized<int32_t> inline clamp(const Vectorized<int32_t>& a, const Vectorized<int32_t>& min_val, const Vectorized<int32_t>& max_val) {
1067
+ return _mm256_min_epi32(max_val, _mm256_max_epi32(a, min_val));
1068
+ }
1069
+
1070
+ template <>
1071
+ Vectorized<int16_t> inline clamp(const Vectorized<int16_t>& a, const Vectorized<int16_t>& min_val, const Vectorized<int16_t>& max_val) {
1072
+ return _mm256_min_epi16(max_val, _mm256_max_epi16(a, min_val));
1073
+ }
1074
+
1075
+ template <>
1076
+ Vectorized<int8_t> inline clamp(const Vectorized<int8_t>& a, const Vectorized<int8_t>& min_val, const Vectorized<int8_t>& max_val) {
1077
+ return _mm256_min_epi8(max_val, _mm256_max_epi8(a, min_val));
1078
+ }
1079
+
1080
+ template <>
1081
+ Vectorized<uint8_t> inline clamp(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& min_val, const Vectorized<uint8_t>& max_val) {
1082
+ return _mm256_min_epu8(max_val, _mm256_max_epu8(a, min_val));
1083
+ }
1084
+
1085
+ template <>
1086
+ Vectorized<int64_t> inline clamp_max(const Vectorized<int64_t>& a, const Vectorized<int64_t>& max_val) {
1087
+ #ifndef CPU_CAPABILITY_AVX2
1088
+ return emulate(a, max_val, [](int64_t a_point, int64_t max_point) {return std::min(max_point, a_point);});
1089
+ #else
1090
+ return minimum(max_val, a);
1091
+ #endif
1092
+ }
1093
+
1094
+ template <>
1095
+ Vectorized<int32_t> inline clamp_max(const Vectorized<int32_t>& a, const Vectorized<int32_t>& max_val) {
1096
+ return _mm256_min_epi32(max_val, a);
1097
+ }
1098
+
1099
+ template <>
1100
+ Vectorized<int16_t> inline clamp_max(const Vectorized<int16_t>& a, const Vectorized<int16_t>& max_val) {
1101
+ return _mm256_min_epi16(max_val, a);
1102
+ }
1103
+
1104
+ template <>
1105
+ Vectorized<int8_t> inline clamp_max(const Vectorized<int8_t>& a, const Vectorized<int8_t>& max_val) {
1106
+ return _mm256_min_epi8(max_val, a);
1107
+ }
1108
+
1109
+ template <>
1110
+ Vectorized<uint8_t> inline clamp_max(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& max_val) {
1111
+ return _mm256_min_epu8(max_val, a);
1112
+ }
1113
+
1114
+ template <>
1115
+ Vectorized<int64_t> inline clamp_min(const Vectorized<int64_t>& a, const Vectorized<int64_t>& min_val) {
1116
+ #ifndef CPU_CAPABILITY_AVX2
1117
+ return emulate(a, min_val, [](int64_t a_point, int64_t min_point) {return std::max(min_point, a_point);});
1118
+ #else
1119
+ return maximum(min_val, a);
1120
+ #endif
1121
+ }
1122
+
1123
+ template <>
1124
+ Vectorized<int32_t> inline clamp_min(const Vectorized<int32_t>& a, const Vectorized<int32_t>& min_val) {
1125
+ return _mm256_max_epi32(min_val, a);
1126
+ }
1127
+
1128
+ template <>
1129
+ Vectorized<int16_t> inline clamp_min(const Vectorized<int16_t>& a, const Vectorized<int16_t>& min_val) {
1130
+ return _mm256_max_epi16(min_val, a);
1131
+ }
1132
+
1133
+ template <>
1134
+ Vectorized<int8_t> inline clamp_min(const Vectorized<int8_t>& a, const Vectorized<int8_t>& min_val) {
1135
+ return _mm256_max_epi8(min_val, a);
1136
+ }
1137
+
1138
+ template <>
1139
+ Vectorized<uint8_t> inline clamp_min(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& min_val) {
1140
+ return _mm256_max_epu8(min_val, a);
1141
+ }
1142
+
1143
+ template<typename T>
1144
+ Vectorized<int32_t> inline convert_to_int32(const T* ptr) {
1145
+ return Vectorized<int32_t>::loadu(ptr);
1146
+ }
1147
+
1148
+ template<>
1149
+ Vectorized<int32_t> inline convert_to_int32<int8_t>(const int8_t* ptr) {
1150
+ return _mm256_cvtepi8_epi32(_mm_loadl_epi64(reinterpret_cast<const __m128i*>(ptr)));
1151
+ }
1152
+
1153
+ template<>
1154
+ Vectorized<int32_t> inline convert_to_int32<uint8_t>(const uint8_t* ptr) {
1155
+ return _mm256_cvtepu8_epi32(_mm_loadl_epi64(reinterpret_cast<const __m128i*>(ptr)));
1156
+ }
1157
+
1158
+ template <>
1159
+ Vectorized<int64_t> inline operator/(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
1160
+ return int_elementwise_binary_256(a, b, std::divides<int64_t>());
1161
+ }
1162
+ template <>
1163
+ Vectorized<int32_t> inline operator/(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
1164
+ return int_elementwise_binary_256(a, b, std::divides<int32_t>());
1165
+ }
1166
+ template <>
1167
+ Vectorized<int16_t> inline operator/(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
1168
+ return int_elementwise_binary_256(a, b, std::divides<int16_t>());
1169
+ }
1170
+ template <>
1171
+ Vectorized<int8_t> inline operator/(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
1172
+ return int_elementwise_binary_256(a, b, std::divides<int8_t>());
1173
+ }
1174
+ template <>
1175
+ Vectorized<uint8_t> inline operator/(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
1176
+ return int_elementwise_binary_256(a, b, std::divides<uint8_t>());
1177
+ }
1178
+
1179
+ template<class T, typename std::enable_if_t<std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
1180
+ inline Vectorized<T> operator&(const Vectorized<T>& a, const Vectorized<T>& b) {
1181
+ return _mm256_and_si256(a, b);
1182
+ }
1183
+ template<class T, typename std::enable_if_t<std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
1184
+ inline Vectorized<T> operator|(const Vectorized<T>& a, const Vectorized<T>& b) {
1185
+ return _mm256_or_si256(a, b);
1186
+ }
1187
+ template<class T, typename std::enable_if_t<std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
1188
+ inline Vectorized<T> operator^(const Vectorized<T>& a, const Vectorized<T>& b) {
1189
+ return _mm256_xor_si256(a, b);
1190
+ }
1191
+ template<class T, typename std::enable_if_t<std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
1192
+ inline Vectorized<T> operator~(const Vectorized<T>& a) {
1193
+ return _mm256_xor_si256(a, _mm256_set1_epi32(-1));
1194
+ }
1195
+
1196
+ inline Vectorized<int64_t> Vectorized<int64_t>::eq(const Vectorized<int64_t>& other) const {
1197
+ return (*this == other) & Vectorized<int64_t>(1);
1198
+ }
1199
+
1200
+ inline Vectorized<int64_t> Vectorized<int64_t>::ne(const Vectorized<int64_t>& other) const {
1201
+ return (*this != other) & Vectorized<int64_t>(1);
1202
+ }
1203
+
1204
+ inline Vectorized<int64_t> Vectorized<int64_t>::gt(const Vectorized<int64_t>& other) const {
1205
+ return (*this > other) & Vectorized<int64_t>(1);
1206
+ }
1207
+
1208
+ inline Vectorized<int64_t> Vectorized<int64_t>::ge(const Vectorized<int64_t>& other) const {
1209
+ return (*this >= other) & Vectorized<int64_t>(1);
1210
+ }
1211
+
1212
+ inline Vectorized<int64_t> Vectorized<int64_t>::lt(const Vectorized<int64_t>& other) const {
1213
+ return (*this < other) & Vectorized<int64_t>(1);
1214
+ }
1215
+
1216
+ inline Vectorized<int64_t> Vectorized<int64_t>::le(const Vectorized<int64_t>& other) const {
1217
+ return (*this <= other) & Vectorized<int64_t>(1);
1218
+ }
1219
+
1220
+ inline Vectorized<int32_t> Vectorized<int32_t>::eq(const Vectorized<int32_t>& other) const {
1221
+ return (*this == other) & Vectorized<int32_t>(1);
1222
+ }
1223
+
1224
+ inline Vectorized<int32_t> Vectorized<int32_t>::ne(const Vectorized<int32_t>& other) const {
1225
+ return (*this != other) & Vectorized<int32_t>(1);
1226
+ }
1227
+
1228
+ inline Vectorized<int32_t> Vectorized<int32_t>::gt(const Vectorized<int32_t>& other) const {
1229
+ return (*this > other) & Vectorized<int32_t>(1);
1230
+ }
1231
+
1232
+ inline Vectorized<int32_t> Vectorized<int32_t>::ge(const Vectorized<int32_t>& other) const {
1233
+ return (*this >= other) & Vectorized<int32_t>(1);
1234
+ }
1235
+
1236
+ inline Vectorized<int32_t> Vectorized<int32_t>::lt(const Vectorized<int32_t>& other) const {
1237
+ return (*this < other) & Vectorized<int32_t>(1);
1238
+ }
1239
+
1240
+ inline Vectorized<int32_t> Vectorized<int32_t>::le(const Vectorized<int32_t>& other) const {
1241
+ return (*this <= other) & Vectorized<int32_t>(1);
1242
+ }
1243
+
1244
+ inline Vectorized<int16_t> Vectorized<int16_t>::eq(const Vectorized<int16_t>& other) const {
1245
+ return (*this == other) & Vectorized<int16_t>(1);
1246
+ }
1247
+
1248
+ inline Vectorized<int16_t> Vectorized<int16_t>::ne(const Vectorized<int16_t>& other) const {
1249
+ return (*this != other) & Vectorized<int16_t>(1);
1250
+ }
1251
+
1252
+ inline Vectorized<int16_t> Vectorized<int16_t>::gt(const Vectorized<int16_t>& other) const {
1253
+ return (*this > other) & Vectorized<int16_t>(1);
1254
+ }
1255
+
1256
+ inline Vectorized<int16_t> Vectorized<int16_t>::ge(const Vectorized<int16_t>& other) const {
1257
+ return (*this >= other) & Vectorized<int16_t>(1);
1258
+ }
1259
+
1260
+ inline Vectorized<int16_t> Vectorized<int16_t>::lt(const Vectorized<int16_t>& other) const {
1261
+ return (*this < other) & Vectorized<int16_t>(1);
1262
+ }
1263
+
1264
+ inline Vectorized<int16_t> Vectorized<int16_t>::le(const Vectorized<int16_t>& other) const {
1265
+ return (*this <= other) & Vectorized<int16_t>(1);
1266
+ }
1267
+
1268
+ inline Vectorized<int8_t> Vectorized<int8_t>::eq(const Vectorized<int8_t>& other) const {
1269
+ return (*this == other) & Vectorized<int8_t>(1);
1270
+ }
1271
+
1272
+ inline Vectorized<int8_t> Vectorized<int8_t>::ne(const Vectorized<int8_t>& other) const {
1273
+ return (*this != other) & Vectorized<int8_t>(1);
1274
+ }
1275
+
1276
+ inline Vectorized<int8_t> Vectorized<int8_t>::gt(const Vectorized<int8_t>& other) const {
1277
+ return (*this > other) & Vectorized<int8_t>(1);
1278
+ }
1279
+
1280
+ inline Vectorized<int8_t> Vectorized<int8_t>::ge(const Vectorized<int8_t>& other) const {
1281
+ return (*this >= other) & Vectorized<int8_t>(1);
1282
+ }
1283
+
1284
+ inline Vectorized<int8_t> Vectorized<int8_t>::lt(const Vectorized<int8_t>& other) const {
1285
+ return (*this < other) & Vectorized<int8_t>(1);
1286
+ }
1287
+
1288
+ inline Vectorized<int8_t> Vectorized<int8_t>::le(const Vectorized<int8_t>& other) const {
1289
+ return (*this <= other) & Vectorized<int8_t>(1);
1290
+ }
1291
+
1292
+ inline Vectorized<uint8_t> Vectorized<uint8_t>::eq(const Vectorized<uint8_t>& other) const {
1293
+ return (*this == other) & Vectorized<uint8_t>(1);
1294
+ }
1295
+
1296
+ inline Vectorized<uint8_t> Vectorized<uint8_t>::ne(const Vectorized<uint8_t>& other) const {
1297
+ return (*this != other) & Vectorized<uint8_t>(1);
1298
+ }
1299
+
1300
+ inline Vectorized<uint8_t> Vectorized<uint8_t>::gt(const Vectorized<uint8_t>& other) const {
1301
+ return (*this > other) & Vectorized<uint8_t>(1);
1302
+ }
1303
+
1304
+ inline Vectorized<uint8_t> Vectorized<uint8_t>::ge(const Vectorized<uint8_t>& other) const {
1305
+ return (*this >= other) & Vectorized<uint8_t>(1);
1306
+ }
1307
+
1308
+ inline Vectorized<uint8_t> Vectorized<uint8_t>::lt(const Vectorized<uint8_t>& other) const {
1309
+ return (*this < other) & Vectorized<uint8_t>(1);
1310
+ }
1311
+
1312
+ inline Vectorized<uint8_t> Vectorized<uint8_t>::le(const Vectorized<uint8_t>& other) const {
1313
+ return (*this <= other) & Vectorized<uint8_t>(1);
1314
+ }
1315
+
1316
+ template <bool left_shift>
1317
+ Vectorized<int16_t> inline shift_256_16(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
1318
+ // No vector instruction for shifting int16_t, so emulating it instead.
1319
+
1320
+ // Control masks for shuffle operation, treating 256 bits as an
1321
+ // array of 16-bit elements, and considering pairs of neighboring
1322
+ // elements. Specifically, a mask named "ctl_M_N" (M,N in [0,1], and
1323
+ // M!=N) is set so that shuffle will move element with index M from
1324
+ // input pair into element with index N in output pair, and element
1325
+ // with index M in output pair will be set to all 0s.
1326
+ __m256i ctl_0_1 = _mm256_set_epi8(29, 28, 0x80, 0x80, 25, 24, 0x80, 0x80,
1327
+ 21, 20, 0x80, 0x80, 17, 16, 0x80, 0x80,
1328
+ 13, 12, 0x80, 0x80, 9, 8, 0x80, 0x80,
1329
+ 5, 4, 0x80, 0x80, 1, 0, 0x80, 0x80);
1330
+ __m256i ctl_1_0 = _mm256_set_epi8(0x80, 0x80, 31, 30, 0x80, 0x80, 27, 26,
1331
+ 0x80, 0x80, 23, 22, 0x80, 0x80, 19, 18,
1332
+ 0x80, 0x80, 15, 14, 0x80, 0x80, 11, 10,
1333
+ 0x80, 0x80, 7, 6, 0x80, 0x80, 3, 2);
1334
+
1335
+ // Masks for bitwise and operation, treating 256 bits as an array of
1336
+ // 16-bit elements, and considering them in pairs of neighboring
1337
+ // elements. A mask named "keep_M" (M in [0,1]) is set so that
1338
+ // bitwise and will copy element with index M from input pair into
1339
+ // element with the same index in output pair, while the other
1340
+ // element in output pair will be set to all 0s.
1341
+ __m256i keep_0 = _mm256_set1_epi32(0xFFFF);
1342
+ __m256i keep_1 = _mm256_set1_epi32(0xFFFF0000);
1343
+
1344
+ // Take each 16-bit element with idx%2==0 from input array to be
1345
+ // shifted and extend it to 32 bits so that 0s are added to the
1346
+ // right. Then, perform shifting on this 32-bit number. Upper 16
1347
+ // bits will be proper result of shifting original 16-bit number, so
1348
+ // write them to result array, into the same position from which
1349
+ // corresponding input element is taken. Also, make sure that
1350
+ // result array elements with idx%2!=0 are set to all 0s.
1351
+ //
1352
+ // Note that the number of bits to shift is extended to 32 bits by
1353
+ // adding 0s to the left. That means this number is not properly
1354
+ // sign-extended for negative values. However, the number of bits to
1355
+ // shift is treated as an unsigned integer by respective shift
1356
+ // intrinsics anyway so if negative then either with or without
1357
+ // proper sign extension, it will be interpreted as a number greater
1358
+ // than 32, and the shifting result will be the same.
1359
+ __m256i a0 = _mm256_shuffle_epi8(a, ctl_0_1);
1360
+ __m256i b0 = _mm256_and_si256(b, keep_0);
1361
+ __m256i c0;
1362
+ if (left_shift)
1363
+ c0 = _mm256_sllv_epi32(a0, b0);
1364
+ else
1365
+ c0 = _mm256_srav_epi32(a0, b0);
1366
+ c0 = _mm256_shuffle_epi8(c0, ctl_1_0);
1367
+
1368
+ // Perform shifting the same way for input array elements with
1369
+ // idx%2==1.
1370
+ __m256i a1 = _mm256_and_si256(a, keep_1);
1371
+ __m256i b1 = _mm256_shuffle_epi8(b, ctl_1_0);
1372
+ __m256i c1;
1373
+ if (left_shift)
1374
+ c1 = _mm256_sllv_epi32(a1, b1);
1375
+ else
1376
+ c1 = _mm256_srav_epi32(a1, b1);
1377
+ c1 = _mm256_and_si256(c1, keep_1);
1378
+
1379
+ // Merge partial results into the final result.
1380
+ __m256i c = _mm256_or_si256(c0, c1);
1381
+
1382
+ return c;
1383
+ }
1384
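// Hedged summary of shift_256_16 (an informal sketch, not upstream
// documentation): each 16-bit element is shuffled or masked into the upper
// half of a 32-bit lane, the per-lane 32-bit variable shift
// (_mm256_sllv_epi32 / _mm256_srav_epi32) is applied, and the upper 16 bits
// of each result are moved back to the element's original position. Per
// element this is equivalent to
//   c[i] = left_shift ? int16_t(a[i] << b[i])
//                     : int16_t(a[i] >> b[i]);   // arithmetic shift
// with out-of-range shift counts handled as described in the comments above.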
+
1385
+ template <bool left_shift, typename T, typename std::enable_if_t<std::is_same<T, int8_t>::value || std::is_same<T, uint8_t>::value, int> = 0>
1386
+ Vectorized<T> inline shift_256_8(const Vectorized<T>& a, const Vectorized<T>& b) {
1387
+ // No vector instruction for shifting int8_t/uint8_t, so emulating
1388
+ // it instead.
1389
+
1390
+ // Control masks for shuffle operation, treating 256 bits as an
1391
+ // array of 8-bit elements, and considering quadruples of
1392
+ // neighboring elements. Specifically, a mask named "ctl_M_N" (M,N
1393
+ // in [0,1,2,3], and M!=N) is set so that shuffle will move element
1394
+ // with index M from input quadruple into element with index N in
1395
+ // output quadruple, and other elements in output quadruple will be
1396
+ // set to all 0s.
1397
+ __m256i ctl_0_3 = _mm256_set_epi8(28, 0x80, 0x80, 0x80, 24, 0x80, 0x80, 0x80,
1398
+ 20, 0x80, 0x80, 0x80, 16, 0x80, 0x80, 0x80,
1399
+ 12, 0x80, 0x80, 0x80, 8, 0x80, 0x80, 0x80,
1400
+ 4, 0x80, 0x80, 0x80, 0, 0x80, 0x80, 0x80);
1401
+ __m256i ctl_1_0 = _mm256_set_epi8(0x80, 0x80, 0x80, 29, 0x80, 0x80, 0x80, 25,
1402
+ 0x80, 0x80, 0x80, 21, 0x80, 0x80, 0x80, 17,
1403
+ 0x80, 0x80, 0x80, 13, 0x80, 0x80, 0x80, 9,
1404
+ 0x80, 0x80, 0x80, 5, 0x80, 0x80, 0x80, 1);
1405
+ __m256i ctl_1_3 = _mm256_set_epi8(29, 0x80, 0x80, 0x80, 25, 0x80, 0x80, 0x80,
1406
+ 21, 0x80, 0x80, 0x80, 17, 0x80, 0x80, 0x80,
1407
+ 13, 0x80, 0x80, 0x80, 9, 0x80, 0x80, 0x80,
1408
+ 5, 0x80, 0x80, 0x80, 1, 0x80, 0x80, 0x80);
1409
+ __m256i ctl_2_0 = _mm256_set_epi8(0x80, 0x80, 0x80, 30, 0x80, 0x80, 0x80, 26,
1410
+ 0x80, 0x80, 0x80, 22, 0x80, 0x80, 0x80, 18,
1411
+ 0x80, 0x80, 0x80, 14, 0x80, 0x80, 0x80, 10,
1412
+ 0x80, 0x80, 0x80, 6, 0x80, 0x80, 0x80, 2);
1413
+ __m256i ctl_2_3 = _mm256_set_epi8(30, 0x80, 0x80, 0x80, 26, 0x80, 0x80, 0x80,
1414
+ 22, 0x80, 0x80, 0x80, 18, 0x80, 0x80, 0x80,
1415
+ 14, 0x80, 0x80, 0x80, 10, 0x80, 0x80, 0x80,
1416
+ 6, 0x80, 0x80, 0x80, 2, 0x80, 0x80, 0x80);
1417
+ __m256i ctl_3_0 = _mm256_set_epi8(0x80, 0x80, 0x80, 31, 0x80, 0x80, 0x80, 27,
1418
+ 0x80, 0x80, 0x80, 23, 0x80, 0x80, 0x80, 19,
1419
+ 0x80, 0x80, 0x80, 15, 0x80, 0x80, 0x80, 11,
1420
+ 0x80, 0x80, 0x80, 7, 0x80, 0x80, 0x80, 3);
1421
+ __m256i ctl_3_1 = _mm256_set_epi8(0x80, 0x80, 31, 0x80, 0x80, 0x80, 27, 0x80,
1422
+ 0x80, 0x80, 23, 0x80, 0x80, 0x80, 19, 0x80,
1423
+ 0x80, 0x80, 15, 0x80, 0x80, 0x80, 11, 0x80,
1424
+ 0x80, 0x80, 7, 0x80, 0x80, 0x80, 3, 0x80);
1425
+ __m256i ctl_3_2 = _mm256_set_epi8(0x80, 31, 0x80, 0x80, 0x80, 27, 0x80, 0x80,
1426
+ 0x80, 23, 0x80, 0x80, 0x80, 19, 0x80, 0x80,
1427
+ 0x80, 15, 0x80, 0x80, 0x80, 11, 0x80, 0x80,
1428
+ 0x80, 7, 0x80, 0x80, 0x80, 3, 0x80, 0x80);
1429
+
1430
+ // Masks for bitwise and operation, treating 256 bits as an array of
1431
+ // 8-bit elements, and considering them in quadruples of neighboring
1432
+ // elements. A mask named "keep_M" (M in [0,1,2,3]) is set so that
1433
+ // bitwise and will copy element with index M from input quadruple
1434
+ // into element with the same index in output quadruple, while the
1435
+ // other elements in output quadruple will be set to all 0s.
1436
+ __m256i keep_0 = _mm256_set1_epi32(0xFF);
1437
+ __m256i keep_3 = _mm256_set1_epi32(0xFF000000);
1438
+
1439
+ // Take each 8-bit element with idx%4==0 from input array to be
1440
+ // shifted and extend it to 32 bits so that 0s are added to the
1441
+ // right. Then, perform shifting on this 32-bit number. Upper 8
1442
+ // bits will be proper result of shifting original 8-bit number, so
1443
+ // write them to result array, into the same position from which
1444
+ // corresponding input element is taken. Also, make sure that
1445
+ // result array elements with idx%4!=0 are set to all 0s.
1446
+ //
1447
+ // Note that the number of bits to shift is extended to 32 bits by
1448
+ // adding 0s to the left. That means this number is not properly
1449
+ // sign-extended for negative values. However, the number of bits to
1450
+ // shift is treated as an unsigned integer by respective shift
1451
+ // intrinsics anyway so if negative then either with or without
1452
+ // proper sign extension, it will be interpreted as a number greater
1453
+ // than 32, and the shifting result will be the same.
1454
+ __m256i a0 = _mm256_shuffle_epi8(a, ctl_0_3);
1455
+ __m256i b0 = _mm256_and_si256(b, keep_0);
1456
+ __m256i c0;
1457
+ if (left_shift)
1458
+ c0 = _mm256_sllv_epi32(a0, b0);
1459
+ else
1460
+ if constexpr (std::is_same_v<T, int8_t>)
1461
+ c0 = _mm256_srav_epi32(a0, b0);
1462
+ else
1463
+ c0 = _mm256_srlv_epi32(a0, b0);
1464
+ c0 = _mm256_shuffle_epi8(c0, ctl_3_0);
1465
+
1466
+ // Perform shifting the same way for input array elements with
1467
+ // idx%4==1.
1468
+ __m256i a1 = _mm256_shuffle_epi8(a, ctl_1_3);
1469
+ __m256i b1 = _mm256_shuffle_epi8(b, ctl_1_0);
1470
+ __m256i c1;
1471
+ if (left_shift)
1472
+ c1 = _mm256_sllv_epi32(a1, b1);
1473
+ else
1474
+ if constexpr (std::is_same_v<T, int8_t>)
1475
+ c1 = _mm256_srav_epi32(a1, b1);
1476
+ else
1477
+ c1 = _mm256_srlv_epi32(a1, b1);
1478
+ c1 = _mm256_shuffle_epi8(c1, ctl_3_1);
1479
+
1480
+ // Perform shifting the same way for input array elements with
1481
+ // idx%4==2.
1482
+ __m256i a2 = _mm256_shuffle_epi8(a, ctl_2_3);
1483
+ __m256i b2 = _mm256_shuffle_epi8(b, ctl_2_0);
1484
+ __m256i c2;
1485
+ if (left_shift)
1486
+ c2 = _mm256_sllv_epi32(a2, b2);
1487
+ else
1488
+ if constexpr (std::is_same_v<T, int8_t>)
1489
+ c2 = _mm256_srav_epi32(a2, b2);
1490
+ else
1491
+ c2 = _mm256_srlv_epi32(a2, b2);
1492
+ c2 = _mm256_shuffle_epi8(c2, ctl_3_2);
1493
+
1494
+ // Perform shifting the same way for input array elements with
1495
+ // idx%4==3.
1496
+ __m256i a3 = _mm256_and_si256(a, keep_3);
1497
+ __m256i b3 = _mm256_shuffle_epi8(b, ctl_3_0);
1498
+ __m256i c3;
1499
+ if (left_shift)
1500
+ c3 = _mm256_sllv_epi32(a3, b3);
1501
+ else
1502
+ if constexpr (std::is_same_v<T, int8_t>)
1503
+ c3 = _mm256_srav_epi32(a3, b3);
1504
+ else
1505
+ c3 = _mm256_srlv_epi32(a3, b3);
1506
+ c3 = _mm256_and_si256(c3, keep_3);
1507
+
1508
+ // Merge partial results into the final result.
1509
+ __m256i c01 = _mm256_or_si256(c0, c1);
1510
+ __m256i c23 = _mm256_or_si256(c2, c3);
1511
+ __m256i c = _mm256_or_si256(c01, c23);
1512
+
1513
+ return c;
1514
+ }
1515
+
1516
+ template <>
1517
+ Vectorized<int64_t> inline operator<<(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
1518
+ return _mm256_sllv_epi64(a, b);
1519
+ }
1520
+
1521
+ template <>
1522
+ Vectorized<int32_t> inline operator<<(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
1523
+ return _mm256_sllv_epi32(a, b);
1524
+ }
1525
+
1526
+ template <>
1527
+ Vectorized<int16_t> inline operator<<(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
1528
+ return shift_256_16<true>(a, b);
1529
+ }
1530
+
1531
+ template <>
1532
+ Vectorized<int8_t> inline operator<<(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
1533
+ return shift_256_8<true>(a, b);
1534
+ }
1535
+
1536
+ template <>
1537
+ Vectorized<uint8_t> inline operator<<(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
1538
+ return shift_256_8<true>(a, b);
1539
+ }
1540
+
1541
+ template <>
1542
+ Vectorized<int64_t> inline operator>>(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
1543
+ // No vector instruction for arithmetic right-shifting int64_t, so emulating it
1544
+ // instead.
1545
+
1546
+ // Clamp the shift values such that shift values < 0 or > 64 are changed to 64
1547
+ // which results in -1 for negative input and 0 for non-negative input.
1548
+ __m256i zero = _mm256_set1_epi64x(0);
1549
+ __m256i max_shift = _mm256_set1_epi64x(64);
1550
+ __m256i mask = _mm256_or_si256(_mm256_cmpgt_epi64(zero, b), _mm256_cmpgt_epi64(b, max_shift));
1551
+ __m256i shift = _mm256_blendv_epi8(b, max_shift, mask);
1552
+ // Shift the number logically to the right, thus filling the most
1553
+ // significant bits with 0s. Then, replace these bits with the sign
1554
+ // bit.
1555
+ __m256i sign_bits = _mm256_cmpgt_epi64(zero, a);
1556
+ __m256i sign_shift = _mm256_sub_epi64(max_shift, shift);
1557
+ __m256i sign_ext = _mm256_sllv_epi64(sign_bits, sign_shift);
1558
+ __m256i c = _mm256_srlv_epi64(a, shift);
1559
+ c = _mm256_or_si256(c, sign_ext);
1560
+
1561
+ return c;
1562
+ }
1563
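// Illustrative scalar reading of the int64_t arithmetic shift above (a
// hedged sketch, not authoritative): with the shift count clamped to
// [0, 64], each lane computes
//   result = (uint64_t(a) >> count) | (sign_bits << (64 - count))
// under AVX2 shift semantics, where sign_bits is all-ones for negative a and
// zero otherwise. Unlike scalar C++, the variable shifts treat counts of 64
// or more as "shift everything out" and return 0, which is exactly what
// makes the clamp to 64 yield -1 for negative inputs and 0 for non-negative
// ones.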
+
1564
+ template <>
1565
+ Vectorized<int32_t> inline operator>>(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
1566
+ return _mm256_srav_epi32(a, b);
1567
+ }
1568
+
1569
+ template <>
1570
+ Vectorized<int16_t> inline operator>>(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
1571
+ return shift_256_16<false>(a, b);
1572
+ }
1573
+
1574
+ template <>
1575
+ Vectorized<int8_t> inline operator>>(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
1576
+ return shift_256_8<false>(a, b);
1577
+ }
1578
+
1579
+ template <>
1580
+ Vectorized<uint8_t> inline operator>>(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
1581
+ return shift_256_8<false>(a, b);
1582
+ }
1583
+
1584
+ #endif
1585
+
1586
+ }} // namespace at::vec::CPU_CAPABILITY
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_qint.h ADDED
@@ -0,0 +1,1335 @@
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+
6
+ #include <ATen/cpu/vec/intrinsics.h>
7
+ #include <ATen/cpu/vec/vec_base.h>
8
+ #include <ATen/native/quantized/AffineQuantizerBase.h>
9
+
10
+ #include <c10/util/irange.h>
11
+ #include <c10/util/qint32.h>
12
+ #include <c10/util/qint8.h>
13
+ #include <c10/util/quint8.h>
14
+
15
+ #include <array>
16
+ #include <cmath>
17
+
18
+ // This file defines Vectorized<> for the quantized types.
19
+ //
20
+ //
21
+ // Currently, we simply use these classes as efficient converters between
22
+ // the quantized types and Vectorized<float>, usually in bandwidth-bound cases
23
+ // where doing the arithmetic in full-precision is acceptable (e.g.
24
+ // elementwise operators).
25
+ //
26
+ //
27
+ // Conversions are as follows:
28
+ // Vectorized<qint8> -> 4x Vectorized<float>
29
+ // Vectorized<quint8> -> 4x Vectorized<float>
30
+ // Vectorized<qint32> -> 1x Vectorized<float>
31
+ //
32
+ // The size of the returned float vector is specified by the special
33
+ // constexpr function float_num_vecs. The type of the value returned
34
+ // from dequantize (and expected as an argument to quantize) is
35
+ // specified by float_vec_return_type.
36
+ //
37
+ // When writing kernels with these vectors, it is expected that floating-
38
+ // point operations will be carried out in a loop over Vectorized<T>::float_num_vecs
39
+ // iterations.
40
+
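The kernel-loop pattern described above is easiest to see in a small example. Below is a minimal sketch (not part of this header) of an elementwise kernel over c10::qint8, assuming an ATen translation unit built with AVX2 and the usual CPU_CAPABILITY macro so that these headers compile; the function name relu_like and its raw-pointer interface are illustrative only.

    #include <ATen/cpu/vec/vec.h>
    #include <cstdint>

    // Dequantize -> float math over float_num_vecs() vectors -> requantize.
    void relu_like(const c10::qint8* src, c10::qint8* dst, int64_t n,
                   float scale, int32_t zero_point) {
      using qvec = at::vec::Vectorized<c10::qint8>;
      using fvec = at::vec::Vectorized<float>;
      fvec scale_v(scale);
      fvec zp_v(static_cast<float>(zero_point));
      fvec zero(0.0f);
      for (int64_t i = 0; i + qvec::size() <= n; i += qvec::size()) {
        qvec q = qvec::loadu(src + i);
        auto floats = q.dequantize(scale_v, zp_v);   // float_num_vecs() float vectors
        for (auto& f : floats) {
          f = at::vec::maximum(f, zero);             // the actual elementwise op
        }
        qvec::quantize(floats, scale, zero_point, 1.0f / scale).store(dst + i);
      }
      // A real kernel would also handle the tail (n % qvec::size() elements).
    }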
41
+ namespace at::vec {
42
+ inline namespace CPU_CAPABILITY {
43
+
44
+ #if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
45
+
46
+ struct Vectorizedqi {
47
+ protected:
48
+ __m256i vals __attribute__((aligned(64)));
49
+
50
+ public:
51
+ Vectorizedqi() {}
52
+ Vectorizedqi(__m256i v) : vals(v) {}
53
+ operator __m256i() const {
54
+ return vals;
55
+ }
56
+ };
57
+
58
+ template <typename T>
59
+ __m256i pack_saturate_and_clamp(
60
+ __m256i first,
61
+ __m256i second,
62
+ T min_val,
63
+ T max_val);
64
+
65
+ template <>
66
+ inline __m256i pack_saturate_and_clamp<int32_t>(
67
+ __m256i /*first*/,
68
+ __m256i /*second*/,
69
+ int32_t /*min_val*/,
70
+ int32_t /*max_val*/) {
71
+ // This function is for linkage only, will not be used
72
+ AT_ERROR("pack_saturate_and_clamp<int32_t> is not supported");
73
+ }
74
+
75
+ template <>
76
+ inline __m256i pack_saturate_and_clamp<int8_t>(
77
+ __m256i first,
78
+ __m256i second,
79
+ int8_t min_val,
80
+ int8_t max_val) {
81
+ __m256i packed_and_sat = _mm256_packs_epi16(first, second);
82
+ return _mm256_max_epi8(
83
+ _mm256_set1_epi8(min_val),
84
+ _mm256_min_epi8(packed_and_sat, _mm256_set1_epi8(max_val)));
85
+ }
86
+
87
+ template <>
88
+ inline __m256i pack_saturate_and_clamp<uint8_t>(
89
+ __m256i first,
90
+ __m256i second,
91
+ uint8_t min_val,
92
+ uint8_t max_val) {
93
+ __m256i packed_and_sat = _mm256_packus_epi16(first, second);
94
+ return _mm256_max_epu8(
95
+ _mm256_set1_epi8(min_val),
96
+ _mm256_min_epu8(packed_and_sat, _mm256_set1_epi8(max_val)));
97
+ }
98
+
99
+ template <typename T>
100
+ typename std::enable_if<std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value, at::vec::Vectorized<float>>::type
101
+ inline convert_int8_to_float(at::vec::Vectorized<T> src) {
102
+ // Note: this function only converts a number of input elements equal to at::vec::Vectorized<float>::size()
103
+ // Only handle first 8*8 bits
104
+ __m128i input_128 = _mm256_castsi256_si128(src);
105
+ // Convert from 8*uint8/int8 to 8*int32
106
+ __m256i input_256_int32;
107
+ if constexpr (std::is_same_v<T, uint8_t>)
108
+ input_256_int32 = _mm256_cvtepu8_epi32(input_128);
109
+ else
110
+ input_256_int32 = _mm256_cvtepi8_epi32(input_128);
111
+ // Convert from 8*int32 to 8*float
112
+ return _mm256_cvtepi32_ps(input_256_int32);
113
+ }
114
+
115
+ template <typename T>
116
+ typename std::enable_if<std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value, at::vec::Vectorized<T>>::type
117
+ inline convert_float_to_int8(at::vec::Vectorized<float> src) {
118
+ // Convert from float32 to int32 with truncation
119
+ __m256i x_values_int32 = _mm256_cvttps_epi32(src);
120
+
121
+ // Convert from int32 to int16 using signed saturation
122
+ __m256i xy_packed_v = _mm256_packs_epi32(x_values_int32, x_values_int32);
123
+
124
+ constexpr auto min_val = std::numeric_limits<T>::min();
125
+ constexpr auto max_val = std::numeric_limits<T>::max();
126
+
127
+ // Convert from int16 to uint8/int8 using unsigned saturation
128
+ __m256i xyzw_clamped_v = pack_saturate_and_clamp<T>(
129
+ xy_packed_v, xy_packed_v, min_val, max_val);
130
+ __m256i permute_mask_v =
131
+ _mm256_set_epi32(0x07, 0x03, 0x06, 0x02, 0x05, 0x01, 0x04, 0x00);
132
+ return _mm256_permutevar8x32_epi32(xyzw_clamped_v, permute_mask_v);
133
+ }
134
+
135
+ template <typename T>
136
+ inline void __attribute__((always_inline)) QuantizeAvx2(
137
+ const float* src,
138
+ T* dst,
139
+ int len,
140
+ float inverse_scale,
141
+ int64_t zero_point) {
142
+ constexpr int VLEN = 8;
143
+ constexpr auto min_val = std::numeric_limits<T>::min();
144
+ constexpr auto max_val = std::numeric_limits<T>::max();
145
+ const __m256i min_v = _mm256_set1_epi32(min_val);
146
+ const __m256i max_v = _mm256_set1_epi32(max_val);
147
+ // This is the largest int32 value < int32_max exactly representable in float
148
+ constexpr int32_t int32_float_max_val =
149
+ std::numeric_limits<int32_t>::max() - 127;
150
+ int i = 0;
151
+ __m256 inverse_scale_v = _mm256_set1_ps(inverse_scale);
152
+ // clang-format off
153
+ static const __m256i shuffle_mask_v = _mm256_set_epi8(
154
+ 0xff, 0xff, 0xff, 0xff,
155
+ 0xff, 0xff, 0xff, 0xff,
156
+ 0xff, 0xff, 0xff, 0xff,
157
+ 0x0c, 0x08, 0x04, 0x00,
158
+ 0xff, 0xff, 0xff, 0xff,
159
+ 0xff, 0xff, 0xff, 0xff,
160
+ 0xff, 0xff, 0xff, 0xff,
161
+ 0x0c, 0x08, 0x04, 0x00);
162
+ // clang-format on
163
+ __m256i permute_mask_v =
164
+ _mm256_set_epi32(0x07, 0x03, 0x06, 0x02, 0x05, 0x01, 0x04, 0x00);
165
+ __m256i permute_mask_l8_v =
166
+ _mm256_set_epi32(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00);
167
+ int len_aligned = len / (VLEN * 4) * (VLEN * 4);
168
+ for (; i < len_aligned; i += 4 * VLEN) {
169
+ // x
170
+ __m256 x_vals = _mm256_load_ps(src + i);
171
+ __m256 x_transformed_v = _mm256_mul_ps(x_vals, inverse_scale_v);
172
+ // If the floating point value is greater than int32_max,
173
+ // _mm256_cvtps_epi32 converts them to -ve. Clip at int32_float_max_val to
174
+ // avoid this.
175
+ x_transformed_v =
176
+ _mm256_min_ps(x_transformed_v, _mm256_set1_ps(int32_float_max_val));
177
+ // y
178
+ __m256 y_vals = _mm256_load_ps(src + i + VLEN);
179
+ __m256 y_transformed_v = _mm256_mul_ps(y_vals, inverse_scale_v);
180
+ y_transformed_v =
181
+ _mm256_min_ps(y_transformed_v, _mm256_set1_ps(int32_float_max_val));
182
+ // z
183
+ __m256 z_vals = _mm256_load_ps(src + i + 2 * VLEN);
184
+ __m256 z_transformed_v = _mm256_mul_ps(z_vals, inverse_scale_v);
185
+ z_transformed_v =
186
+ _mm256_min_ps(z_transformed_v, _mm256_set1_ps(int32_float_max_val));
187
+ // w
188
+ __m256 w_vals = _mm256_load_ps(src + i + 3 * VLEN);
189
+ __m256 w_transformed_v = _mm256_mul_ps(w_vals, inverse_scale_v);
190
+ w_transformed_v =
191
+ _mm256_min_ps(w_transformed_v, _mm256_set1_ps(int32_float_max_val));
192
+
193
+ __m256i x_rounded_v = _mm256_cvtps_epi32(x_transformed_v);
194
+ __m256i y_rounded_v = _mm256_cvtps_epi32(y_transformed_v);
195
+ __m256i z_rounded_v = _mm256_cvtps_epi32(z_transformed_v);
196
+ __m256i w_rounded_v = _mm256_cvtps_epi32(w_transformed_v);
197
+
198
+ // add zero point
199
+ x_rounded_v = _mm256_add_epi32(x_rounded_v, _mm256_set1_epi32(zero_point));
200
+ y_rounded_v = _mm256_add_epi32(y_rounded_v, _mm256_set1_epi32(zero_point));
201
+ z_rounded_v = _mm256_add_epi32(z_rounded_v, _mm256_set1_epi32(zero_point));
202
+ w_rounded_v = _mm256_add_epi32(w_rounded_v, _mm256_set1_epi32(zero_point));
203
+
204
+ __m256i xy_packed_v = _mm256_packs_epi32(x_rounded_v, y_rounded_v);
205
+ __m256i zw_packed_v = _mm256_packs_epi32(z_rounded_v, w_rounded_v);
206
+ __m256i xyzw_clamped_v =
207
+ pack_saturate_and_clamp<T>(xy_packed_v, zw_packed_v, min_val, max_val);
208
+
209
+ xyzw_clamped_v =
210
+ _mm256_permutevar8x32_epi32(xyzw_clamped_v, permute_mask_v);
211
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>(dst + i), xyzw_clamped_v);
212
+ }
213
+
214
+ // Additional 8-lane AVX2 version to take advantage when len is smaller
215
+ // based on fbgemm::QuantizeAvx2 (https://github.com/pytorch/FBGEMM)
216
+ for (; i < len / VLEN * VLEN; i += VLEN) {
217
+ __m256 x_vals = _mm256_load_ps(src + i);
218
+ __m256 x_transformed_v = _mm256_mul_ps(x_vals, inverse_scale_v);
219
+ x_transformed_v =
220
+ _mm256_min_ps(x_transformed_v, _mm256_set1_ps(int32_float_max_val));
221
+ __m256i x_rounded_v = _mm256_cvtps_epi32(x_transformed_v);
222
+ x_rounded_v = _mm256_add_epi32(x_rounded_v, _mm256_set1_epi32(zero_point));
223
+ __m256i x_clipped_v =
224
+ _mm256_max_epi32(min_v, _mm256_min_epi32(max_v, x_rounded_v));
225
+
226
+ x_clipped_v = _mm256_shuffle_epi8(x_clipped_v, shuffle_mask_v);
227
+ x_clipped_v = _mm256_permutevar8x32_epi32(x_clipped_v, permute_mask_l8_v);
228
+ _mm_storel_epi64(
229
+ reinterpret_cast<__m128i*>(dst + i),
230
+ _mm256_castsi256_si128(x_clipped_v));
231
+ }
232
+
233
+ for (; i < len; ++i) {
234
+ float transformed = src[i] * inverse_scale;
235
+
236
+ // Not exactly the same behavior as the vectorized code.
237
+ // The vectorized code above always rounds to even in halfway cases
238
+ // (https://software.intel.com/en-us/node/523819), but std::nearbyint
239
+ // does the same only when the current rounding mode is FE_TONEAREST.
240
+ // However, in practice, this should not be a problem because most cases
241
+ // use the default rounding mode FE_TONEAREST.
242
+ // Note that we cannot implement the same behavior as the vectorized code
243
+ // using std::round because it does rounding away from zero in halfway
244
+ // cases.
245
+ transformed = zero_point + std::nearbyint(transformed);
246
+ float clipped =
247
+ std::min(std::max(transformed, float(min_val)), float(max_val));
248
+ dst[i] = clipped;
249
+ }
250
+ }
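The rounding caveat in the scalar tail loop above can be checked directly. This small standalone program (an illustration, not part of the header) shows that std::nearbyint under the default FE_TONEAREST mode rounds halfway cases to even, matching _mm256_cvtps_epi32 with the default MXCSR rounding, while std::round rounds them away from zero.

    #include <cfenv>
    #include <cmath>
    #include <cstdio>

    int main() {
      std::fesetround(FE_TONEAREST);              // the default rounding mode assumed above
      std::printf("%g\n", std::nearbyint(0.5));   // prints 0 (ties go to even)
      std::printf("%g\n", std::nearbyint(1.5));   // prints 2 (ties go to even)
      std::printf("%g\n", std::round(0.5));       // prints 1 (ties go away from zero)
      return 0;
    }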
251
+
252
+ template<>
253
+ struct Vectorized<c10::qint32> : public Vectorizedqi {
254
+ using size_type = int;
255
+ static constexpr size_type size() {
256
+ return 8;
257
+ }
258
+
259
+ static constexpr int float_num_vecs() {
260
+ return 1;
261
+ }
262
+
263
+ static constexpr int int_num_vecs() {
264
+ return 1;
265
+ }
266
+
267
+ using float_vec_return_type = std::array<Vectorized<float>, 1>;
268
+ using int_vec_return_type = std::array<Vectorized<c10::qint32>, 1>;
269
+ using value_type = c10::qint32::underlying;
270
+
271
+ public:
272
+ using Vectorizedqi::Vectorizedqi;
273
+ Vectorized() {}
274
+
275
+ Vectorized(__m256i vals_) { vals = vals_;}
276
+
277
+ // Broadcast constructor
278
+ Vectorized(const c10::qint32& val) {
279
+ value_type uw = val.val_;
280
+ vals = _mm256_set1_epi32(uw);
281
+ }
282
+
283
+ void store(void* ptr, int count = size()) const {
284
+ if (count != size()) {
285
+ memcpy(ptr, &vals, count * sizeof(value_type));
286
+ } else {
287
+ _mm256_storeu_si256((__m256i*)ptr, vals);
288
+ }
289
+ }
290
+
291
+ static Vectorized<c10::qint32> loadu(const void* ptr) {
292
+ return Vectorized<c10::qint32>(ptr);
293
+ }
294
+
295
+ static Vectorized<c10::qint32> loadu(const void* ptr, int64_t count) {
296
+ __at_align__ value_type tmp_values[size()];
297
+ // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
298
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
299
+ // instructions while a loop would be compiled to one instruction.
300
+ for (const auto i : c10::irange(size())) {
301
+ tmp_values[i] = 0;
302
+ }
303
+ std::memcpy(
304
+ tmp_values, reinterpret_cast<const value_type*>(ptr), count * sizeof(value_type));
305
+ return _mm256_loadu_si256((const __m256i*)tmp_values);
306
+ }
307
+
308
+ float_vec_return_type dequantize(
309
+ Vectorized<float> scale,
310
+ Vectorized<float> /*zero_point*/,
311
+ Vectorized<float> scale_zp_premul) const {
312
+ __m256 float_vals = _mm256_cvtepi32_ps(vals);
313
+ return {vec::fmadd(scale, Vectorized<float>(float_vals), scale_zp_premul)};
314
+ }
315
+
316
+ float_vec_return_type dequantize(
317
+ Vectorized<float> scale,
318
+ Vectorized<float> zero_point) const {
319
+ __m256 float_vals = _mm256_cvtepi32_ps(vals);
320
+ return {(Vectorized<float>(float_vals) - zero_point) * scale};
321
+ }
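For reference, the two dequantize overloads agree when the premultiplied argument equals -zero_point * scale, which is what the fmadd form implies. A scalar sketch with hypothetical values:

    #include <cassert>
    #include <cmath>

    inline void dequantize_premul_demo() {
      float scale = 0.1f;
      float zero_point = 3.0f;
      float q = 10.0f;                          // one quantized lane, widened to float
      float direct = (q - zero_point) * scale;  // two-argument overload: (q - zp) * scale
      float premul = -zero_point * scale;       // what callers pass as scale_zp_premul
      float fused = q * scale + premul;         // fmadd(scale, q, premul)
      assert(std::fabs(direct - fused) < 1e-6f);
    }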
322
+
323
+ static Vectorized<c10::qint32> quantize(
324
+ const float_vec_return_type& rhs,
325
+ float scale,
326
+ int32_t zero_point,
327
+ float /*inverse_scale*/) {
328
+ Vectorized<c10::qint32> retval;
329
+ auto rhs_data = (__m256)rhs[0];
330
+ at::native::quantize_vec<c10::qint32, /*precision=*/32>(
331
+ scale, zero_point, (float*)&rhs_data, (c10::qint32*)&retval.vals, 8);
332
+ return retval;
333
+ }
334
+
335
+ Vectorized<c10::qint32> maximum(Vectorized<c10::qint32> b) const {
336
+ return _mm256_max_epi32(vals, b.vals);
337
+ }
338
+
339
+ Vectorized<c10::qint32> minimum(Vectorized<c10::qint32> b) const {
340
+ return _mm256_min_epi32(vals, b.vals);
341
+ }
342
+
343
+ Vectorized<c10::qint32> relu(Vectorized<c10::qint32> zero_point) const {
344
+ return maximum(zero_point);
345
+ }
346
+
347
+ Vectorized<c10::qint32> relu6(
348
+ Vectorized<c10::qint32> zero_point,
349
+ Vectorized<c10::qint32> q_six) {
350
+ return _mm256_min_epi32(
351
+ _mm256_max_epi32(vals, zero_point.vals), q_six.vals);
352
+ }
353
+
354
+ int_vec_return_type widening_subtract(Vectorized<c10::qint32> b) const {
355
+ return {_mm256_sub_epi32(vals, b)};
356
+ }
357
+
358
+ static Vectorized<c10::qint32> requantize_from_int(
359
+ const int_vec_return_type& inp,
360
+ float multiplier,
361
+ int32_t zero_point) {
362
+ __m256 multiplier_v = _mm256_set1_ps(multiplier);
363
+ __m256i zero_point_v = _mm256_set1_epi32(zero_point);
364
+
365
+ __m256 scaled = _mm256_mul_ps(_mm256_cvtepi32_ps(inp[0]), multiplier_v);
366
+ __m256i rounded = _mm256_cvtps_epi32(scaled);
367
+ return _mm256_add_epi32(rounded, zero_point_v);
368
+ }
369
+
370
+ private:
371
+ // Load from memory constructor
372
+ Vectorized(const void* ptr) {
373
+ vals = _mm256_loadu_si256((const __m256i*)ptr);
374
+ }
375
+ };
376
+
377
+ template <>
378
+ Vectorized<c10::qint32> inline maximum(const Vectorized<c10::qint32>& a, const Vectorized<c10::qint32>& b) {
379
+ return a.maximum(b);
380
+ }
381
+
382
+ template <>
383
+ Vectorized<c10::qint32> inline operator*(
384
+ const Vectorized<c10::qint32>& a,
385
+ const Vectorized<c10::qint32>& b) {
386
+ return _mm256_mullo_epi32(a, b);
387
+ }
388
+
389
+ template <>
390
+ Vectorized<c10::qint32> inline operator+(
391
+ const Vectorized<c10::qint32>& a,
392
+ const Vectorized<c10::qint32>& b) {
393
+ return _mm256_add_epi32(a, b);
394
+ }
395
+
396
+ /*
397
+ * Convert values from int32 back to int8/uint8
398
+ */
399
+ template <typename T>
400
+ __m256i RequantizeAvx2(
401
+ const std::array<Vectorized<c10::qint32>, 4>& inp,
402
+ __m256 multiplier,
403
+ __m256i zp) {
404
+ static_assert(
405
+ std::is_same<T, int8_t>::value || std::is_same<T, uint8_t>::value,
406
+ "Only int8_t/uint8_t are supported");
407
+ constexpr auto min_val = std::numeric_limits<T>::min();
408
+ constexpr auto max_val = std::numeric_limits<T>::max();
409
+ __m256i permute_mask_v =
410
+ _mm256_set_epi32(0x07, 0x03, 0x06, 0x02, 0x05, 0x01, 0x04, 0x00);
411
+ __m256 x_scaled_v = _mm256_mul_ps(_mm256_cvtepi32_ps(inp[0]), multiplier);
412
+ __m256 y_scaled_v = _mm256_mul_ps(_mm256_cvtepi32_ps(inp[1]), multiplier);
413
+ __m256 z_scaled_v = _mm256_mul_ps(_mm256_cvtepi32_ps(inp[2]), multiplier);
414
+ __m256 w_scaled_v = _mm256_mul_ps(_mm256_cvtepi32_ps(inp[3]), multiplier);
415
+
416
+ __m256i x_rounded_v = _mm256_cvtps_epi32(x_scaled_v);
417
+ __m256i y_rounded_v = _mm256_cvtps_epi32(y_scaled_v);
418
+ __m256i z_rounded_v = _mm256_cvtps_epi32(z_scaled_v);
419
+ __m256i w_rounded_v = _mm256_cvtps_epi32(w_scaled_v);
420
+
421
+ /* Add zero point */
422
+ __m256i x_v = _mm256_add_epi32(x_rounded_v, zp);
423
+ __m256i y_v = _mm256_add_epi32(y_rounded_v, zp);
424
+ __m256i z_v = _mm256_add_epi32(z_rounded_v, zp);
425
+ __m256i w_v = _mm256_add_epi32(w_rounded_v, zp);
426
+
427
+ /* Pack to int16_t and saturate */
428
+ __m256i xy_packed_v = _mm256_packs_epi32(x_v, y_v);
429
+ __m256i zw_packed_v = _mm256_packs_epi32(z_v, w_v);
430
+
431
+ __m256i xyzw_clamped_v =
432
+ pack_saturate_and_clamp<T>(xy_packed_v, zw_packed_v, min_val, max_val);
433
+
434
+ /*
435
+ * xyzw_clamped_v has results in the following layout so we need to
436
+ * permute: x0-3 y0-3 z0-3 w0-3 x4-7 y4-7 z4-7 w4-7
437
+ */
438
+ xyzw_clamped_v = _mm256_permutevar8x32_epi32(xyzw_clamped_v, permute_mask_v);
439
+ return xyzw_clamped_v;
440
+ }
441
+
442
+ template<>
443
+ struct Vectorized<c10::qint8> : public Vectorizedqi {
444
+ static constexpr int size() {
445
+ return 32;
446
+ }
447
+
448
+ static constexpr int float_num_vecs() {
449
+ return 4;
450
+ }
451
+
452
+ static constexpr int int_num_vecs() {
453
+ return 4;
454
+ }
455
+
456
+ using float_vec_return_type = std::array<Vectorized<float>, 4>;
457
+ using int_vec_return_type = std::array<Vectorized<c10::qint32>, 4>;
458
+ using value_type = typename c10::qint8::underlying;
459
+
460
+ public:
461
+ using Vectorizedqi::Vectorizedqi;
462
+
463
+ Vectorized() {}
464
+ Vectorized(__m256i vals_) { vals = vals_;}
465
+
466
+ // Broadcast constructor
467
+ Vectorized(const c10::qint8& val) {
468
+ value_type uw = val.val_;
469
+ vals = _mm256_set1_epi8(uw);
470
+ }
471
+
472
+ // This is needed because the compiler emits awful code for the default
473
+ // constructor for moving the enum
474
+ // NOLINTNEXTLINE(clang-diagnostic-deprecated-copy)
475
+ C10_CLANG_DIAGNOSTIC_PUSH()
476
+ #if C10_CLANG_HAS_WARNING("-Wdeprecated-copy")
477
+ C10_CLANG_DIAGNOSTIC_IGNORE("-Wdeprecated-copy")
478
+ #endif
479
+ Vectorized(const Vectorized<c10::qint8>& other) : Vectorizedqi(other.vals) { }
480
+ C10_CLANG_DIAGNOSTIC_POP()
481
+
482
+ void store(void* ptr, int count = size()) const {
483
+ if (count != size()) {
484
+ memcpy(ptr, &vals, count * sizeof(value_type));
485
+ } else {
486
+ _mm256_storeu_si256((__m256i*)ptr, vals);
487
+ }
488
+ }
489
+
490
+ static Vectorized<c10::qint8> loadu(const void* ptr) {
491
+ return Vectorized<c10::qint8>(ptr);
492
+ }
493
+
494
+ static Vectorized<c10::qint8> loadu(const void* ptr, int64_t count) {
495
+ __at_align__ value_type tmp_values[size()];
496
+ // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
497
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
498
+ // instructions while a loop would be compiled to one instruction.
499
+ for (const auto i : c10::irange(size())) {
500
+ tmp_values[i] = 0;
501
+ }
502
+ std::memcpy(
503
+ tmp_values, reinterpret_cast<const value_type*>(ptr), count * sizeof(value_type));
504
+ return _mm256_loadu_si256((const __m256i*)tmp_values);
505
+ }
506
+
507
+ private:
508
+ __m256i cvtepi8_epi32(__m128i epi8_vals) const {
509
+ return _mm256_cvtepi8_epi32(epi8_vals);
510
+ }
511
+
512
+ public:
513
+ float_vec_return_type dequantize(
514
+ Vectorized<float> scale,
515
+ Vectorized<float> /*zero_point*/,
516
+ Vectorized<float> scale_neg_zp_premul) const {
517
+ __m128i int_val0 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 0));
518
+ __m128i int_val1 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 1));
519
+ __m128i int_val2 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 2));
520
+ __m128i int_val3 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 3));
521
+
522
+ __m256 float_val0 = _mm256_cvtepi32_ps(cvtepi8_epi32(int_val0));
523
+ __m256 float_val1 = _mm256_cvtepi32_ps(cvtepi8_epi32(int_val1));
524
+ __m256 float_val2 = _mm256_cvtepi32_ps(cvtepi8_epi32(int_val2));
525
+ __m256 float_val3 = _mm256_cvtepi32_ps(cvtepi8_epi32(int_val3));
526
+
527
+ auto val0 =
528
+ vec::fmadd(scale, Vectorized<float>(float_val0), scale_neg_zp_premul);
529
+ auto val1 =
530
+ vec::fmadd(scale, Vectorized<float>(float_val1), scale_neg_zp_premul);
531
+ auto val2 =
532
+ vec::fmadd(scale, Vectorized<float>(float_val2), scale_neg_zp_premul);
533
+ auto val3 =
534
+ vec::fmadd(scale, Vectorized<float>(float_val3), scale_neg_zp_premul);
535
+ return {val0, val1, val2, val3};
536
+ }
537
+
538
+ float_vec_return_type dequantize(
539
+ Vectorized<float> scale,
540
+ Vectorized<float> zero_point) const {
541
+ __m128i int_val0 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 0));
542
+ __m128i int_val1 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 1));
543
+ __m128i int_val2 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 2));
544
+ __m128i int_val3 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 3));
545
+
546
+ __m256 float_val0 = _mm256_cvtepi32_ps(cvtepi8_epi32(int_val0));
547
+ __m256 float_val1 = _mm256_cvtepi32_ps(cvtepi8_epi32(int_val1));
548
+ __m256 float_val2 = _mm256_cvtepi32_ps(cvtepi8_epi32(int_val2));
549
+ __m256 float_val3 = _mm256_cvtepi32_ps(cvtepi8_epi32(int_val3));
550
+
551
+ auto val0 = (Vectorized<float>(float_val0) - zero_point) * scale;
552
+ auto val1 = (Vectorized<float>(float_val1) - zero_point) * scale;
553
+ auto val2 = (Vectorized<float>(float_val2) - zero_point) * scale;
554
+ auto val3 = (Vectorized<float>(float_val3) - zero_point) * scale;
555
+ return {val0, val1, val2, val3};
556
+ }
557
+
558
+ static Vectorized<c10::qint8> quantize(
559
+ const float_vec_return_type& rhs,
560
+ float /*scale*/,
561
+ int32_t zero_point,
562
+ float inverse_scale) {
563
+ auto* rhs_data = (float*)rhs.data();
564
+ int8_t quantized_values[32];
565
+ QuantizeAvx2<value_type>(
566
+ rhs_data, quantized_values, 32, inverse_scale, zero_point);
567
+ return Vectorized<c10::qint8>::loadu(quantized_values);
568
+ }
569
+
570
+ Vectorized<c10::qint8> maximum(Vectorized<c10::qint8> b) const {
571
+ return _mm256_max_epi8(vals, b.vals);
572
+ }
573
+
574
+ Vectorized<c10::qint8> minimum(Vectorized<c10::qint8> b) const {
575
+ return _mm256_min_epi8(vals, b.vals);
576
+ }
577
+
578
+ Vectorized<c10::qint8> relu(Vectorized<c10::qint8> zero_point) const {
579
+ return maximum(zero_point);
580
+ }
581
+
582
+ Vectorized<c10::qint8> relu6(
583
+ Vectorized<c10::qint8> zero_point,
584
+ Vectorized<c10::qint8> q_six) {
585
+ return _mm256_min_epi8(
586
+ _mm256_max_epi8(vals, zero_point.vals), q_six.vals);
587
+ }
588
+
589
+ int_vec_return_type widening_subtract(Vectorized<c10::qint8> b) const {
590
+ __m128i int_val0 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 0));
591
+ __m128i int_val1 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 1));
592
+ __m128i int_val2 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 2));
593
+ __m128i int_val3 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 3));
594
+
595
+ __m256i int32_val0 = cvtepi8_epi32(int_val0);
596
+ __m256i int32_val1 = cvtepi8_epi32(int_val1);
597
+ __m256i int32_val2 = cvtepi8_epi32(int_val2);
598
+ __m256i int32_val3 = cvtepi8_epi32(int_val3);
599
+
600
+ __m128i int_b0 = _mm_set1_epi64x(_mm256_extract_epi64(b, 0));
601
+ __m128i int_b1 = _mm_set1_epi64x(_mm256_extract_epi64(b, 1));
602
+ __m128i int_b2 = _mm_set1_epi64x(_mm256_extract_epi64(b, 2));
603
+ __m128i int_b3 = _mm_set1_epi64x(_mm256_extract_epi64(b, 3));
604
+
605
+ __m256i int32_b0 = cvtepi8_epi32(int_b0);
606
+ __m256i int32_b1 = cvtepi8_epi32(int_b1);
607
+ __m256i int32_b2 = cvtepi8_epi32(int_b2);
608
+ __m256i int32_b3 = cvtepi8_epi32(int_b3);
609
+
610
+ __m256i res_0 = _mm256_sub_epi32(int32_val0, int32_b0);
611
+ __m256i res_1 = _mm256_sub_epi32(int32_val1, int32_b1);
612
+ __m256i res_2 = _mm256_sub_epi32(int32_val2, int32_b2);
613
+ __m256i res_3 = _mm256_sub_epi32(int32_val3, int32_b3);
614
+
615
+ return {Vectorized<c10::qint32>(res_0),
616
+ Vectorized<c10::qint32>(res_1),
617
+ Vectorized<c10::qint32>(res_2),
618
+ Vectorized<c10::qint32>(res_3)};
619
+ }
620
+
621
+ static Vectorized<c10::qint8> requantize_from_int(
622
+ const int_vec_return_type& inp,
623
+ float multiplier,
624
+ int32_t zero_point) {
625
+ __m256 multiplier_v = _mm256_set1_ps(multiplier);
626
+ __m256i zero_point_v = _mm256_set1_epi32(zero_point);
627
+ return RequantizeAvx2<value_type>(inp, multiplier_v, zero_point_v);
628
+ }
629
+
630
+ private:
631
+ // Load from memory constructor
632
+ Vectorized(const void* ptr) {
633
+ vals = _mm256_loadu_si256((const __m256i*)ptr);
634
+ }
635
+ };
636
+
637
+ template <>
638
+ Vectorized<c10::qint8> inline maximum(const Vectorized<c10::qint8>& a, const Vectorized<c10::qint8>& b) {
639
+ return a.maximum(b);
640
+ }
641
+
642
+ template<>
643
+ struct Vectorized<c10::quint8> : public Vectorizedqi {
644
+ static constexpr int size() {
645
+ return 32;
646
+ }
647
+
648
+ static constexpr int float_num_vecs() {
649
+ return 4;
650
+ }
651
+
652
+ static constexpr int int_num_vecs() {
653
+ return 4;
654
+ }
655
+
656
+ using float_vec_return_type = std::array<Vectorized<float>, 4>;
657
+ using int_vec_return_type = std::array<Vectorized<c10::qint32>, 4>;
658
+ using value_type = typename c10::quint8::underlying;
659
+
660
+ public:
661
+ using Vectorizedqi::Vectorizedqi;
662
+ Vectorized() {}
663
+
664
+ Vectorized(__m256i vals_) { vals = vals_;}
665
+
666
+ // Broadcast constructor
667
+ Vectorized(const c10::quint8& val) {
668
+ value_type uw = val.val_;
669
+ vals = _mm256_set1_epi8(uw);
670
+ }
671
+
672
+ // NOLINTNEXTLINE(clang-diagnostic-deprecated-copy)
673
+ C10_CLANG_DIAGNOSTIC_PUSH()
674
+ #if C10_CLANG_HAS_WARNING("-Wdeprecated-copy")
675
+ C10_CLANG_DIAGNOSTIC_IGNORE("-Wdeprecated-copy")
676
+ #endif
677
+ Vectorized(const Vectorized<c10::quint8>& other) : Vectorizedqi(other.vals) { }
678
+ C10_CLANG_DIAGNOSTIC_POP()
679
+
680
+ void store(void* ptr, int count = size()) const {
681
+ if (count != size()) {
682
+ memcpy(ptr, &vals, count * sizeof(value_type));
683
+ } else {
684
+ _mm256_storeu_si256((__m256i*)ptr, vals);
685
+ }
686
+ }
687
+
688
+ static Vectorized<c10::quint8> loadu(const void* ptr) {
689
+ return Vectorized<c10::quint8>(ptr);
690
+ }
691
+
692
+ static Vectorized<c10::quint8> loadu(const void* ptr, int64_t count) {
693
+ __at_align__ value_type tmp_values[size()];
694
+ // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
695
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
696
+ // instructions while a loop would be compiled to one instruction.
697
+ for (const auto i : c10::irange(size())) {
698
+ tmp_values[i] = 0;
699
+ }
700
+ std::memcpy(
701
+ tmp_values, reinterpret_cast<const value_type*>(ptr), count * sizeof(value_type));
702
+ return _mm256_loadu_si256((const __m256i*)tmp_values);
703
+ }
704
+
705
+ private:
706
+ __m256i cvtepu8_epi32(__m128i epu8_vals) const {
707
+ return _mm256_cvtepu8_epi32(epu8_vals);
708
+ }
709
+
710
+ public:
711
+ float_vec_return_type dequantize(
712
+ Vectorized<float> scale,
713
+ Vectorized<float> /*zero_point*/,
714
+ Vectorized<float> scale_zp_premul) const {
715
+ __m128i int_val0 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 0));
716
+ __m128i int_val1 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 1));
717
+ __m128i int_val2 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 2));
718
+ __m128i int_val3 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 3));
719
+
720
+ __m256 float_val0 = _mm256_cvtepi32_ps(cvtepu8_epi32(int_val0));
721
+ __m256 float_val1 = _mm256_cvtepi32_ps(cvtepu8_epi32(int_val1));
722
+ __m256 float_val2 = _mm256_cvtepi32_ps(cvtepu8_epi32(int_val2));
723
+ __m256 float_val3 = _mm256_cvtepi32_ps(cvtepu8_epi32(int_val3));
724
+
725
+ auto val0 =
726
+ vec::fmadd(scale, Vectorized<float>(float_val0), scale_zp_premul);
727
+ auto val1 =
728
+ vec::fmadd(scale, Vectorized<float>(float_val1), scale_zp_premul);
729
+ auto val2 =
730
+ vec::fmadd(scale, Vectorized<float>(float_val2), scale_zp_premul);
731
+ auto val3 =
732
+ vec::fmadd(scale, Vectorized<float>(float_val3), scale_zp_premul);
733
+ return {val0, val1, val2, val3};
734
+ }
735
+
736
+ float_vec_return_type dequantize(
737
+ Vectorized<float> scale,
738
+ Vectorized<float> zero_point) const {
739
+ __m128i int_val0 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 0));
740
+ __m128i int_val1 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 1));
741
+ __m128i int_val2 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 2));
742
+ __m128i int_val3 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 3));
743
+
744
+ __m256 float_val0 = _mm256_cvtepi32_ps(cvtepu8_epi32(int_val0));
745
+ __m256 float_val1 = _mm256_cvtepi32_ps(cvtepu8_epi32(int_val1));
746
+ __m256 float_val2 = _mm256_cvtepi32_ps(cvtepu8_epi32(int_val2));
747
+ __m256 float_val3 = _mm256_cvtepi32_ps(cvtepu8_epi32(int_val3));
748
+
749
+ auto val0 = (Vectorized<float>(float_val0) - zero_point) * scale;
750
+ auto val1 = (Vectorized<float>(float_val1) - zero_point) * scale;
751
+ auto val2 = (Vectorized<float>(float_val2) - zero_point) * scale;
752
+ auto val3 = (Vectorized<float>(float_val3) - zero_point) * scale;
753
+ return {val0, val1, val2, val3};
754
+ }
755
+
756
+ static Vectorized<c10::quint8> quantize(
757
+ const float_vec_return_type& rhs,
758
+ float /*scale*/,
759
+ int32_t zero_point,
760
+ float inverse_scale) {
761
+ auto* rhs_data = (float*)rhs.data();
762
+ uint8_t quantized_values[32];
763
+ QuantizeAvx2<value_type>(
764
+ rhs_data, quantized_values, 32, inverse_scale, zero_point);
765
+ return Vectorized<c10::quint8>::loadu(quantized_values);
766
+ }
767
+
768
+ Vectorized<c10::quint8> maximum(Vectorized<c10::quint8> b) const {
769
+ return _mm256_max_epu8(vals, b.vals);
770
+ }
771
+
772
+ Vectorized<c10::quint8> minimum(Vectorized<c10::quint8> b) const {
773
+ return _mm256_min_epu8(vals, b.vals);
774
+ }
775
+
776
+ Vectorized<c10::quint8> relu(Vectorized<c10::quint8> zero_point) const {
777
+ return maximum(zero_point);
778
+ }
779
+
780
+ Vectorized<c10::quint8> relu6(
781
+ Vectorized<c10::quint8> zero_point,
782
+ Vectorized<c10::quint8> q_six) {
783
+ return _mm256_min_epu8(
784
+ _mm256_max_epu8(vals, zero_point.vals), q_six.vals);
785
+ }
786
+
787
+ int_vec_return_type widening_subtract(Vectorized<c10::quint8> b) const {
788
+ __m128i int_val0 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 0));
789
+ __m128i int_val1 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 1));
790
+ __m128i int_val2 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 2));
791
+ __m128i int_val3 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 3));
792
+
793
+ __m256i int32_val0 = cvtepu8_epi32(int_val0);
794
+ __m256i int32_val1 = cvtepu8_epi32(int_val1);
795
+ __m256i int32_val2 = cvtepu8_epi32(int_val2);
796
+ __m256i int32_val3 = cvtepu8_epi32(int_val3);
797
+
798
+ __m128i int_b0 = _mm_set1_epi64x(_mm256_extract_epi64(b, 0));
799
+ __m128i int_b1 = _mm_set1_epi64x(_mm256_extract_epi64(b, 1));
800
+ __m128i int_b2 = _mm_set1_epi64x(_mm256_extract_epi64(b, 2));
801
+ __m128i int_b3 = _mm_set1_epi64x(_mm256_extract_epi64(b, 3));
802
+
803
+ __m256i int32_b0 = cvtepu8_epi32(int_b0);
804
+ __m256i int32_b1 = cvtepu8_epi32(int_b1);
805
+ __m256i int32_b2 = cvtepu8_epi32(int_b2);
806
+ __m256i int32_b3 = cvtepu8_epi32(int_b3);
807
+
808
+ __m256i res_0 = _mm256_sub_epi32(int32_val0, int32_b0);
809
+ __m256i res_1 = _mm256_sub_epi32(int32_val1, int32_b1);
810
+ __m256i res_2 = _mm256_sub_epi32(int32_val2, int32_b2);
811
+ __m256i res_3 = _mm256_sub_epi32(int32_val3, int32_b3);
812
+ return {Vectorized<c10::qint32>(res_0),
813
+ Vectorized<c10::qint32>(res_1),
814
+ Vectorized<c10::qint32>(res_2),
815
+ Vectorized<c10::qint32>(res_3)};
816
+ }
817
+
818
+ static Vectorized<c10::quint8> requantize_from_int(
819
+ const int_vec_return_type& inp,
820
+ float multiplier,
821
+ int32_t zero_point) {
822
+ __m256 multiplier_v = _mm256_set1_ps(multiplier);
823
+ __m256i zero_point_v = _mm256_set1_epi32(zero_point);
824
+ return RequantizeAvx2<value_type>(inp, multiplier_v, zero_point_v);
825
+ }
826
+
827
+ private:
828
+
829
+ // Load from memory constructor
830
+ Vectorized(const void* ptr) {
831
+ vals = _mm256_loadu_si256((const __m256i*)ptr);
832
+ }
833
+ };
834
+
835
+ template <>
836
+ Vectorized<c10::quint8> inline maximum(const Vectorized<c10::quint8>& a, const Vectorized<c10::quint8>& b) {
837
+ return a.maximum(b);
838
+ }
839
+
840
+ #else
841
+
842
+ // NOTE: These are low-performance implementations that we fall back on
843
+ // if we are not building with AVX2. This may not be an issue, because
844
+ // currently for quantization we assume the user has at least AVX512
845
+ // installed, so these can simply act as a reference implementation.
846
+ //
847
+ // If in the future we relax this requirement (AVX2+), we should probably
848
+ // revisit these implementations
849
+
850
+ template <
851
+ typename T,
852
+ typename float_vec_return_type_,
853
+ typename int_vec_return_type_,
854
+ int size_>
855
+ struct VectorizedQuantizedConverter {
856
+ static constexpr int size() {
857
+ return size_;
858
+ }
859
+
860
+ static constexpr int float_num_vecs() {
861
+ return size() / 8;
862
+ }
863
+
864
+ static constexpr int int_num_vecs() {
865
+ return size() / 8;
866
+ }
867
+
868
+ using float_vec_return_type = float_vec_return_type_;
869
+ using int_vec_return_type = int_vec_return_type_;
870
+
871
+ using value_type = typename T::underlying;
872
+ std::array<value_type, size_> vals;
873
+
874
+ VectorizedQuantizedConverter(T val) {
875
+ for (const auto i : c10::irange(size())) {
876
+ vals[i] = val.val_;
877
+ }
878
+ }
879
+
880
+ VectorizedQuantizedConverter(const void* ptr) {
881
+ memcpy(vals.data(), ptr, sizeof(value_type) * size());
882
+ }
883
+
884
+ void store(void* ptr, int count = size()) const {
885
+ memcpy(ptr, vals.data(), count * sizeof(value_type));
886
+ }
887
+
888
+ float_vec_return_type dequantize(
889
+ Vectorized<float> scale,
890
+ Vectorized<float> zero_point,
891
+ Vectorized<float> /*scale_zp_premul*/) const {
892
+ float_vec_return_type rv;
893
+ for (const auto i : c10::irange(float_num_vecs())) {
894
+ float tmp_vals[8];
895
+ for (const auto j : c10::irange(8)) {
896
+ tmp_vals[j] = at::native::dequantize_val<T>(
897
+ scale[j], zero_point[j], T(vals[8 * i + j]));
898
+ }
899
+ rv[i] = Vectorized<float>(tmp_vals[0],
900
+ tmp_vals[1],
901
+ tmp_vals[2],
902
+ tmp_vals[3],
903
+ tmp_vals[4],
904
+ tmp_vals[5],
905
+ tmp_vals[6],
906
+ tmp_vals[7]);
907
+ }
908
+ return rv;
909
+ }
910
+
911
+ float_vec_return_type dequantize(
912
+ Vectorized<float> scale,
913
+ Vectorized<float> zero_point) const {
914
+ Vectorized<float> scale_zp_premul;
915
+ return dequantize(scale, zero_point, scale_zp_premul);
916
+ }
917
+
918
+ protected:
919
+ VectorizedQuantizedConverter() {}
920
+ };
921
+
922
+ template <>
923
+ struct Vectorized<c10::qint32> : public VectorizedQuantizedConverter<
924
+ c10::qint32,
925
+ std::array<Vectorized<float>, 1>,
926
+ std::array<Vectorized<c10::qint32>, 1>,
927
+ 8> {
928
+ Vectorized()
929
+ : VectorizedQuantizedConverter<
930
+ c10::qint32,
931
+ std::array<Vectorized<float>, 1>,
932
+ std::array<Vectorized<c10::qint32>, 1>,
933
+ 8>() {}
934
+ Vectorized(c10::qint32 val)
935
+ : VectorizedQuantizedConverter<
936
+ c10::qint32,
937
+ std::array<Vectorized<float>, 1>,
938
+ std::array<Vectorized<c10::qint32>, 1>,
939
+ 8>(val) {}
940
+ Vectorized(const void* ptr)
941
+ : VectorizedQuantizedConverter<
942
+ c10::qint32,
943
+ std::array<Vectorized<float>, 1>,
944
+ std::array<Vectorized<c10::qint32>, 1>,
945
+ 8>(ptr) {}
946
+
947
+ static Vectorized<c10::qint32> loadu(const void* ptr) {
948
+ return Vectorized<c10::qint32>(ptr);
949
+ }
950
+
951
+ static Vectorized<c10::qint32> loadu(const void* ptr, int64_t count) {
952
+ __at_align__ value_type tmp_values[size()];
953
+ // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
954
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
955
+ // instructions while a loop would be compiled to one instruction.
956
+ for (const auto i : c10::irange(size())) {
957
+ tmp_values[i] = 0;
958
+ }
959
+ std::memcpy(
960
+ tmp_values, reinterpret_cast<const value_type*>(ptr), count * sizeof(value_type));
961
+ return Vectorized<c10::qint32>(tmp_values);
962
+ }
963
+
964
+ static Vectorized<c10::qint32> quantize(
965
+ const float_vec_return_type& rhs,
966
+ float scale,
967
+ int32_t zero_point,
968
+ float /*inverse_scale*/) {
969
+ std::array<value_type, size()> qvals;
970
+ std::array<float, float_num_vecs() * 8> float_vals;
971
+
972
+ for (const auto i : c10::irange(float_num_vecs())) {
973
+ rhs[i].store(&float_vals[i * 8], 8);
974
+ }
975
+
976
+ at::native::quantize_vec<c10::qint32, /*precision=*/32>(
977
+ scale,
978
+ zero_point,
979
+ float_vals.data(),
980
+ (c10::qint32*)qvals.data(),
981
+ 8 * float_num_vecs());
982
+
983
+ return Vectorized<c10::qint32>::loadu(qvals.data());
984
+ }
985
+
986
+ Vectorized<c10::qint32> maximum(Vectorized<c10::qint32> b) const {
987
+ Vectorized<c10::qint32> retval;
988
+ for (const auto i : c10::irange(size())) {
989
+ retval.vals[i] = std::max<value_type>(vals[i], b.vals[i]);
990
+ }
991
+ return retval;
992
+ }
993
+
994
+ Vectorized<c10::qint32> minimum(Vectorized<c10::qint32> b) const {
995
+ Vectorized<c10::qint32> retval;
996
+ for (const auto i : c10::irange(size())) {
997
+ retval.vals[i] = std::min<value_type>(vals[i], b.vals[i]);
998
+ }
999
+ return retval;
1000
+ }
1001
+
1002
+ Vectorized<c10::qint32> relu(Vectorized<c10::qint32> zero_point) const {
1003
+ return maximum(zero_point);
1004
+ }
1005
+
1006
+
1007
+ Vectorized<c10::qint32> relu6(
1008
+ Vectorized<c10::qint32> zero_point,
1009
+ Vectorized<c10::qint32> q_six) {
1010
+ Vectorized<c10::qint32> retval;
1011
+ for (const auto i : c10::irange(size())) {
1012
+ retval.vals[i] = std::min<value_type>(
1013
+ std::max<value_type>(vals[i], zero_point.vals[i]), q_six.vals[i]);
1014
+ }
1015
+ return retval;
1016
+ }
1017
+
1018
+ int_vec_return_type widening_subtract(Vectorized<c10::qint32> b) const {
1019
+ int_vec_return_type retval;
1020
+ for (const auto i : c10::irange(size())) {
1021
+ retval[0].vals[i] = vals[i] - b.vals[i];
1022
+ }
1023
+ return retval;
1024
+ }
1025
+
1026
+ static Vectorized<c10::qint32> requantize_from_int(
1027
+ const int_vec_return_type& inp,
1028
+ float multiplier,
1029
+ int32_t zero_point) {
1030
+ Vectorized<c10::qint32> retval;
1031
+ for (const auto i : c10::irange(size())) {
1032
+ retval.vals[i] =
1033
+ std::nearbyint(static_cast<float>(inp[0].vals[i]) * multiplier) +
1034
+ zero_point;
1035
+ }
1036
+ return retval;
1037
+ }
1038
+ };
1039
+
1040
+ template <>
1041
+ Vectorized<c10::qint32> inline maximum(const Vectorized<c10::qint32>& a, const Vectorized<c10::qint32>& b) {
1042
+ return a.maximum(b);
1043
+ }
1044
+
1045
+ template <>
1046
+ Vectorized<c10::qint32> inline operator*(
1047
+ const Vectorized<c10::qint32>& a,
1048
+ const Vectorized<c10::qint32>& b) {
1049
+ Vectorized<c10::qint32> retval;
1050
+ for (const auto i : c10::irange(std::decay_t<decltype(a)>::size())) {
1051
+ retval.vals[i] = a.vals[i] * b.vals[i];
1052
+ }
1053
+ return retval;
1054
+ }
1055
+
1056
+ template <>
1057
+ Vectorized<c10::qint32> inline operator+(
1058
+ const Vectorized<c10::qint32>& a,
1059
+ const Vectorized<c10::qint32>& b) {
1060
+ Vectorized<c10::qint32> retval;
1061
+ for (const auto i : c10::irange(std::decay_t<decltype(a)>::size())) {
1062
+ retval.vals[i] = a.vals[i] + b.vals[i];
1063
+ }
1064
+ return retval;
1065
+ }
1066
+
1067
+ template <>
1068
+ struct Vectorized<c10::qint8> : public VectorizedQuantizedConverter<
1069
+ c10::qint8,
1070
+ std::array<Vectorized<float>, 4>,
1071
+ std::array<Vectorized<c10::qint32>, 4>,
1072
+ 32> {
1073
+ Vectorized()
1074
+ : VectorizedQuantizedConverter<
1075
+ c10::qint8,
1076
+ std::array<Vectorized<float>, 4>,
1077
+ std::array<Vectorized<c10::qint32>, 4>,
1078
+ 32>() {}
1079
+ Vectorized(c10::qint8 val)
1080
+ : VectorizedQuantizedConverter<
1081
+ c10::qint8,
1082
+ std::array<Vectorized<float>, 4>,
1083
+ std::array<Vectorized<c10::qint32>, 4>,
1084
+ 32>(val) {}
1085
+ Vectorized(const void* ptr)
1086
+ : VectorizedQuantizedConverter<
1087
+ c10::qint8,
1088
+ std::array<Vectorized<float>, 4>,
1089
+ std::array<Vectorized<c10::qint32>, 4>,
1090
+ 32>(ptr) {}
1091
+
1092
+ static Vectorized<c10::qint8> loadu(const void* ptr) {
1093
+ return Vectorized<c10::qint8>(ptr);
1094
+ }
1095
+
1096
+ static Vectorized<c10::qint8> loadu(const void* ptr, int64_t count) {
1097
+ __at_align__ value_type tmp_values[size()];
1098
+ // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
1099
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
1100
+ // instructions while a loop would be compiled to one instruction.
1101
+ for (const auto i : c10::irange(size())) {
1102
+ tmp_values[i] = 0;
1103
+ }
1104
+ std::memcpy(
1105
+ tmp_values, reinterpret_cast<const value_type*>(ptr), count * sizeof(value_type));
1106
+ return Vectorized<c10::qint8>(tmp_values);
1107
+ }
1108
+
1109
+ static Vectorized<c10::qint8> quantize(
1110
+ const float_vec_return_type& rhs,
1111
+ float scale,
1112
+ int32_t zero_point,
1113
+ float /*inverse_scale*/) {
1114
+ std::array<value_type, size()> qvals;
1115
+ std::array<float, float_num_vecs() * 8> float_vals;
1116
+
1117
+ for (const auto i : c10::irange(float_num_vecs())) {
1118
+ rhs[i].store(&float_vals[i * 8], 8);
1119
+ }
1120
+
1121
+ at::native::quantize_vec<c10::qint8>(
1122
+ scale,
1123
+ zero_point,
1124
+ float_vals.data(),
1125
+ (c10::qint8*)qvals.data(),
1126
+ 8 * float_num_vecs());
1127
+
1128
+ return Vectorized<c10::qint8>::loadu(qvals.data());
1129
+ }
1130
+
1131
+ Vectorized<c10::qint8> maximum(Vectorized<c10::qint8> b) const {
1132
+ Vectorized<c10::qint8> retval;
1133
+ for (const auto i : c10::irange(size())) {
1134
+ retval.vals[i] = std::max<value_type>(vals[i], b.vals[i]);
1135
+ }
1136
+ return retval;
1137
+ }
1138
+
1139
+ Vectorized<c10::qint8> minimum(Vectorized<c10::qint8> b) const {
1140
+ Vectorized<c10::qint8> retval;
1141
+ for (const auto i : c10::irange(size())) {
1142
+ retval.vals[i] = std::min<value_type>(vals[i], b.vals[i]);
1143
+ }
1144
+ return retval;
1145
+ }
1146
+
1147
+ Vectorized<c10::qint8> relu(Vectorized<c10::qint8> zero_point) const {
1148
+ return maximum(zero_point);
1149
+ }
1150
+
1151
+ Vectorized<c10::qint8> relu6(
1152
+ Vectorized<c10::qint8> zero_point,
1153
+ Vectorized<c10::qint8> q_six) {
1154
+ Vectorized<c10::qint8> retval;
1155
+ for (const auto i : c10::irange(size())) {
1156
+ retval.vals[i] = std::min<value_type>(
1157
+ std::max<value_type>(vals[i], zero_point.vals[i]), q_six.vals[i]);
1158
+ }
1159
+ return retval;
1160
+ }
1161
+
1162
+ int_vec_return_type widening_subtract(Vectorized<c10::qint8> b) const {
1163
+ int_vec_return_type retval;
1164
+ constexpr int elem_per_int_vec = size() / int_num_vecs();
1165
+ for (const auto i : c10::irange(int_num_vecs())) {
1166
+ for (const auto j : c10::irange(elem_per_int_vec)) {
1167
+ retval[i].vals[j] =
1168
+ static_cast<int32_t>(vals[i * elem_per_int_vec + j]) -
1169
+ static_cast<int32_t>(b.vals[i * elem_per_int_vec + j]);
1170
+ }
1171
+ }
1172
+ return retval;
1173
+ }
1174
+ static Vectorized<c10::qint8> requantize_from_int(
1175
+ const int_vec_return_type& inp,
1176
+ float multiplier,
1177
+ int32_t zero_point) {
1178
+ constexpr int elem_per_int_vec = size() / int_num_vecs();
1179
+ constexpr auto min_val = std::numeric_limits<value_type>::min();
1180
+ constexpr auto max_val = std::numeric_limits<value_type>::max();
1181
+ Vectorized<c10::qint8> retval;
1182
+ for (const auto i : c10::irange(int_num_vecs())) {
1183
+ for (const auto j : c10::irange(elem_per_int_vec)) {
1184
+ int32_t rounded =
1185
+ std::nearbyint(static_cast<float>(inp[i].vals[j]) * multiplier) +
1186
+ zero_point;
1187
+ retval.vals[i * elem_per_int_vec + j] =
1188
+ std::min<int32_t>(std::max<int32_t>(rounded, min_val), max_val);
1189
+ }
1190
+ }
1191
+ return retval;
1192
+ }
1193
+ };
1194
+
1195
+ template <>
1196
+ Vectorized<c10::qint8> inline maximum(const Vectorized<c10::qint8>& a, const Vectorized<c10::qint8>& b) {
1197
+ return a.maximum(b);
1198
+ }
1199
+
1200
+ template <>
1201
+ struct Vectorized<c10::quint8> : public VectorizedQuantizedConverter<
1202
+ c10::quint8,
1203
+ std::array<Vectorized<float>, 4>,
1204
+ std::array<Vectorized<c10::qint32>, 4>,
1205
+ 32> {
1206
+ Vectorized()
1207
+ : VectorizedQuantizedConverter<
1208
+ c10::quint8,
1209
+ std::array<Vectorized<float>, 4>,
1210
+ std::array<Vectorized<c10::qint32>, 4>,
1211
+ 32>() {}
1212
+ Vectorized(c10::quint8 val)
1213
+ : VectorizedQuantizedConverter<
1214
+ c10::quint8,
1215
+ std::array<Vectorized<float>, 4>,
1216
+ std::array<Vectorized<c10::qint32>, 4>,
1217
+ 32>(val) {}
1218
+ Vectorized(const void* ptr)
1219
+ : VectorizedQuantizedConverter<
1220
+ c10::quint8,
1221
+ std::array<Vectorized<float>, 4>,
1222
+ std::array<Vectorized<c10::qint32>, 4>,
1223
+ 32>(ptr) {}
1224
+
1225
+ static Vectorized<c10::quint8> loadu(const void* ptr) {
1226
+ return Vectorized<c10::quint8>(ptr);
1227
+ }
1228
+
1229
+ static Vectorized<c10::quint8> loadu(const void* ptr, int64_t count) {
1230
+ __at_align__ value_type tmp_values[size()];
1231
+ // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
1232
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
1233
+ // instructions while a loop would be compiled to one instruction.
1234
+ for (const auto i : c10::irange(size())) {
1235
+ tmp_values[i] = 0;
1236
+ }
1237
+ std::memcpy(
1238
+ tmp_values, reinterpret_cast<const value_type*>(ptr), count * sizeof(value_type));
1239
+ return Vectorized<c10::quint8>(tmp_values);
1240
+ }
1241
+
1242
+ static Vectorized<c10::quint8> quantize(
1243
+ const float_vec_return_type& rhs,
1244
+ float scale,
1245
+ int32_t zero_point,
1246
+ float /*inverse_scale*/) {
1247
+ std::array<value_type, size()> qvals;
1248
+ std::array<float, float_num_vecs() * 8> float_vals;
1249
+
1250
+ for (const auto i : c10::irange(float_num_vecs())) {
1251
+ rhs[i].store(&float_vals[i * 8], 8);
1252
+ }
1253
+
1254
+ at::native::quantize_vec<c10::quint8>(
1255
+ scale,
1256
+ zero_point,
1257
+ float_vals.data(),
1258
+ (c10::quint8*)qvals.data(),
1259
+ 8 * float_num_vecs());
1260
+
1261
+ return Vectorized<c10::quint8>::loadu(qvals.data());
1262
+ }
1263
+
1264
+ Vectorized<c10::quint8> maximum(Vectorized<c10::quint8> b) const {
1265
+ Vectorized<c10::quint8> retval;
1266
+ for (const auto i : c10::irange(size())) {
1267
+ retval.vals[i] = std::max<value_type>(vals[i], b.vals[i]);
1268
+ }
1269
+ return retval;
1270
+ }
1271
+
1272
+ Vectorized<c10::quint8> minimum(Vectorized<c10::quint8> b) const {
1273
+ Vectorized<c10::quint8> retval;
1274
+ for (const auto i : c10::irange(size())) {
1275
+ retval.vals[i] = std::min<value_type>(vals[i], b.vals[i]);
1276
+ }
1277
+ return retval;
1278
+ }
1279
+
1280
+ Vectorized<c10::quint8> relu(Vectorized<c10::quint8> zero_point) const {
1281
+ return maximum(zero_point);
1282
+ }
1283
+
1284
+
1285
+ Vectorized<c10::quint8> relu6(
1286
+ Vectorized<c10::quint8> zero_point,
1287
+ Vectorized<c10::quint8> q_six) {
1288
+ Vectorized<c10::quint8> retval;
1289
+ for (const auto i : c10::irange(size())) {
1290
+ retval.vals[i] = std::min<value_type>(
1291
+ std::max<value_type>(vals[i], zero_point.vals[i]), q_six.vals[i]);
1292
+ }
1293
+ return retval;
1294
+ }
1295
+
1296
+ int_vec_return_type widening_subtract(Vectorized<c10::quint8> b) const {
1297
+ int_vec_return_type retval;
1298
+ constexpr int elem_per_int_vec = size() / int_num_vecs();
1299
+ for (const auto i : c10::irange(int_num_vecs())) {
1300
+ for (const auto j : c10::irange(elem_per_int_vec)) {
1301
+ retval[i].vals[j] =
1302
+ static_cast<int32_t>(vals[i * elem_per_int_vec + j]) -
1303
+ static_cast<int32_t>(b.vals[i * elem_per_int_vec + j]);
1304
+ }
1305
+ }
1306
+ return retval;
1307
+ }
1308
+ static Vectorized<c10::quint8> requantize_from_int(
1309
+ const int_vec_return_type& inp,
1310
+ float multiplier,
1311
+ int32_t zero_point) {
1312
+ constexpr int elem_per_int_vec = size() / int_num_vecs();
1313
+ constexpr auto min_val = std::numeric_limits<value_type>::min();
1314
+ constexpr auto max_val = std::numeric_limits<value_type>::max();
1315
+ Vectorized<c10::quint8> retval;
1316
+ for (const auto i : c10::irange(int_num_vecs())) {
1317
+ for (const auto j : c10::irange(elem_per_int_vec)) {
1318
+ int32_t rounded =
1319
+ std::nearbyint(static_cast<float>(inp[i].vals[j]) * multiplier) +
1320
+ zero_point;
1321
+ retval.vals[i * elem_per_int_vec + j] =
1322
+ std::min<int32_t>(std::max<int32_t>(rounded, min_val), max_val);
1323
+ }
1324
+ }
1325
+ return retval;
1326
+ }
1327
+ };
1328
+
1329
+ template <>
1330
+ Vectorized<c10::quint8> inline maximum(const Vectorized<c10::quint8>& a, const Vectorized<c10::quint8>& b) {
1331
+ return a.maximum(b);
1332
+ }
1333
+
1334
+ #endif // if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
1335
+ }} // namespace at::vec::CPU_CAPABILITY
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec_base.h ADDED
@@ -0,0 +1,1108 @@
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+ //
6
+ // Note [Do not compile initializers with AVX]
7
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
8
+ // If you define a static initializer in this file, the initialization will use
9
+ // AVX instructions because these object files are compiled with AVX enabled.
10
+ // We need to avoid non-trivial global data in these architecture specific files
11
+ // because there's no way to guard the global initializers with CPU capability
12
+ // detection.
13
+ //
14
+ // See https://github.com/pytorch/pytorch/issues/37577 for an instance
15
+ // of this bug in the past.
16
+
17
+ #include <array>
18
+ #include <algorithm>
19
+ #include <cassert>
20
+ #include <cstring>
21
+ #include <functional>
22
+ #include <cmath>
23
+ #include <type_traits>
24
+ #include <climits>
25
+
26
+ #include <ATen/cpu/vec/intrinsics.h>
27
+ #include <ATen/native/Math.h>
28
+ #include <ATen/NumericUtils.h>
29
+ #include <c10/util/Half.h>
30
+ #include <c10/util/BFloat16.h>
31
+ #include <c10/util/BFloat16-math.h>
32
+ #include <c10/util/copysign.h>
33
+ #include <ATen/native/cpu/zmath.h>
34
+ #include <c10/util/TypeCast.h>
35
+ #include <c10/macros/Macros.h>
36
+ #include <c10/util/irange.h>
37
+ #include <c10/util/Load.h>
38
+
39
+ // These macros helped us unify vec_base.h
40
+ #ifdef CPU_CAPABILITY_AVX512
41
+ #if defined(__GNUC__)
42
+ #define __at_align__ __attribute__((aligned(64)))
43
+ #elif defined(_WIN32)
44
+ #define __at_align__ __declspec(align(64))
45
+ #else
46
+ #define __at_align__
47
+ #endif
48
+ #define VECTOR_WIDTH 64
49
+ #define int_vector __m512i
50
+ #else // CPU_CAPABILITY_AVX512
51
+ #if defined(__GNUC__)
52
+ #define __at_align__ __attribute__((aligned(32)))
53
+ #elif defined(_WIN32)
54
+ #define __at_align__ __declspec(align(32))
55
+ #else
56
+ #define __at_align__
57
+ #endif
58
+ #define VECTOR_WIDTH 32
59
+ #define int_vector __m256i
60
+ #endif // CPU_CAPABILITY_AVX512
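VECTOR_WIDTH is the register width in bytes, so the generic Vectorized<T> defined below packs VECTOR_WIDTH / sizeof(T) lanes. A quick sanity check (a sketch, assuming a non-AVX512 ATen translation unit, i.e. VECTOR_WIDTH == 32, compiled with the usual CPU_CAPABILITY macro):

    #include <ATen/cpu/vec/vec_base.h>
    #include <cstdint>

    static_assert(at::vec::Vectorized<float>::size() == 8,
                  "32 bytes hold 8 float lanes");
    static_assert(at::vec::Vectorized<int8_t>::size() == 32,
                  "32 bytes hold 32 int8_t lanes");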
61
+
62
+ namespace at::vec {
63
+ // See Note [CPU_CAPABILITY namespace]
64
+ inline namespace CPU_CAPABILITY {
65
+ // at::Half and at::BFloat16 should be treated as floating point
66
+ template <typename T>
67
+ struct is_floating_point:
68
+ std::integral_constant<bool,
69
+ std::is_floating_point<T>::value ||
70
+ std::is_same<T, at::Half>::value ||
71
+ std::is_same<T, at::BFloat16>::value> {
72
+ };
73
+
74
+ template<typename T>
75
+ constexpr bool is_floating_point_v = is_floating_point<T>::value;
76
+
77
+ template <typename T>
78
+ struct is_reduced_floating_point:
79
+ std::integral_constant<bool,
80
+ std::is_same<T, at::Half>::value ||
81
+ std::is_same<T, at::BFloat16>::value> {
82
+ };
83
+
84
+ template <typename T>
85
+ constexpr bool is_reduced_floating_point_v = is_reduced_floating_point<T>::value;
86
+
87
+ template<size_t n> struct int_of_size;
88
+
89
+ #define DEFINE_INT_OF_SIZE(int_t) \
90
+ template<> struct int_of_size<sizeof(int_t)> { using type = int_t; }
91
+
92
+ DEFINE_INT_OF_SIZE(int64_t);
93
+ DEFINE_INT_OF_SIZE(int32_t);
94
+ DEFINE_INT_OF_SIZE(int16_t);
95
+ DEFINE_INT_OF_SIZE(int8_t);
96
+
97
+ #undef DEFINE_INT_OF_SIZE
98
+
99
+ template <typename T>
100
+ using int_same_size_t = typename int_of_size<sizeof(T)>::type;
101
+
102
+ // NOTE: If you specialize on a type, you must define all operations!
103
+
104
+ // emulates Vectorized types
105
+ #if defined(__s390x__)
106
+ template <class T, class TEMP=void>
107
+ #else
108
+ template <class T>
109
+ #endif
110
+ struct Vectorized {
111
+ private:
112
+ __at_align__ T values[VECTOR_WIDTH / sizeof(T)];
113
+ public:
114
+ using value_type = T;
115
+ using size_type = int;
116
+ // Note [constexpr static function to avoid odr-usage compiler bug]
117
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
118
+ // Why, you might ask, is size defined to be a static constexpr function,
119
+ // rather than a more ordinary 'static constexpr int size;' variable?
120
+ // The problem lies within ODR rules for static constexpr members versus
121
+ // static constexpr functions. First, recall that this class (along with all
122
+ // of its derivations) live in an anonymous namespace: they are intended to be
123
+ // *completely* inlined at their use-sites, because we need to compile it
124
+ // multiple times for different instruction sets.
125
+ //
126
+ // Because of this constraint, we CANNOT provide a single definition for
127
+ // any static members in this class; since we want to compile the class
128
+ // multiple times, there wouldn't actually be any good place to put the
129
+ // definition. Now here is the problem: if we ODR-use a static constexpr
130
+ // member, we are *obligated* to provide a definition. Without the
131
+ // definition, you get a compile error like:
132
+ //
133
+ // relocation R_X86_64_PC32 against undefined symbol
134
+ // `_ZN2at6vec25612_GLOBAL__N_16VectorizedIdE4sizeE' can not be used when making
135
+ // a shared object; recompile with -fPIC
136
+ //
137
+ // If this were C++17, we could replace a static constexpr variable with
138
+ // an inline variable which doesn't require one definition. But we are not
139
+ // C++17. So the next best thing is to replace the member with a static
140
+ // constexpr (and therefore inline) function, which does not require ODR
141
+ // either.
142
+ //
143
+ // Also, technically according to the C++ standard, we don't have to define
144
+ // a constexpr variable if we never odr-use it. But it seems that some
145
+ // versions of GCC/Clang have buggy determinations on whether or not an
146
+ // identifier is odr-used or not, and in any case it's hard to tell if
147
+ // a variable is odr-used or not. So best to just cut the problem at the root.
148
+ static constexpr size_type size() {
149
+ return VECTOR_WIDTH / sizeof(T);
150
+ }
151
+ Vectorized() : values{static_cast<T>(0)} {}
152
+ Vectorized(T val) {
153
+ for (int i = 0; i != size(); i++) {
154
+ values[i] = val;
155
+ }
156
+ }
157
+ template<typename... Args,
158
+ typename = std::enable_if_t<(sizeof...(Args) == size())>>
159
+ Vectorized(Args... vals) : values{vals...}{
160
+ }
161
+ // This also implies const T& operator[](int idx) const
162
+ inline operator const T*() const {
163
+ return values;
164
+ }
165
+ // This also implies T& operator[](int idx)
166
+ inline operator T*() {
167
+ return values;
168
+ }
169
+ // Return the values as char* for type punning
170
+ auto as_bytes() const -> const char* {
171
+ return reinterpret_cast<const char*>(values);
172
+ }
173
+ template <int64_t mask_>
174
+ static Vectorized<T> blend(const Vectorized<T>& a, const Vectorized<T>& b) {
175
+ int64_t mask = mask_;
176
+ Vectorized vector;
177
+ for (const auto i : c10::irange(size())) {
178
+ if (mask & 0x01) {
179
+ vector[i] = b[i];
180
+ } else {
181
+ vector[i] = a[i];
182
+ }
183
+ mask = mask >> 1;
184
+ }
185
+ return vector;
186
+ }
187
+ static Vectorized<T> blendv(const Vectorized<T>& a, const Vectorized<T>& b,
188
+ const Vectorized<T>& mask) {
189
+ Vectorized vector;
190
+ int_same_size_t<T> buffer[size()];
191
+ mask.store(buffer);
192
+ for (const auto i : c10::irange(size())) {
193
+ if (buffer[i] & 0x01)
194
+ {
195
+ vector[i] = b[i];
196
+ } else {
197
+ vector[i] = a[i];
198
+ }
199
+ }
200
+ return vector;
201
+ }
202
+ template<typename step_t> // step sometimes requires a higher precision type (e.g., T=int, step_t=double)
203
+ static Vectorized<T> arange(T base = static_cast<T>(0), step_t step = static_cast<step_t>(1)) {
204
+ Vectorized vector;
205
+ for (const auto i : c10::irange(size())) {
206
+ vector.values[i] = base + i * step;
207
+ }
208
+ return vector;
209
+ }
210
+ static Vectorized<T> set(const Vectorized<T>& a, const Vectorized<T>& b, int64_t count = size()) {
211
+ Vectorized vector;
212
+ for (const auto i : c10::irange(size())) {
213
+ if (i < count) {
214
+ vector[i] = b[i];
215
+ } else {
216
+ vector[i] = a[i];
217
+ }
218
+ }
219
+ return vector;
220
+ }
221
+ static Vectorized<T> loadu(const void* ptr) {
222
+ Vectorized vector;
223
+ std::memcpy(vector.values, ptr, VECTOR_WIDTH);
224
+ return vector;
225
+ }
226
+ static Vectorized<T> loadu(const void* ptr, int64_t count) {
227
+ Vectorized vector;
228
+ std::memcpy(vector.values, ptr, count * sizeof(T));
229
+ return vector;
230
+ }
231
+ void store(void* ptr, int count = size()) const {
232
+ std::memcpy(ptr, values, count * sizeof(T));
233
+ }
234
+ int zero_mask() const {
235
+ // returns an integer mask where all zero elements are translated to 1-bit and others are translated to 0-bit
236
+ int mask = 0;
237
+ for (int i = 0; i < size(); ++ i) {
238
+ if (values[i] == static_cast<T>(0)) {
239
+ mask |= (1 << i);
240
+ }
241
+ }
242
+ return mask;
243
+ }
244
+ Vectorized<T> isnan() const {
245
+ Vectorized<T> vector;
246
+ for (int64_t i = 0; i != size(); i++) {
247
+ if (_isnan(values[i])) {
248
+ std::memset(static_cast<void*>(vector.values + i), 0xFF, sizeof(T));
249
+ } else {
250
+ std::memset(static_cast<void*>(vector.values + i), 0, sizeof(T));
251
+ }
252
+ }
253
+ return vector;
254
+ }
255
+ bool has_inf_nan() const {
256
+ for (int64_t i = 0; i != size(); i++) {
257
+ if(_isnan(values[i]) || _isinf(values[i])) {
258
+ return true;
259
+ }
260
+ }
261
+ return false;
262
+ }
263
+ Vectorized<T> map(T (*const f)(T)) const {
264
+ Vectorized<T> ret;
265
+ for (int64_t i = 0; i != size(); i++) {
266
+ ret[i] = f(values[i]);
267
+ }
268
+ return ret;
269
+ }
270
+ Vectorized<T> map(T (*const f)(const T &)) const {
271
+ Vectorized<T> ret;
272
+ for (int64_t i = 0; i != size(); i++) {
273
+ ret[i] = f(values[i]);
274
+ }
275
+ return ret;
276
+ }
277
+ template <typename other_t_abs = T,
278
+ typename std::enable_if<!is_floating_point_v<other_t_abs> && !c10::is_complex<other_t_abs>::value, int>::type = 0>
279
+ Vectorized<T> abs() const {
280
+ // other_t_abs is for SFINAE and clarity. Make sure it is not changed.
281
+ static_assert(std::is_same<other_t_abs, T>::value, "other_t_abs must be T");
282
+ return map([](T x) -> T { return x < static_cast<T>(0) ? -x : x; });
283
+ }
284
+ template <typename float_t_abs = T,
285
+ typename std::enable_if<is_floating_point_v<float_t_abs>, int>::type = 0>
286
+ Vectorized<T> abs() const {
287
+ // float_t_abs is for SFINAE and clarity. Make sure it is not changed.
288
+ static_assert(std::is_same<float_t_abs, T>::value, "float_t_abs must be T");
289
+ // Specifically deal with floating-point because the generic code above won't handle -0.0 (which should result in
290
+ // 0.0) properly.
291
+ return map([](T x) -> T { return std::abs(x); });
292
+ }
293
+ template <typename complex_t_abs = T,
294
+ typename std::enable_if<c10::is_complex<complex_t_abs>::value, int>::type = 0>
295
+ Vectorized<T> abs() const {
296
+ // complex_t_abs is for SFINAE and clarity. Make sure it is not changed.
297
+ static_assert(std::is_same<complex_t_abs, T>::value, "complex_t_abs must be T");
298
+ // Specifically map() does not perform the type conversion needed by abs.
299
+ return map([](T x) { return static_cast<T>(std::abs(x)); });
300
+ }
301
+
302
+ template <typename other_t_sgn = T,
303
+ typename std::enable_if<c10::is_complex<other_t_sgn>::value, int>::type = 0>
304
+ Vectorized<T> sgn() const {
305
+ return map(at::native::sgn_impl);
306
+ }
307
+
308
+ template <typename other_t_angle = T,
309
+ typename std::enable_if<!c10::is_complex<other_t_angle>::value, int>::type = 0>
310
+ Vectorized<T> angle() const {
311
+ // other_t_angle is for SFINAE and clarity. Make sure it is not changed.
312
+ static_assert(std::is_same<other_t_angle, T>::value, "other_t_angle must be T");
313
+ return map(at::native::angle_impl<T>); // compiler is unable to resolve the overload without <T>
314
+ }
315
+ template <typename complex_t_angle = T,
316
+ typename std::enable_if<c10::is_complex<complex_t_angle>::value, int>::type = 0>
317
+ Vectorized<T> angle() const {
318
+ // complex_t_angle is for SFINAE and clarity. Make sure it is not changed.
319
+ static_assert(std::is_same<complex_t_angle, T>::value, "complex_t_angle must be T");
320
+ return map([](T x) { return static_cast<T>(std::arg(x)); });
321
+ }
322
+ template <typename other_t_real = T,
323
+ typename std::enable_if<!c10::is_complex<other_t_real>::value, int>::type = 0>
324
+ Vectorized<T> real() const {
325
+ // other_t_real is for SFINAE and clarity. Make sure it is not changed.
326
+ static_assert(std::is_same<other_t_real, T>::value, "other_t_real must be T");
327
+ return *this;
328
+ }
329
+ template <typename complex_t_real = T,
330
+ typename std::enable_if<c10::is_complex<complex_t_real>::value, int>::type = 0>
331
+ Vectorized<T> real() const {
332
+ // complex_t_real is for SFINAE and clarity. Make sure it is not changed.
333
+ static_assert(std::is_same<complex_t_real, T>::value, "complex_t_real must be T");
334
+ return map([](T x) { return static_cast<T>(x.real()); });
335
+ }
336
+ template <typename other_t_imag = T,
337
+ typename std::enable_if<!c10::is_complex<other_t_imag>::value, int>::type = 0>
338
+ Vectorized<T> imag() const {
339
+ // other_t_imag is for SFINAE and clarity. Make sure it is not changed.
340
+ static_assert(std::is_same<other_t_imag, T>::value, "other_t_imag must be T");
341
+ return Vectorized(0);
342
+ }
343
+ template <typename complex_t_imag = T,
344
+ typename std::enable_if<c10::is_complex<complex_t_imag>::value, int>::type = 0>
345
+ Vectorized<T> imag() const {
346
+ // complex_t_imag is for SFINAE and clarity. Make sure it is not changed.
347
+ static_assert(std::is_same<complex_t_imag, T>::value, "complex_t_imag must be T");
348
+ return map([](T x) { return static_cast<T>(x.imag()); });
349
+ }
350
+ template <typename other_t_conj = T,
351
+ typename std::enable_if<!c10::is_complex<other_t_conj>::value, int>::type = 0>
352
+ Vectorized<T> conj() const {
353
+ // other_t_conj is for SFINAE and clarity. Make sure it is not changed.
354
+ static_assert(std::is_same<other_t_conj, T>::value, "other_t_conj must be T");
355
+ return *this;
356
+ }
357
+ template <typename complex_t_conj = T,
358
+ typename std::enable_if<c10::is_complex<complex_t_conj>::value, int>::type = 0>
359
+ Vectorized<T> conj() const {
360
+ // complex_t_conj is for SFINAE and clarity. Make sure it is not changed.
361
+ static_assert(std::is_same<complex_t_conj, T>::value, "complex_t_conj must be T");
362
+ return map([](T x) { return static_cast<T>(std::conj(x)); });
363
+ }
364
+ Vectorized<T> acos() const {
365
+ return map(std::acos);
366
+ }
367
+ Vectorized<T> acosh() const {
368
+ return map(std::acosh);
369
+ }
370
+ Vectorized<T> asin() const {
371
+ return map(std::asin);
372
+ }
373
+ Vectorized<T> atan() const {
374
+ return map(std::atan);
375
+ }
376
+ Vectorized<T> atanh() const {
377
+ return map(std::atanh);
378
+ }
379
+ Vectorized<T> atan2(const Vectorized<T> &exp) const {
380
+ Vectorized<T> ret;
381
+ for (const auto i : c10::irange(size())) {
382
+ ret[i] = std::atan2(values[i], exp[i]);
383
+ }
384
+ return ret;
385
+ }
386
+ template <
387
+ typename U = T,
388
+ typename std::enable_if_t<is_floating_point_v<U>, int> = 0>
389
+ Vectorized<T> copysign(const Vectorized<T> &sign) const {
390
+ Vectorized<T> ret;
391
+ for (size_type i = 0; i < size(); i++) {
392
+ ret[i] = c10::copysign(values[i], sign[i]);
393
+ }
394
+ return ret;
395
+ }
396
+ Vectorized<T> erf() const {
397
+ return map(std::erf);
398
+ }
399
+ Vectorized<T> erfc() const {
400
+ return map(std::erfc);
401
+ }
402
+ Vectorized<T> erfinv() const {
403
+ return map(calc_erfinv);
404
+ }
405
+ Vectorized<T> exp() const {
406
+ return map(std::exp);
407
+ }
408
+ Vectorized<T> exp2() const {
409
+ return map(exp2_impl);
410
+ }
411
+ Vectorized<T> expm1() const {
412
+ return map(std::expm1);
413
+ }
414
+ Vectorized<T> exp_u20() const {
415
+ return map(std::exp);
416
+ }
417
+ Vectorized<T> frac() const {
418
+ return *this - this->trunc();
419
+ }
420
+ template <
421
+ typename U = T,
422
+ typename std::enable_if_t<is_floating_point_v<U>, int> = 0>
423
+ Vectorized<T> fmod(const Vectorized<T>& q) const {
424
+ // U is for SFINAE purposes only. Make sure it is not changed.
425
+ static_assert(std::is_same<U, T>::value, "U must be T");
426
+ Vectorized<T> ret;
427
+ for (const auto i : c10::irange(size())) {
428
+ ret[i] = std::fmod(values[i], q[i]);
429
+ }
430
+ return ret;
431
+ }
432
+ Vectorized<T> log() const {
433
+ return map(std::log);
434
+ }
435
+ Vectorized<T> log10() const {
436
+ return map(std::log10);
437
+ }
438
+ Vectorized<T> log1p() const {
439
+ return map(std::log1p);
440
+ }
441
+ template <typename other_t_log2 = T,
442
+ typename std::enable_if<!c10::is_complex<other_t_log2>::value, int>::type = 0>
443
+ Vectorized<T> log2() const {
444
+ // other_t_log2 is for SFINAE and clarity. Make sure it is not changed.
445
+ static_assert(std::is_same<other_t_log2, T>::value, "other_t_log2 must be T");
446
+ return map(std::log2);
447
+ }
448
+ template <typename complex_t_log2 = T,
449
+ typename std::enable_if<c10::is_complex<complex_t_log2>::value, int>::type = 0>
450
+ Vectorized<T> log2() const {
451
+ // complex_t_log2 is for SFINAE and clarity. Make sure it is not changed.
452
+ static_assert(std::is_same<complex_t_log2, T>::value, "complex_t_log2 must be T");
453
+ const T log_2 = T(std::log(2.0));
454
+ return Vectorized(map(std::log))/Vectorized(log_2);
455
+ }
456
+ Vectorized<T> ceil() const {
457
+ return map(at::native::ceil_impl);
458
+ }
459
+ Vectorized<T> cos() const {
460
+ return map(std::cos);
461
+ }
462
+ Vectorized<T> cosh() const {
463
+ return map(std::cosh);
464
+ }
465
+ Vectorized<T> floor() const {
466
+ return map(at::native::floor_impl);
467
+ }
468
+ Vectorized<T> hypot(const Vectorized<T> &b) const {
469
+ Vectorized<T> ret;
470
+ for (const auto i : c10::irange(size())) {
471
+ ret[i] = std::hypot(values[i], b[i]);
472
+ }
473
+ return ret;
474
+ }
475
+ Vectorized<T> i0() const {
476
+ return map(calc_i0);
477
+ }
478
+ Vectorized<T> i0e() const {
479
+ return map(calc_i0e);
480
+ }
481
+ Vectorized<T> digamma() const {
482
+ return map(calc_digamma);
483
+ }
484
+ Vectorized<T> igamma(const Vectorized<T> &x) const {
485
+ Vectorized<T> ret;
486
+ for (const auto i : c10::irange(size())) {
487
+ ret[i] = calc_igamma(values[i], x[i]);
488
+ }
489
+ return ret;
490
+ }
491
+ Vectorized<T> igammac(const Vectorized<T> &x) const {
492
+ Vectorized<T> ret;
493
+ for (const auto i : c10::irange(size())) {
494
+ ret[i] = calc_igammac(values[i], x[i]);
495
+ }
496
+ return ret;
497
+ }
498
+ Vectorized<T> neg() const {
499
+ // NB: the trailing return type is needed because we need to coerce the
500
+ // return value back to T in the case of unary operator- incurring a
501
+ // promotion
502
+ return map([](T x) -> T { return -x; });
503
+ }
504
+ Vectorized<T> nextafter(const Vectorized<T> &b) const {
505
+ Vectorized<T> ret;
506
+ for (const auto i : c10::irange(size())) {
507
+ ret[i] = std::nextafter(values[i], b[i]);
508
+ }
509
+ return ret;
510
+ }
511
+ Vectorized<T> round() const {
512
+ // We do not use std::round because we would like to round midway numbers to the nearest even integer.
513
+ return map(at::native::round_impl);
514
+ }
515
+ Vectorized<T> sin() const {
516
+ return map(std::sin);
517
+ }
518
+ Vectorized<T> sinh() const {
519
+ return map(std::sinh);
520
+ }
521
+ Vectorized<T> tan() const {
522
+ return map(std::tan);
523
+ }
524
+ Vectorized<T> tanh() const {
525
+ return map(std::tanh);
526
+ }
527
+ Vectorized<T> trunc() const {
528
+ return map(at::native::trunc_impl);
529
+ }
530
+ Vectorized<T> lgamma() const {
531
+ return map(std::lgamma);
532
+ }
533
+ Vectorized<T> sqrt() const {
534
+ return map(std::sqrt);
535
+ }
536
+ Vectorized<T> reciprocal() const {
537
+ return map([](T x) { return (T)(1) / x; });
538
+ }
539
+ Vectorized<T> rsqrt() const {
540
+ return map([](T x) { return (T)1 / std::sqrt(x); });
541
+ }
542
+ Vectorized<T> pow(const Vectorized<T> &exp) const {
543
+ Vectorized<T> ret;
544
+ for (const auto i : c10::irange(size())) {
545
+ ret[i] = std::pow(values[i], exp[i]);
546
+ }
547
+ return ret;
548
+ }
549
+ private:
550
+ template <typename Op>
551
+ inline Vectorized<T> binary_pred(const Vectorized<T>& other, Op op) const {
552
+ // All bits are set to 1 if the pred is true, otherwise 0.
553
+ Vectorized<T> vector;
554
+ for (int64_t i = 0; i != size(); i++) {
555
+ if (op(values[i], other.values[i])) {
556
+ std::memset(static_cast<void*>(vector.values + i), 0xFF, sizeof(T));
557
+ } else {
558
+ std::memset(static_cast<void*>(vector.values + i), 0, sizeof(T));
559
+ }
560
+ }
561
+ return vector;
562
+ }
563
+
564
+ public:
565
+ Vectorized<T> operator==(const Vectorized<T>& other) const { return binary_pred(other, std::equal_to<T>()); }
566
+ Vectorized<T> operator!=(const Vectorized<T>& other) const { return binary_pred(other, std::not_equal_to<T>()); }
567
+ Vectorized<T> operator>=(const Vectorized<T>& other) const { return binary_pred(other, std::greater_equal<T>()); }
568
+ Vectorized<T> operator<=(const Vectorized<T>& other) const { return binary_pred(other, std::less_equal<T>()); }
569
+ Vectorized<T> operator>(const Vectorized<T>& other) const { return binary_pred(other, std::greater<T>()); }
570
+ Vectorized<T> operator<(const Vectorized<T>& other) const { return binary_pred(other, std::less<T>()); }
571
+
572
+ private:
573
+ template <typename Op>
574
+ inline Vectorized<T> binary_pred_bool(const Vectorized<T>& other, Op op) const {
575
+ // 1 if the pred is true, otherwise 0.
576
+ Vectorized<T> vector;
577
+ for (int i = 0; i != size(); ++ i) {
578
+ vector[i] = static_cast<T>(op(values[i], other.values[i]));
579
+ }
580
+ return vector;
581
+ }
582
+
583
+ public:
584
+ Vectorized<T> eq(const Vectorized<T>& other) const { return binary_pred_bool(other, std::equal_to<T>()); }
585
+ Vectorized<T> ne(const Vectorized<T>& other) const { return binary_pred_bool(other, std::not_equal_to<T>()); }
586
+ Vectorized<T> gt(const Vectorized<T>& other) const { return binary_pred_bool(other, std::greater<T>()); }
587
+ Vectorized<T> ge(const Vectorized<T>& other) const { return binary_pred_bool(other, std::greater_equal<T>()); }
588
+ Vectorized<T> lt(const Vectorized<T>& other) const { return binary_pred_bool(other, std::less<T>()); }
589
+ Vectorized<T> le(const Vectorized<T>& other) const { return binary_pred_bool(other, std::less_equal<T>()); }
590
+ };
591
+
592
+ template <class T> Vectorized<T> inline operator+(const Vectorized<T> &a, const Vectorized<T> &b) {
593
+ Vectorized<T> c;
594
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
595
+ c[i] = a[i] + b[i];
596
+ }
597
+ return c;
598
+ }
599
+
600
+ template <class T> Vectorized<T> inline operator-(const Vectorized<T> &a, const Vectorized<T> &b) {
601
+ Vectorized<T> c;
602
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
603
+ c[i] = a[i] - b[i];
604
+ }
605
+ return c;
606
+ }
607
+
608
+ template <class T> Vectorized<T> inline operator*(const Vectorized<T> &a, const Vectorized<T> &b) {
609
+ Vectorized<T> c;
610
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
611
+ c[i] = a[i] * b[i];
612
+ }
613
+ return c;
614
+ }
615
+
616
+ template <class T> Vectorized<T> inline operator/(const Vectorized<T> &a, const Vectorized<T> &b) __ubsan_ignore_float_divide_by_zero__ {
617
+ Vectorized<T> c;
618
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
619
+ c[i] = a[i] / b[i];
620
+ }
621
+ return c;
622
+ }
623
+
624
+ template <class T,
625
+ typename std::enable_if<!is_floating_point_v<T>, int>::type = 0>
626
+ Vectorized<T> inline operator%(const Vectorized<T> &a, const Vectorized<T> &b) __ubsan_ignore_float_divide_by_zero__ {
627
+ return a - a / b * b;
628
+ }
629
+
630
+ template <class T> Vectorized<T> inline operator||(
631
+ const Vectorized<T> &a, const Vectorized<T> &b) {
632
+ Vectorized<T> c;
633
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
634
+ c[i] = a[i] || b[i];
635
+ }
636
+ return c;
637
+ }
638
+
639
+ // Implements the IEEE 754 201X `maximum` operation, which propagates NaN if
640
+ // either input is a NaN.
641
+ template <class T,
642
+ typename std::enable_if<!c10::is_complex<T>::value, int>::type = 0>
643
+ Vectorized<T> inline maximum(const Vectorized<T> &a, const Vectorized<T> &b) {
644
+ Vectorized<T> c;
645
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
646
+ c[i] = (a[i] > b[i]) ? a[i] : b[i];
647
+ if (_isnan(a[i])) {
648
+ // If either input is NaN, propagate a NaN.
649
+ // NOTE: The case where b[i] was NaN is handled correctly by the naive
650
+ // ternary operator above.
651
+ c[i] = a[i];
652
+ }
653
+ }
654
+ return c;
655
+ }
656
+
657
+ template <class T,
658
+ typename std::enable_if<c10::is_complex<T>::value, int>::type = 0>
659
+ Vectorized<T> inline maximum(const Vectorized<T> &a, const Vectorized<T> &b) {
660
+ Vectorized<T> c;
661
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
662
+ c[i] = (std::abs(a[i]) > std::abs(b[i])) ? a[i] : b[i];
663
+ if (_isnan(a[i])) {
664
+ // If either input is NaN, propagate a NaN.
665
+ // NOTE: The case where b[i] was NaN is handled correctly by the naive
666
+ // ternary operator above.
667
+ c[i] = a[i];
668
+ }
669
+ }
670
+ return c;
671
+ }
672
+
673
+ // Implements the IEEE 754 201X `minimum` operation, which propagates NaN if
674
+ // either input is a NaN.
675
+ template <class T,
676
+ typename std::enable_if<!c10::is_complex<T>::value, int>::type = 0>
677
+ Vectorized<T> inline minimum(const Vectorized<T> &a, const Vectorized<T> &b) {
678
+ Vectorized<T> c;
679
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
680
+ c[i] = (a[i] < b[i]) ? a[i] : b[i];
681
+ if (_isnan(a[i])) {
682
+ // If either input is NaN, propagate a NaN.
683
+ // NOTE: The case where b[i] was NaN is handled correctly by the naive
684
+ // ternary operator above.
685
+ c[i] = a[i];
686
+ }
687
+ }
688
+ return c;
689
+ }
690
+
691
+ template <class T,
692
+ typename std::enable_if<c10::is_complex<T>::value, int>::type = 0>
693
+ Vectorized<T> inline minimum(const Vectorized<T> &a, const Vectorized<T> &b) {
694
+ Vectorized<T> c;
695
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
696
+ c[i] = (std::abs(a[i]) < std::abs(b[i])) ? a[i] : b[i];
697
+ if (_isnan(a[i])) {
698
+ // If either input is NaN, propagate a NaN.
699
+ // NOTE: The case where b[i] was NaN is handled correctly by the naive
700
+ // ternary operator above.
701
+ c[i] = a[i];
702
+ }
703
+ }
704
+ return c;
705
+ }
706
+
707
+ template <class T,
708
+ typename std::enable_if<!c10::is_complex<T>::value, int>::type = 0>
709
+ Vectorized<T> inline clamp(const Vectorized<T> &a, const Vectorized<T> &min_vec, const Vectorized<T> &max_vec) {
710
+ Vectorized<T> c;
711
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
712
+ c[i] = std::min(std::max(a[i], min_vec[i]), max_vec[i]);
713
+ }
714
+ return c;
715
+ }
716
+
717
+ template <class T,
718
+ typename std::enable_if<!c10::is_complex<T>::value, int>::type = 0>
719
+ Vectorized<T> inline clamp_max(const Vectorized<T> &a, const Vectorized<T> &max_vec) {
720
+ Vectorized<T> c;
721
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
722
+ c[i] = a[i] > max_vec[i] ? max_vec[i] : a[i];
723
+ }
724
+ return c;
725
+ }
726
+
727
+ template <class T,
728
+ typename std::enable_if<!c10::is_complex<T>::value, int>::type = 0>
729
+ Vectorized<T> inline clamp_min(const Vectorized<T> &a, const Vectorized<T> &min_vec) {
730
+ Vectorized<T> c;
731
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
732
+ c[i] = a[i] < min_vec[i] ? min_vec[i] : a[i];
733
+ }
734
+ return c;
735
+ }
736
+
737
+ struct Vectorizedi;
738
+
739
+ #if defined(CPU_CAPABILITY_AVX2) || defined(CPU_CAPABILITY_AVX512)
740
+ template <class T, typename Op>
741
+ static inline Vectorized<T> bitwise_binary_op(const Vectorized<T> &a, const Vectorized<T> &b, Op op) {
742
+ int_vector buffer;
743
+ #if defined(CPU_CAPABILITY_AVX2)
744
+ int_vector a_buffer = _mm256_load_si256(reinterpret_cast<const int_vector*>((const T*)a));
745
+ int_vector b_buffer = _mm256_load_si256(reinterpret_cast<const int_vector*>((const T*)b));
746
+ #elif defined(CPU_CAPABILITY_AVX512)
747
+ int_vector a_buffer = _mm512_load_si512(reinterpret_cast<const int_vector*>((const T*)a));
748
+ int_vector b_buffer = _mm512_load_si512(reinterpret_cast<const int_vector*>((const T*)b));
749
+ #endif
750
+ buffer = op(a_buffer, b_buffer);
751
+ __at_align__ T results[Vectorized<T>::size()];
752
+
753
+ #if defined(CPU_CAPABILITY_AVX2)
754
+ _mm256_store_si256(reinterpret_cast<int_vector*>(results), buffer);
755
+ #elif defined(CPU_CAPABILITY_AVX512)
756
+ _mm512_store_si512(reinterpret_cast<int_vector*>(results), buffer);
757
+ #endif
758
+ return Vectorized<T>::loadu(results);
759
+ }
760
+
761
+ template<class T, typename std::enable_if_t<!std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
762
+ inline Vectorized<T> operator&(const Vectorized<T>& a, const Vectorized<T>& b) {
763
+ // We enclose _mm512_and_si512 or _mm256_and_si256 with lambda because it is always_inline
764
+ #if defined(CPU_CAPABILITY_AVX2)
765
+ return bitwise_binary_op(a, b, [](int_vector a, int_vector b) { return _mm256_and_si256(a, b); });
766
+ #elif defined(CPU_CAPABILITY_AVX512)
767
+ return bitwise_binary_op(a, b, [](int_vector a, int_vector b) { return _mm512_and_si512(a, b); });
768
+ #endif
769
+ }
770
+ template<class T, typename std::enable_if_t<!std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
771
+ inline Vectorized<T> operator|(const Vectorized<T>& a, const Vectorized<T>& b) {
772
+ // We enclose _mm512_or_si512 or _mm256_or_si256 with lambda because it is always_inline
773
+ #if defined(CPU_CAPABILITY_AVX2)
774
+ return bitwise_binary_op(a, b, [](int_vector a, int_vector b) { return _mm256_or_si256(a, b); });
775
+ #elif defined(CPU_CAPABILITY_AVX512)
776
+ return bitwise_binary_op(a, b, [](int_vector a, int_vector b) { return _mm512_or_si512(a, b); });
777
+ #endif
778
+ }
779
+ template<class T, typename std::enable_if_t<!std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
780
+ inline Vectorized<T> operator^(const Vectorized<T>& a, const Vectorized<T>& b) {
781
+ // We enclose _mm512_xor_si512 or _mm256_xor_si256 with lambda because it is always_inline
782
+ #if defined(CPU_CAPABILITY_AVX2)
783
+ return bitwise_binary_op(a, b, [](int_vector a, int_vector b) { return _mm256_xor_si256(a, b); });
784
+ #elif defined(CPU_CAPABILITY_AVX512)
785
+ return bitwise_binary_op(a, b, [](int_vector a, int_vector b) { return _mm512_xor_si512(a, b); });
786
+ #endif
787
+ }
788
+
789
+ #else
790
+
791
+ template <typename T>
792
+ auto load(char const* data) -> T {
793
+ T ret;
794
+ std::memcpy(&ret, data, sizeof(ret));
795
+ return ret;
796
+ }
797
+
798
+ template<class T, typename Op>
799
+ static inline Vectorized<T> bitwise_binary_op(const Vectorized<T> &a, const Vectorized<T> &b, Op op) {
800
+ static constexpr uint32_t element_no = VECTOR_WIDTH / sizeof(intmax_t);
801
+ __at_align__ intmax_t buffer[element_no];
802
+ static_assert(VECTOR_WIDTH % sizeof(intmax_t) == 0, "VECTOR_WIDTH not a multiple of sizeof(intmax_t)");
803
+ static_assert(sizeof(buffer) == sizeof(Vectorized<T>), "sizeof(buffer) must match sizeof(Vectorized<T>)");
804
+ // We should be using memcpy in order to respect the strict aliasing rule
805
+ // see: https://github.com/pytorch/pytorch/issues/66119
806
+ // Using char* is defined in the C11 standard 6.5 Expression paragraph 7
807
+ // (http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf)
808
+ const auto* a_data = a.as_bytes();
809
+ const auto* b_data = b.as_bytes();
810
+ // load each intmax_t chunk and process; increase pointers by sizeof(intmax_t)
811
+ for (auto& out : buffer) {
812
+ out = op(load<intmax_t>(a_data), load<intmax_t>(b_data));
813
+ a_data += sizeof(intmax_t);
814
+ b_data += sizeof(intmax_t);
815
+ }
816
+ assert(a_data == a.as_bytes() + sizeof(a));
817
+ assert(b_data == b.as_bytes() + sizeof(b));
818
+ return Vectorized<T>::loadu(buffer);
819
+ }
820
+
821
+ template<class T, typename std::enable_if_t<!std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
822
+ inline Vectorized<T> operator&(const Vectorized<T>& a, const Vectorized<T>& b) {
823
+ return bitwise_binary_op(a, b, std::bit_and<intmax_t>());
824
+ }
825
+ template<class T, typename std::enable_if_t<!std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
826
+ inline Vectorized<T> operator|(const Vectorized<T>& a, const Vectorized<T>& b) {
827
+ return bitwise_binary_op(a, b, std::bit_or<intmax_t>());
828
+ }
829
+ template<class T, typename std::enable_if_t<!std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
830
+ inline Vectorized<T> operator^(const Vectorized<T>& a, const Vectorized<T>& b) {
831
+ return bitwise_binary_op(a, b, std::bit_xor<intmax_t>());
832
+ }
833
+
834
+ #endif // defined(CPU_CAPABILITY_AVX2) || defined(CPU_CAPABILITY_AVX512)
835
+
836
+ template<class T, typename std::enable_if_t<!std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
837
+ inline Vectorized<T> operator~(const Vectorized<T>& a) {
838
+ Vectorized<T> ones; // All bits are 1
839
+ memset((T*) ones, 0xFF, VECTOR_WIDTH);
840
+ return a ^ ones;
841
+ }
842
+
843
+ template <class T> Vectorized<T> inline operator<<(const Vectorized<T> &a, const Vectorized<T> &b) {
844
+ constexpr T max_shift = sizeof(T) * CHAR_BIT;
845
+ Vectorized<T> c;
846
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
847
+ T shift = b[i];
848
+ if ((static_cast<std::make_signed_t<T>>(shift) < 0) || (shift >= max_shift)) {
849
+ c[i] = 0;
850
+ } else {
851
+ c[i] = static_cast<std::make_unsigned_t<T>>(a[i]) << shift;
852
+ }
853
+ }
854
+ return c;
855
+ }
856
+
857
+ template <class T> Vectorized<T> inline operator>>(const Vectorized<T> &a, const Vectorized<T> &b) {
858
+ // right shift value to retain sign bit for signed and no bits for unsigned
859
+ constexpr T max_shift = sizeof(T) * CHAR_BIT - std::is_signed_v<T>;
860
+ Vectorized<T> c;
861
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
862
+ T shift = b[i];
863
+ if ((static_cast<std::make_signed_t<T>>(shift) < 0) || (shift >= max_shift)) {
864
+ c[i] = a[i] >> max_shift;
865
+ } else {
866
+ c[i] = a[i] >> shift;
867
+ }
868
+ }
869
+ return c;
870
+ }
871
+
872
+ template <typename T>
873
+ inline Vectorized<T>& operator += (Vectorized<T>& a, const Vectorized<T>& b) {
874
+ a = a + b;
875
+ return a;
876
+ }
877
+ template <typename T>
878
+ inline Vectorized<T>& operator -= (Vectorized<T>& a, const Vectorized<T>& b) {
879
+ a = a - b;
880
+ return a;
881
+ }
882
+ template <typename T>
883
+ inline Vectorized<T>& operator /= (Vectorized<T>& a, const Vectorized<T>& b) {
884
+ a = a / b;
885
+ return a;
886
+ }
887
+ template <typename T>
888
+ inline Vectorized<T>& operator %= (Vectorized<T>& a, const Vectorized<T>& b) {
889
+ a = a % b;
890
+ return a;
891
+ }
892
+ template <typename T>
893
+ inline Vectorized<T>& operator *= (Vectorized<T>& a, const Vectorized<T>& b) {
894
+ a = a * b;
895
+ return a;
896
+ }
897
+
898
+ template <typename T>
899
+ inline Vectorized<T>& operator <<= (Vectorized<T>& a, const Vectorized<T>& b) {
900
+ a = a << b;
901
+ return a;
902
+ }
903
+
904
+ template <typename T>
905
+ inline Vectorized<T>& operator >>= (Vectorized<T>& a, const Vectorized<T>& b) {
906
+ a = a >> b;
907
+ return a;
908
+ }
909
+
910
+ template <typename T>
911
+ inline Vectorized<T> fmadd(const Vectorized<T>& a, const Vectorized<T>& b, const Vectorized<T>& c) {
912
+ return a * b + c;
913
+ }
914
+
915
+ template <typename T>
916
+ inline Vectorized<T> fmsub(const Vectorized<T>& a, const Vectorized<T>& b, const Vectorized<T>& c) {
917
+ return a * b - c;
918
+ }
919
+
920
+ template <int64_t scale = 1, typename T = void>
921
+ std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<T>>
922
+ inline gather(T const* base_addr, const Vectorized<int_same_size_t<T>>& vindex) {
923
+ static constexpr int size = Vectorized<T>::size();
924
+ int_same_size_t<T> index_arr[size];
925
+ vindex.store(static_cast<void*>(index_arr));
926
+ T buffer[size];
927
+ for (const auto i : c10::irange(size)) {
928
+ buffer[i] = base_addr[index_arr[i] * scale / sizeof(T)];
929
+ }
930
+ return Vectorized<T>::loadu(static_cast<void*>(buffer));
931
+ }
932
+
933
+ template <int64_t scale = 1, typename T = void>
934
+ std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<T>>
935
+ inline mask_gather(const Vectorized<T>& src, T const* base_addr,
936
+ const Vectorized<int_same_size_t<T>>& vindex, Vectorized<T>& mask) {
937
+ static constexpr int size = Vectorized<T>::size();
938
+ T src_arr[size];
939
+ int_same_size_t<T> mask_arr[size]; // use int type so we can logical and
940
+ int_same_size_t<T> index_arr[size];
941
+ src.store(static_cast<void*>(src_arr));
942
+ mask.store(static_cast<void*>(mask_arr));
943
+ vindex.store(static_cast<void*>(index_arr));
944
+ T buffer[size];
945
+ for (const auto i : c10::irange(size)) {
946
+ if (mask_arr[i] & 0x01) { // check lowest bit
947
+ buffer[i] = base_addr[index_arr[i] * scale / sizeof(T)];
948
+ } else {
949
+ buffer[i] = src_arr[i];
950
+ }
951
+ }
952
+ mask = Vectorized<T>(); // "zero out" mask
953
+ return Vectorized<T>::loadu(static_cast<void*>(buffer));
954
+ }
955
+
956
+ // Cast a given vector to another type without changing the bits representation.
957
+ // So a Vectorized<double> of 512 bits containing all ones can be cast to a
958
+ // Vectorized<int64_t> of 512 bits containing all ones (i.e., eight negative 1s).
959
+ // A Vec<double> of 256 bits containing all ones can be cast to a
960
+ // Vec<int64_t> of 256 bits containing all ones (i.e., four negative 1s).
961
+ // There is a struct here because we don't have static_if and I can't
962
+ // partially specialize a templated function.
963
+ template<typename dst_t, typename src_t>
964
+ struct CastImpl {
965
+ static inline Vectorized<dst_t> apply(const Vectorized<src_t>& src) {
966
+ src_t src_arr[Vectorized<src_t>::size()];
967
+ src.store(static_cast<void*>(src_arr));
968
+ return Vectorized<dst_t>::loadu(static_cast<const void*>(src_arr));
969
+ }
970
+ };
971
+
972
+ template<typename scalar_t>
973
+ struct CastImpl<scalar_t, scalar_t> {
974
+ static inline Vectorized<scalar_t> apply(const Vectorized<scalar_t>& src) {
975
+ return src;
976
+ }
977
+ };
978
+
979
+ template<typename dst_t, typename src_t>
980
+ inline Vectorized<dst_t> cast(const Vectorized<src_t>& src) {
981
+ return CastImpl<dst_t, src_t>::apply(src);
982
+ }
983
+
984
+ template <typename T, typename IntType = int_same_size_t<T>>
985
+ inline Vectorized<IntType> convert_to_int_of_same_size(const Vectorized<T>& src) {
986
+ static_assert(sizeof(T) == sizeof(IntType));
987
+ static constexpr int size = Vectorized<T>::size();
988
+
989
+ std::array<T, size> src_arr;
990
+ src.store(static_cast<void*>(src_arr.data()));
991
+ std::array<IntType, size> buffer;
992
+ std::transform(src_arr.cbegin(), src_arr.cend(), buffer.begin(),
993
+ [](const T& x) { return static_cast<IntType>(x); });
994
+ return Vectorized<IntType>::loadu(static_cast<const void*>(buffer.data()));
995
+ }
996
+
997
+ template <typename T, typename IntType = int_same_size_t<T>>
998
+ inline Vectorized<T> convert_to_fp_of_same_size(const Vectorized<IntType>& src) {
999
+ static_assert(sizeof(T) == sizeof(IntType));
1000
+ static constexpr int size = Vectorized<T>::size();
1001
+
1002
+ std::array<IntType, size> src_arr;
1003
+ src.store(static_cast<void*>(src_arr.data()));
1004
+ std::array<T, size> buffer;
1005
+ std::transform(src_arr.cbegin(), src_arr.cend(), buffer.begin(),
1006
+ [](const IntType& x) { return static_cast<T>(x); });
1007
+ return Vectorized<T>::loadu(static_cast<const void*>(buffer.data()));
1008
+ }
1009
+
1010
+ // Example inputs for AVX512:
1011
+ // a Vectorized<float> = {a0, b0, a1, b1, a2, b2, a3, b3, a4, b4, a5, b5, a6, b6, a7, b7}
1012
+ // b Vectorized<float> = {a8, b8, a9, b9, a10, b10, a11, b11, a12, b12, a13, b13, a14, b14, a15, b15}
1013
+ // returns:
1014
+ // Vectorized<float> = {a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15}
1015
+ // Vectorized<float> = {b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15}
1016
+ // Example inputs for AVX2: a Vectorized<float> = {a0, b0, a1, b1, a2, b2, a3, b3}
1017
+ // b Vectorized<float> = {a4, b4, a5, b5, a6, b6, a7, b7}
1018
+ // returns: Vectorized<float> = {a0, a1, a2, a3, a4, a5, a6, a7}
1019
+ // Vectorized<float> = {b0, b1, b2, b3, b4, b5, b6, b7}
1020
+ template <typename T>
1021
+ inline std::enable_if_t<Vectorized<T>::size() % 2 == 0, std::pair<Vectorized<T>, Vectorized<T>>>
1022
+ deinterleave2(const Vectorized<T>& a, const Vectorized<T>& b) {
1023
+ static constexpr int size = Vectorized<T>::size();
1024
+ static constexpr int half_size = size / 2;
1025
+ T a_arr[size];
1026
+ T b_arr[size];
1027
+ T buffer1[size];
1028
+ T buffer2[size];
1029
+ a.store(static_cast<void*>(a_arr));
1030
+ b.store(static_cast<void*>(b_arr));
1031
+ for (const auto i : c10::irange(half_size)) {
1032
+ buffer1[i] = a_arr[i * 2];
1033
+ buffer1[half_size + i] = b_arr[i * 2];
1034
+ buffer2[i] = a_arr[i * 2 + 1];
1035
+ buffer2[half_size + i] = b_arr[i * 2 + 1];
1036
+ }
1037
+ return std::make_pair(Vectorized<T>::loadu(static_cast<void*>(buffer1)),
1038
+ Vectorized<T>::loadu(static_cast<void*>(buffer2)));
1039
+ }
1040
+
1041
+ // inverse operation of deinterleave2
1042
+ // Example inputs for AVX512:
1043
+ // a Vectorized<float> = {a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15}
1044
+ // b Vectorized<float> = {b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15}
1045
+ // returns, for AVX512:
1046
+ // Vectorized<float> = {a0, b0, a1, b1, a2, b2, a3, b3, a4, b4, a5, b5, a6, b6, a7, b7}
1047
+ // Vectorized<float> = {a8, b8, a9, b9, a10, b10, a11, b11, a12, b12, a13, b13, a14, b14, a15, b15}
1048
+ // Example inputs for AVX2 : a Vectorized<float> = {a0, a1, a2, a3, a4, a5, a6, a7}
1049
+ // b Vectorized<float> = {b0, b1, b2, b3, b4, b5, b6, b7}
1050
+ // returns: Vectorized<float> = {a0, b0, a1, b1, a2, b2, a3, b3}
1051
+ // Vectorized<float> = {a4, b4, a5, b5, a6, b6, a7, b7}
1052
+ template <typename T>
1053
+ inline std::enable_if_t<Vectorized<T>::size() % 2 == 0, std::pair<Vectorized<T>, Vectorized<T>>>
1054
+ interleave2(const Vectorized<T>& a, const Vectorized<T>& b) {
1055
+ static constexpr int size = Vectorized<T>::size();
1056
+ static constexpr int half_size = size / 2;
1057
+ T a_arr[size];
1058
+ T b_arr[size];
1059
+ T buffer1[size];
1060
+ T buffer2[size];
1061
+ a.store(static_cast<void*>(a_arr));
1062
+ b.store(static_cast<void*>(b_arr));
1063
+ for (const auto i : c10::irange(half_size)) {
1064
+ buffer1[i * 2] = a_arr[i];
1065
+ buffer1[i * 2 + 1] = b_arr[i];
1066
+ buffer2[i * 2] = a_arr[half_size + i];
1067
+ buffer2[i * 2 + 1] = b_arr[half_size + i];
1068
+ }
1069
+ return std::make_pair(Vectorized<T>::loadu(static_cast<void*>(buffer1)),
1070
+ Vectorized<T>::loadu(static_cast<void*>(buffer2)));
1071
+ }
1072
+
1073
+ template <typename src_T, typename dst_T>
1074
+ inline void convert(const src_T *src, dst_T *dst, int64_t n) {
1075
+ #ifndef _MSC_VER
1076
+ # pragma unroll
1077
+ #endif
1078
+ for (C10_UNUSED const auto i : c10::irange(n)) {
1079
+ *dst = c10::convert<dst_T>(c10::load(src));
1080
+ src++;
1081
+ dst++;
1082
+ }
1083
+ }
1084
+
1085
+ template <typename T>
1086
+ inline Vectorized<T> flip(const Vectorized<T> & data) {
1087
+ static constexpr int size = Vectorized<T>::size();
1088
+ T output[size];
1089
+ T buffer[size];
1090
+ data.store(static_cast<void*>(buffer));
1091
+ for (const auto i : c10::irange(size)) {
1092
+ output[i] = buffer[size - i - 1];
1093
+ }
1094
+ return Vectorized<T>::loadu(static_cast<void*>(output));
1095
+ }
1096
+
1097
+ // Transpose the `src` buffer of type `T` and size (M,N) into the `dst` buffer. `ld_src` is the leading
1098
+ // dimension of `src` and `ld_dst` is the leading dimension of `dst`.
1099
+ template <typename T, int M, int N>
1100
+ inline void transpose_mxn(const T* src, int64_t ld_src, T* dst, int64_t ld_dst) {
1101
+ for (int i = 0; i < M; i++) {
1102
+ for (int j = 0; j < N; j++) {
1103
+ dst[j*ld_dst + i] = src[i*ld_src + j];
1104
+ }
1105
+ }
1106
+ }
1107
+
1108
+ }} // namespace at::vec::CPU_CAPABILITY
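Illustration (editorial note, not part of the uploaded header): a minimal sketch of how the scalar-fallback Vectorized<T> defined above is typically driven from a loop. It assumes the translation unit includes the ATen vec headers with an appropriate CPU_CAPABILITY configured by the build; the helper name add_buffers is hypothetical.

#include <ATen/cpu/vec/vec_base.h>
#include <cstdint>

// Element-wise addition of two float buffers via Vectorized<float>.
// Full vectors use loadu/store; the tail uses the count-limited
// overloads declared in the class above. add_buffers is a hypothetical helper.
inline void add_buffers(const float* a, const float* b, float* out, int64_t n) {
  using Vec = at::vec::Vectorized<float>;
  int64_t i = 0;
  for (; i + Vec::size() <= n; i += Vec::size()) {
    (Vec::loadu(a + i) + Vec::loadu(b + i)).store(out + i);
  }
  if (i < n) {
    const int64_t rest = n - i;
    (Vec::loadu(a + i, rest) + Vec::loadu(b + i, rest))
        .store(out + i, static_cast<int>(rest));
  }
}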
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec_half.h ADDED
@@ -0,0 +1,50 @@
1
+ #pragma once
2
+
3
+ #include <ATen/cpu/vec/intrinsics.h>
4
+
5
+ namespace at::vec {
6
+ // See Note [CPU_CAPABILITY namespace]
7
+ inline namespace CPU_CAPABILITY {
8
+
9
+ #if (defined(CPU_CAPABILITY_AVX2) || defined(CPU_CAPABILITY_AVX512)) && \
10
+ !defined(__APPLE__)
11
+ static inline uint16_t float2half_scalar(float val) {
12
+ #if defined(CPU_CAPABILITY_AVX2)
13
+ #if defined(_MSC_VER)
14
+ __m256 v = _mm256_set1_ps(val);
15
+ __m128i o =
16
+ _mm256_cvtps_ph(v, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
17
+ return static_cast<std::uint16_t>(_mm_cvtsi128_si32(o));
18
+ #else
19
+ return _cvtss_sh(val, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
20
+ #endif
21
+ #elif defined(CPU_CAPABILITY_AVX512)
22
+ __m512 v = _mm512_set1_ps(val);
23
+ __m256i o =
24
+ _mm512_cvtps_ph(v, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
25
+ return static_cast<std::uint16_t>(
26
+ _mm_cvtsi128_si32(_mm256_castsi256_si128(o)));
27
+ #endif
28
+ }
29
+
30
+ static inline float half2float_scalar(uint16_t val) {
31
+ #if defined(CPU_CAPABILITY_AVX2)
32
+ #if defined(_MSC_VER)
33
+ __m128i v = _mm_cvtsi32_si128(val);
34
+ __m256 o = _mm256_cvtph_ps(v);
35
+ return _mm256_cvtss_f32(o);
36
+ #else
37
+ return _cvtsh_ss(val);
38
+ #endif
39
+ #elif defined(CPU_CAPABILITY_AVX512)
40
+ __m256i v =
41
+ _mm256_setr_epi16(val, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
42
+ __m512 o = _mm512_cvtph_ps(v);
43
+ return _mm512_cvtss_f32(o);
44
+ #endif
45
+ }
46
+
47
+ #endif
48
+
49
+ } // namespace CPU_CAPABILITY
50
+ } // namespace at::vec
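Illustration (editorial, not part of the file above): a round trip through the two scalar conversion helpers. It assumes a non-Apple build with CPU_CAPABILITY_AVX2 or CPU_CAPABILITY_AVX512 defined, since the helpers are only compiled under those guards; 1.5f is chosen because it is exactly representable in fp16, so the round trip should be bit-exact.

#include <ATen/cpu/vec/vec_half.h>
#include <cassert>
#include <cstdint>

int main() {
  const float x = 1.5f;                              // exactly representable in fp16
  const uint16_t h = at::vec::float2half_scalar(x);  // float -> IEEE half bits
  const float back = at::vec::half2float_scalar(h);  // IEEE half bits -> float
  assert(back == x);
  return 0;
}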
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec_n.h ADDED
@@ -0,0 +1,344 @@
1
+ #include <ATen/cpu/vec/vec_base.h>
2
+ #include <array>
3
+
4
+ namespace at::vec {
5
+ inline namespace CPU_CAPABILITY {
6
+
7
+ /**
8
+ * @brief A class template representing a vectorized type with
9
+ * `N * Vectorized<T>::size()` elements, aiming to support vectors of
10
+ * arbitrary size. A specific use case of it is to represent vectors
11
+ * converted from data types with different sizes but with the same
12
+ * number of vector elements, e.g., `VectorizedN<float, 2>` can be
13
+ * a vector converted from two `Vectorized<bfloat16>`, `VectorizedN<int64_t, 2>`
14
+ * can be a vector converted from two `Vectorized<int32_t>` etc.
15
+ *
16
+ * It supports most of the operations of `Vectorized<T>`
17
+ * and the implementation delegates to `Vectorized<T>` with loops over `N`.
18
+ *
19
+ * @tparam T The underlying type of the vectorized elements.
20
+ * @tparam N The number of underlying `Vectorized<T>`.
21
+ */
22
+ template <typename T, int N>
23
+ class VectorizedN {
24
+ public:
25
+ using value_type = T;
26
+ using size_type = int;
27
+
28
+ static constexpr size_type size_T = sizeof(T);
29
+ static constexpr size_type size() {
30
+ return Vectorized<T>::size() * N;
31
+ }
32
+
33
+ private:
34
+ std::array<Vectorized<T>, N> values;
35
+
36
+ public:
37
+ // methods not implemented yet:
38
+ // variadic constructor, operator T*, as_bytes, zero_mask
39
+
40
+ #define VECTORIZEDN_DEFINE_UNARY_OP(op) \
41
+ VectorizedN<T, N> op() const { \
42
+ return unary_op([](const Vectorized<T>& a) { return a.op(); }); \
43
+ }
44
+
45
+ #define VECTORIZEDN_DEFINE_BINARY_OP(op) \
46
+ VectorizedN<T, N> op(const VectorizedN<T, N>& other) const { \
47
+ return binary_op( \
48
+ other, [](const Vectorized<T>& a, const Vectorized<T>& b) { \
49
+ return a.op(b); \
50
+ }); \
51
+ }
52
+
53
+ template <typename Op>
54
+ inline VectorizedN<T, N> unary_op(Op op) const {
55
+ VectorizedN<T, N> result;
56
+ #ifndef _MSC_VER
57
+ #pragma unroll
58
+ #endif
59
+ for (int i = 0; i < N; ++i) {
60
+ result.values[i] = op(values[i]);
61
+ }
62
+ return result;
63
+ }
64
+
65
+ template <typename Op>
66
+ inline VectorizedN<T, N> binary_op(const VectorizedN<T, N>& other, Op op)
67
+ const {
68
+ VectorizedN<T, N> result;
69
+ #ifndef _MSC_VER
70
+ #pragma unroll
71
+ #endif
72
+ for (int i = 0; i < N; ++i) {
73
+ result.values[i] = op(values[i], other.values[i]);
74
+ }
75
+ return result;
76
+ }
77
+
78
+ VectorizedN() = default;
79
+
80
+ explicit VectorizedN(T val) {
81
+ for (int i = 0; i < N; ++i) {
82
+ values[i] = Vectorized<T>(val);
83
+ }
84
+ }
85
+
86
+ const Vectorized<T>& operator[](int i) const {
87
+ return values[i];
88
+ }
89
+
90
+ Vectorized<T>& operator[](int i) {
91
+ return values[i];
92
+ }
93
+
94
+ template <int64_t mask>
95
+ static VectorizedN<T, N> blend(
96
+ const VectorizedN<T, N>& a,
97
+ const VectorizedN<T, N>& b) {
98
+ VectorizedN<T, N> result;
99
+ for (int i = 0; i < N; ++i) {
100
+ result.values[i] = Vectorized<T>::blend<mask>(a.values[i], b.values[i]);
101
+ }
102
+ return result;
103
+ }
104
+
105
+ static VectorizedN<T, N> blendv(
106
+ const VectorizedN<T, N>& a,
107
+ const VectorizedN<T, N>& b,
108
+ const VectorizedN<T, N>& mask) {
109
+ VectorizedN<T, N> result;
110
+ for (int i = 0; i < N; ++i) {
111
+ result.values[i] =
112
+ Vectorized<T>::blendv(a.values[i], b.values[i], mask.values[i]);
113
+ }
114
+ return result;
115
+ }
116
+
117
+ template <typename step_t>
118
+ static VectorizedN<T, N> arange(
119
+ T base = static_cast<T>(0),
120
+ step_t step = static_cast<step_t>(1)) {
121
+ VectorizedN<T, N> result;
122
+ for (int i = 0; i < N; ++i) {
123
+ result.values[i] = Vectorized<T>::arange(base, step);
124
+ base += step * Vectorized<T>::size();
125
+ }
126
+ return result;
127
+ }
128
+
129
+ static VectorizedN<T, N> set(
130
+ const VectorizedN<T, N>& a,
131
+ const VectorizedN<T, N>& b,
132
+ int64_t count = size()) {
133
+ VectorizedN<T, N> result;
134
+ for (int i = 0; i < N; ++i) {
135
+ result.values[i] =
136
+ Vectorized<T>::set(a.values[i], b.values[i], std::min(count, Vectorized<T>::size()));
137
+ count -= Vectorized<T>::size();
138
+ if (count <= 0) {
139
+ break;
140
+ }
141
+ }
142
+ return result;
143
+ }
144
+
145
+ static VectorizedN<T, N> loadu(const void* ptr) {
146
+ VectorizedN<T, N> result;
147
+ for (int i = 0; i < N; ++i) {
148
+ result.values[i] = Vectorized<T>::loadu(ptr);
149
+ ptr = static_cast<const T*>(ptr) + Vectorized<T>::size();
150
+ }
151
+ return result;
152
+ }
153
+
154
+ static VectorizedN<T, N> loadu(const void* ptr, int64_t count) {
155
+ VectorizedN<T, N> result;
156
+ for (int i = 0; i < N; ++i) {
157
+ result.values[i] =
158
+ Vectorized<T>::loadu(ptr, std::min(count, Vectorized<T>::size()));
159
+ ptr = static_cast<const T*>(ptr) + Vectorized<T>::size();
160
+ count -= Vectorized<T>::size();
161
+ if (count <= 0) {
162
+ break;
163
+ }
164
+ }
165
+ return result;
166
+ }
167
+
168
+ void store(void* ptr) const {
169
+ for (int i = 0; i < N; ++i) {
170
+ values[i].store(ptr);
171
+ ptr = static_cast<T*>(ptr) + Vectorized<T>::size();
172
+ }
173
+ }
174
+
175
+ void store(void* ptr, int count) const {
176
+ for (int i = 0; i < N; ++i) {
177
+ values[i].store(ptr, std::min(count, Vectorized<T>::size()));
178
+ ptr = static_cast<T*>(ptr) + Vectorized<T>::size();
179
+ count -= Vectorized<T>::size();
180
+ if (count <= 0) {
181
+ break;
182
+ }
183
+ }
184
+ }
185
+
186
+ bool has_inf_nan() const {
187
+ for (int i = 0; i < N; ++i) {
188
+ if (values[i].has_inf_nan()) {
189
+ return true;
190
+ }
191
+ }
192
+ return false;
193
+ }
194
+
195
+ VectorizedN<T, N> map(T (*const f)(T)) const {
196
+ VectorizedN<T, N> result;
197
+ for (int i = 0; i < N; ++i) {
198
+ result.values[i] = values[i].map(f);
199
+ }
200
+ return result;
201
+ }
202
+
203
+ VectorizedN<T, N> map(T (*const f)(const T&)) const {
204
+ VectorizedN<T, N> result;
205
+ for (int i = 0; i < N; ++i) {
206
+ result.values[i] = values[i].map(f);
207
+ }
208
+ return result;
209
+ }
210
+
211
+ VECTORIZEDN_DEFINE_UNARY_OP(abs)
212
+ VECTORIZEDN_DEFINE_UNARY_OP(sgn)
213
+ VECTORIZEDN_DEFINE_UNARY_OP(angle)
214
+ VECTORIZEDN_DEFINE_UNARY_OP(real)
215
+ VECTORIZEDN_DEFINE_UNARY_OP(imag)
216
+ VECTORIZEDN_DEFINE_UNARY_OP(conj)
217
+ VECTORIZEDN_DEFINE_UNARY_OP(acos)
218
+ VECTORIZEDN_DEFINE_UNARY_OP(acosh)
219
+ VECTORIZEDN_DEFINE_UNARY_OP(asin)
220
+ VECTORIZEDN_DEFINE_UNARY_OP(atan)
221
+ VECTORIZEDN_DEFINE_UNARY_OP(atanh)
222
+ VECTORIZEDN_DEFINE_BINARY_OP(atan2)
223
+ VECTORIZEDN_DEFINE_BINARY_OP(copysign)
224
+ VECTORIZEDN_DEFINE_UNARY_OP(erf)
225
+ VECTORIZEDN_DEFINE_UNARY_OP(erfc)
226
+ VECTORIZEDN_DEFINE_UNARY_OP(erfinv)
227
+ VECTORIZEDN_DEFINE_UNARY_OP(exp)
228
+ VECTORIZEDN_DEFINE_UNARY_OP(exp2)
229
+ VECTORIZEDN_DEFINE_UNARY_OP(expm1)
230
+ VECTORIZEDN_DEFINE_UNARY_OP(exp_u20)
231
+ VECTORIZEDN_DEFINE_UNARY_OP(frac)
232
+ VECTORIZEDN_DEFINE_BINARY_OP(fmod)
233
+ VECTORIZEDN_DEFINE_UNARY_OP(log)
234
+ VECTORIZEDN_DEFINE_UNARY_OP(log10)
235
+ VECTORIZEDN_DEFINE_UNARY_OP(log1p)
236
+ VECTORIZEDN_DEFINE_UNARY_OP(log2)
237
+ VECTORIZEDN_DEFINE_UNARY_OP(ceil)
238
+ VECTORIZEDN_DEFINE_UNARY_OP(cos)
239
+ VECTORIZEDN_DEFINE_UNARY_OP(cosh)
240
+ VECTORIZEDN_DEFINE_UNARY_OP(floor)
241
+ VECTORIZEDN_DEFINE_BINARY_OP(hypot)
242
+ VECTORIZEDN_DEFINE_UNARY_OP(i0)
243
+ VECTORIZEDN_DEFINE_UNARY_OP(i0e)
244
+ VECTORIZEDN_DEFINE_UNARY_OP(digamma)
245
+ VECTORIZEDN_DEFINE_BINARY_OP(igamma)
246
+ VECTORIZEDN_DEFINE_BINARY_OP(igammac)
247
+ VECTORIZEDN_DEFINE_UNARY_OP(neg)
248
+ VECTORIZEDN_DEFINE_BINARY_OP(nextafter)
249
+ VECTORIZEDN_DEFINE_UNARY_OP(round)
250
+ VECTORIZEDN_DEFINE_UNARY_OP(sin)
251
+ VECTORIZEDN_DEFINE_UNARY_OP(sinh)
252
+ VECTORIZEDN_DEFINE_UNARY_OP(tan)
253
+ VECTORIZEDN_DEFINE_UNARY_OP(tanh)
254
+ VECTORIZEDN_DEFINE_UNARY_OP(trunc)
255
+ VECTORIZEDN_DEFINE_UNARY_OP(lgamma)
256
+ VECTORIZEDN_DEFINE_UNARY_OP(sqrt)
257
+ VECTORIZEDN_DEFINE_UNARY_OP(reciprocal)
258
+ VECTORIZEDN_DEFINE_UNARY_OP(rsqrt)
259
+ VECTORIZEDN_DEFINE_BINARY_OP(pow)
260
+ VECTORIZEDN_DEFINE_BINARY_OP(operator==)
261
+ VECTORIZEDN_DEFINE_BINARY_OP(operator!=)
262
+ VECTORIZEDN_DEFINE_BINARY_OP(operator>=)
263
+ VECTORIZEDN_DEFINE_BINARY_OP(operator<=)
264
+ VECTORIZEDN_DEFINE_BINARY_OP(operator>)
265
+ VECTORIZEDN_DEFINE_BINARY_OP(operator<)
266
+ VECTORIZEDN_DEFINE_BINARY_OP(eq)
267
+ VECTORIZEDN_DEFINE_BINARY_OP(ne)
268
+ VECTORIZEDN_DEFINE_BINARY_OP(gt)
269
+ VECTORIZEDN_DEFINE_BINARY_OP(ge)
270
+ VECTORIZEDN_DEFINE_BINARY_OP(lt)
271
+ VECTORIZEDN_DEFINE_BINARY_OP(le)
272
+
273
+ #undef VECTORIZEDN_DEFINE_UNARY_OP
274
+ #undef VECTORIZEDN_DEFINE_BINARY_OP
275
+ };
276
+
277
+ #define VECTORIZEDN_DEFINE_UNARY_OP_GLOBAL(op) \
278
+ template <typename T, int N> \
279
+ inline VectorizedN<T, N> op(const VectorizedN<T, N>& a) { \
280
+ return a.unary_op([](const Vectorized<T>& a) { return op(a); }); \
281
+ }
282
+
283
+ #define VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(op) \
284
+ template <typename T, int N> \
285
+ inline VectorizedN<T, N> op( \
286
+ const VectorizedN<T, N>& a, const VectorizedN<T, N>& b) { \
287
+ return a.binary_op(b, [](const Vectorized<T>& a, const Vectorized<T>& b) { \
288
+ return op(a, b); \
289
+ }); \
290
+ }
291
+
292
+ #define VECTORIZEDN_DEFINE_BINARY_OP_INPLACE_GLOBAL(op) \
293
+ template <typename T, int N> \
294
+ inline VectorizedN<T, N>& op( \
295
+ VectorizedN<T, N>& a, const VectorizedN<T, N>& b) { \
296
+ a = a.binary_op(b, [](const Vectorized<T>& a, const Vectorized<T>& b) { \
297
+ return op(a, b); \
298
+ }); \
299
+ return a; \
300
+ }
301
+
302
+ VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(operator+)
303
+ VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(operator-)
304
+ VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(operator*)
305
+ VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(operator/)
306
+ VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(operator%)
307
+ VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(operator||)
308
+ VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(operator<<)
309
+ VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(operator>>)
310
+ VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(maximum)
311
+ VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(minimum)
312
+ VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(fmadd)
313
+ VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(fmsub)
314
+ VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(clamp)
315
+ VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(clamp_max)
316
+ VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(clamp_min)
317
+ VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(operator&)
318
+ VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(operator|)
319
+ VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(operator^)
320
+ VECTORIZEDN_DEFINE_UNARY_OP_GLOBAL(operator~)
321
+
322
+ VECTORIZEDN_DEFINE_BINARY_OP_INPLACE_GLOBAL(operator+=)
323
+ VECTORIZEDN_DEFINE_BINARY_OP_INPLACE_GLOBAL(operator-=)
324
+ VECTORIZEDN_DEFINE_BINARY_OP_INPLACE_GLOBAL(operator*=)
325
+ VECTORIZEDN_DEFINE_BINARY_OP_INPLACE_GLOBAL(operator/=)
326
+ VECTORIZEDN_DEFINE_BINARY_OP_INPLACE_GLOBAL(operator%=)
327
+ VECTORIZEDN_DEFINE_BINARY_OP_INPLACE_GLOBAL(operator<<=)
328
+ VECTORIZEDN_DEFINE_BINARY_OP_INPLACE_GLOBAL(operator>>=)
329
+
330
+ #undef VECTORIZEDN_DEFINE_UNARY_OP_GLOBAL
331
+ #undef VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL
332
+ #undef VECTORIZEDN_DEFINE_BINARY_OP_INPLACE_GLOBAL
333
+
334
+ template <typename T, int N, typename OpVec>
335
+ inline T vec_reduce_all(const OpVec& vec_fun, VectorizedN<T, N> acc_vec) {
336
+ Vectorized<T> vec_result = acc_vec[0];
337
+ for (int i = 1; i < N; i++) {
338
+ vec_result = vec_fun(vec_result, acc_vec[i]);
339
+ }
340
+ return vec_reduce_all(vec_fun, vec_result);
341
+ }
342
+
343
+ } // namespace CPU_CAPABILITY
344
+ } // namespace at::vec
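Illustration (editorial): a small sketch using VectorizedN<float, 2>, i.e. two Vectorized<float> registers treated as one logical vector, to scale a buffer. The helper name scale_by_two is hypothetical, and the usual caveat about CPU_CAPABILITY and include setup applies.

#include <ATen/cpu/vec/vec_n.h>
#include <cstdint>

// Doubles a float buffer; the count-limited loadu/store overloads
// declared above take care of the tail elements.
inline void scale_by_two(const float* in, float* out, int64_t n) {
  using VecN = at::vec::VectorizedN<float, 2>;
  const VecN two(2.0f);  // broadcast constructor defined above
  int64_t i = 0;
  for (; i + VecN::size() <= n; i += VecN::size()) {
    (VecN::loadu(in + i) * two).store(out + i);
  }
  if (i < n) {
    (VecN::loadu(in + i, n - i) * two).store(out + i, static_cast<int>(n - i));
  }
}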
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vml.h ADDED
@@ -0,0 +1,171 @@
1
+ #pragma once
2
+
3
+ #include <ATen/Config.h>
4
+ #include <ATen/Parallel.h>
5
+ #include <ATen/OpMathType.h>
6
+ #include <ATen/cpu/vec/functional.h>
7
+ #include <ATen/cpu/vec/vec.h>
8
+ #include <c10/util/complex.h>
9
+
10
+ // This header implements various unary operations using a MKL VML style
11
+ // interface.
12
+
13
+ // It implements various functions with a simple interface
14
+ // For example it enables the user to call vsin(float* out, const float* in,
15
+ // size). This function takes a pointer to a contiguous output array of floats and
16
+ // a constant input array. It will then apply sin to each value in the input
17
+ // array and write the result into the output array. out and in may point to the
18
+ // same memory, i.e. this fully supports in-place operations. These functions
19
+ // also implement their own parallelization, so take precautions when calling
20
+ // these from threaded functions.
21
+
22
+ // When MKL is available it will call into MKL's VML library similar to NumPy
23
+ // If MKL is not available it will use SLEEF.
24
+
25
+ // This file might be compiled under AVX or AVX2 when called from e.g.
26
+ // UnaryOpsKernel.cpp
27
+
28
+ #include <algorithm>
29
+ #include <cstddef>
30
+ #include <cstdint>
31
+ #include <cstring>
32
+ #include <type_traits>
33
+
34
+ #if AT_MKL_ENABLED() && !defined(__APPLE__)
35
+ #include <mkl.h>
36
+ #endif
37
+
38
+ namespace at {
39
+ namespace vml {
40
+ inline namespace CPU_CAPABILITY {
41
+
42
+ using namespace vec;
43
+
44
+ template <typename scalar_t>
45
+ inline void vrsqrt(scalar_t* out, scalar_t* in, int64_t size) {
46
+ parallel_for(0, size, 2048, [out, in](int64_t begin, int64_t end) {
47
+ map(
48
+ [](const Vectorized<scalar_t>& x) {
49
+ return Vectorized<scalar_t>((scalar_t)(1)) / x.sqrt();
50
+ },
51
+ out + begin,
52
+ in + begin,
53
+ end - begin);
54
+ });
55
+ }
56
+
57
+ // NB: We ignore numerical errors by convention and leave them to the user
58
+
59
+ #define IMPLEMENT_VML(op) \
60
+ template <typename scalar_t> \
61
+ inline void v##op(scalar_t* out, const scalar_t* in, int64_t size) { \
62
+ using vec_t = Vectorized<vec_scalar_t<scalar_t>>; \
63
+ vec::map([](vec_t x) { return x.op(); }, out, in, size); \
64
+ } \
65
+
66
+ IMPLEMENT_VML(abs)
67
+ IMPLEMENT_VML(acos)
68
+ IMPLEMENT_VML(asin)
69
+ IMPLEMENT_VML(atan)
70
+ IMPLEMENT_VML(atanh)
71
+ IMPLEMENT_VML(ceil)
72
+ IMPLEMENT_VML(cos)
73
+ // IMPLEMENT_VML(cosh)
74
+ IMPLEMENT_VML(erf)
75
+ IMPLEMENT_VML(erfc)
76
+ IMPLEMENT_VML(erfinv)
77
+ IMPLEMENT_VML(exp)
78
+ IMPLEMENT_VML(expm1)
79
+ IMPLEMENT_VML(floor)
80
+ IMPLEMENT_VML(i0)
81
+ IMPLEMENT_VML(i0e)
82
+ IMPLEMENT_VML(digamma)
83
+ IMPLEMENT_VML(reciprocal)
84
+ IMPLEMENT_VML(log)
85
+ IMPLEMENT_VML(log10)
86
+ IMPLEMENT_VML(log1p)
87
+ IMPLEMENT_VML(log2)
88
+ IMPLEMENT_VML(neg)
89
+ IMPLEMENT_VML(sin)
90
+ // IMPLEMENT_VML(sinh)
91
+ IMPLEMENT_VML(sqrt)
92
+ IMPLEMENT_VML(round)
93
+ IMPLEMENT_VML(rsqrt)
94
+ IMPLEMENT_VML(tan)
95
+ IMPLEMENT_VML(tanh)
96
+ IMPLEMENT_VML(trunc)
97
+ IMPLEMENT_VML(lgamma)
98
+
99
+
100
+ #if AT_MKL_ENABLED() && !defined(__APPLE__)
101
+
102
+ // NB: LP64 MKL is the most commonly used and thus we assume it here. That means
103
+ // we need to expect MKL_INT to be of type int, which implies int32_t or int64_t in most
104
+ // cases.
105
+ static_assert(
106
+ std::is_same_v<MKL_INT, int32_t> || std::is_same_v<MKL_INT, int64_t>,
107
+ "MKL_INT is assumed to be int32_t or int64_t");
108
+ #define IMPLEMENT_VML_MKL_STUB(op, mklop, type, mkltype) \
109
+ template <> \
110
+ inline void v##op(type * out, const type * in, int64_t size) { \
111
+ int64_t max_mkl_ind = std::numeric_limits<MKL_INT>::max(); \
112
+ if (size <= static_cast<int64_t>(max_mkl_ind)) { \
113
+ vm##mkltype##mklop( \
114
+ size, in, out, VML_HA | VML_FTZDAZ_OFF | VML_ERRMODE_IGNORE); \
115
+ } else { \
116
+ MKL_INT ind = 0; \
117
+ int64_t chunks = size / max_mkl_ind; \
118
+ int64_t rest = size % max_mkl_ind; \
119
+ for (; ind < chunks; ind++) { \
120
+ vm##mkltype##mklop( \
121
+ max_mkl_ind, \
122
+ in + ind * max_mkl_ind, \
123
+ out + ind * max_mkl_ind, \
124
+ VML_HA | VML_FTZDAZ_OFF | VML_ERRMODE_IGNORE); \
125
+ } \
126
+ vm##mkltype##mklop( \
127
+ rest, \
128
+ in + ind * max_mkl_ind, \
129
+ out + ind * max_mkl_ind, \
130
+ VML_HA | VML_FTZDAZ_OFF | VML_ERRMODE_IGNORE); \
131
+ } \
132
+ }
133
+
134
+ #define IMPLEMENT_VML_MKL(op, mklop) \
135
+ IMPLEMENT_VML_MKL_STUB(op, mklop, float, s) \
136
+ IMPLEMENT_VML_MKL_STUB(op, mklop, double, d)
137
+
138
+ // NB: abs, cosh and sinh were temporarily disabled due to issues with Apple
139
+ // NB: expm1 is disabled because on some configs it produces expm1(nan)=-1
140
+ IMPLEMENT_VML_MKL(acos, Acos)
141
+ IMPLEMENT_VML_MKL(asin, Asin)
142
+ IMPLEMENT_VML_MKL(atan, Atan)
143
+ IMPLEMENT_VML_MKL(cos, Cos)
144
+ // IMPLEMENT_VML_MKL(cosh, Cosh)
145
+ IMPLEMENT_VML_MKL(erf, Erf)
146
+ IMPLEMENT_VML_MKL(erfc, Erfc)
147
+ IMPLEMENT_VML_MKL(erfinv, ErfInv)
148
+ IMPLEMENT_VML_MKL(exp, Exp)
149
+ // IMPLEMENT_VML_MKL(expm1, Expm1)
150
+ IMPLEMENT_VML_MKL(log, Ln)
151
+ IMPLEMENT_VML_MKL(log10, Log10)
152
+ IMPLEMENT_VML_MKL(sin, Sin)
153
+ // IMPLEMENT_VML_MKL(sinh, Sinh)
154
+ IMPLEMENT_VML_MKL(sqrt, Sqrt)
155
+ IMPLEMENT_VML_MKL(tan, Tan)
156
+ IMPLEMENT_VML_MKL(tanh, Tanh)
157
+ IMPLEMENT_VML_MKL(trunc, Trunc)
158
+
159
+ // Not vectorized in MKL version tested
160
+ // IMPLEMENT_VML_MKL(abs, Abs)
161
+ // IMPLEMENT_VML_MKL(log1p, Log1p)
162
+
163
+ #if INTEL_MKL_VERSION >= 20180406
164
+ IMPLEMENT_VML_MKL(log2, Log2)
165
+ #endif
166
+
167
+ #endif
168
+
169
+ } // namespace
170
+ } // namespace vml
171
+ } // namespace at
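
A hedged illustration of the contract described at the top of vml.h above: each v<op> entry point reads a contiguous input array, writes a same-length contiguous output, and tolerates out == in. The reference loop below uses std::sin as a stand-in for the vectorized/MKL path; it models the behavior only and is not the ATen implementation:

    #include <cmath>
    #include <cstdint>
    #include <vector>

    // Reference model of vsin(out, in, size): elementwise sin, in-place allowed.
    void vsin_reference(float* out, const float* in, int64_t size) {
      for (int64_t i = 0; i < size; ++i) {
        out[i] = std::sin(in[i]);  // the header dispatches to Vectorized<float>::sin, or MKL's vmsSin when available
      }
    }

    int main() {
      std::vector<float> buf{0.0f, 1.0f, 2.0f};
      vsin_reference(buf.data(), buf.data(), static_cast<int64_t>(buf.size()));  // in-place, as the comment permits
    }
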
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cudnn/Descriptors.h ADDED
@@ -0,0 +1,391 @@
1
+ #pragma once
2
+
3
+ #include <string>
4
+
5
+ #include <ATen/cuda/CUDAContext.h>
6
+ #include <ATen/cuda/Exceptions.h>
7
+
8
+ #include <ATen/cudnn/cudnn-wrapper.h>
9
+ #include <ATen/cudnn/Utils.h>
10
+ #include <ATen/core/Tensor.h>
11
+ #include <ATen/TensorUtils.h>
12
+ #include <ATen/cuda/ATenCUDAGeneral.h>
13
+ #include <cuda.h>
14
+
15
+ #ifndef AT_PER_OPERATOR_HEADERS
16
+ #include <ATen/Functions.h>
17
+ #else
18
+ #include <ATen/ops/empty.h>
19
+ #endif
20
+
21
+ #if defined(CUDNN_VERSION) && CUDNN_VERSION >= 8907
22
+ #define USE_CUDNN_RNN_V8_API
23
+ #endif
24
+
25
+ namespace at { namespace native {
26
+
27
+ std::string cudnnTypeToString(cudnnDataType_t dtype);
28
+
29
+ // TODO: Add constructors for all of the descriptors
30
+
31
+ inline int dataSize(cudnnDataType_t dataType)
32
+ {
33
+ switch (dataType) {
34
+ #if defined(CUDNN_VERSION) && CUDNN_VERSION >= 8200
35
+ case CUDNN_DATA_BFLOAT16:
36
+ #endif
37
+ case CUDNN_DATA_HALF: return 2;
38
+ case CUDNN_DATA_FLOAT: return 4;
39
+ default: return 8;
40
+ }
41
+ }
42
+
43
+ // The stride for a size-1 dimension is not uniquely determined; in
44
+ // fact, it can be anything you want, because the fact that the
45
+ // tensor is size 1 at this dimension means that you will never actually
46
+ // try advancing your pointer by this stride.
47
+ //
48
+ // However, CuDNN has a much more stringent requirement on strides:
49
+ // if you are passing a contiguous input, it better be the case
50
+ // that the stride for dim i is the product of the sizes of dims
51
+ // i+1 to the end. This stride is indeed uniquely determined. This
52
+ // function modifies 'stride' in place so this invariant holds.
53
+ template <typename T>
54
+ static inline void fixSizeOneDimStride(int dim, const T *size, T *stride, bool nhwc) {
55
+ int64_t z = 1;
56
+ int index = 0;
57
+ std::vector<int> permutation(dim);
58
+
59
+ if (nhwc) {
60
+ permutation[index++] = 1;
61
+ }
62
+ for (int d = dim-1; d > 1; d--) {
63
+ permutation[index++] = d;
64
+ }
65
+ if (!nhwc) {
66
+ permutation[index++] = 1;
67
+ }
68
+ permutation[index++] = 0;
69
+ for (int d : permutation) {
70
+ if (size[d] == 1) {
71
+ stride[d] = z;
72
+ } else {
73
+ z *= size[d];
74
+ }
75
+ }
76
+ }
77
+
78
+ template <typename T, cudnnStatus_t (*dtor)(T*)>
79
+ struct DescriptorDeleter {
80
+ void operator()(T* x) {
81
+ if (x != nullptr) {
82
+ AT_CUDNN_CHECK(dtor(x));
83
+ }
84
+ }
85
+ };
86
+
87
+ // A generic class for wrapping cuDNN descriptor types. All you need
88
+ // is to give the underlying type the Descriptor_t points to (usually,
89
+ // if it's cudnnTensorDescriptor_t it points to cudnnTensorStruct),
90
+ // the constructor and the destructor. Subclasses are responsible
91
+ // for defining a set() function to actually set the descriptor.
92
+ //
93
+ // Descriptors default construct to a nullptr, and have a descriptor
94
+ // initialized the first time you call set() or any other initializing
95
+ // function.
96
+ template <typename T, cudnnStatus_t (*ctor)(T**), cudnnStatus_t (*dtor)(T*)>
97
+ class TORCH_CUDA_CPP_API Descriptor {
98
+ public:
99
+ // TODO: Figure out why const-correctness doesn't work here
100
+
101
+ // Use desc() to access the underlying descriptor pointer in
102
+ // a read-only fashion. Most client code should use this.
103
+ // If the descriptor was never initialized, this will return
104
+ // nullptr.
105
+ T* desc() const { return desc_.get(); }
106
+ T* desc() { return desc_.get(); }
107
+
108
+ // Use mut_desc() to access the underlying descriptor pointer
109
+ // if you intend to modify what it points to (e.g., using
110
+ // cudnnSetFooDescriptor). This will ensure that the descriptor
111
+ // is initialized. Code in this file will use this function.
112
+ T* mut_desc() { init(); return desc_.get(); }
113
+ protected:
114
+ void init() {
115
+ if (desc_ == nullptr) {
116
+ T* raw_desc;
117
+ AT_CUDNN_CHECK(ctor(&raw_desc));
118
+ desc_.reset(raw_desc);
119
+ }
120
+ }
121
+ private:
122
+ std::unique_ptr<T, DescriptorDeleter<T, dtor>> desc_;
123
+ };
124
+
125
+ class TORCH_CUDA_CPP_API RNNDataDescriptor : public Descriptor<
126
+ cudnnRNNDataStruct,
127
+ &cudnnCreateRNNDataDescriptor,
128
+ &cudnnDestroyRNNDataDescriptor> {
129
+ public:
130
+ void set(const at::Tensor &t, cudnnRNNDataLayout_t layout, int maxSeqLength, int batchSize, int vectorSize, const int* seqLengthArray);
131
+ private:
132
+ void set(cudnnDataType_t dataType, cudnnRNNDataLayout_t layout, int maxSeqLength, int batchSize, int vectorSize, const int* seqLengthArray) {
133
+ AT_CUDNN_CHECK(cudnnSetRNNDataDescriptor(mut_desc(), dataType, layout, maxSeqLength, batchSize, vectorSize, seqLengthArray, NULL));
134
+ }
135
+ };
136
+
137
+ class TORCH_CUDA_CPP_API TensorDescriptor : public Descriptor<
138
+ cudnnTensorStruct,
139
+ &cudnnCreateTensorDescriptor,
140
+ &cudnnDestroyTensorDescriptor> {
141
+ public:
142
+ TensorDescriptor() = default;
143
+ explicit TensorDescriptor(const at::Tensor &t, size_t pad = 0) {
144
+ set(t, pad);
145
+ }
146
+
147
+ // Note [CuDNN broadcast padding]
148
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
149
+ // pad specifies the minimum dimensionality of the tensor descriptor
150
+ // we produce (it doesn't have anything to do with, e.g., convolution
151
+ // padding). If 't' is lower-dimensional than 'pad', the remaining
152
+ // dimensions (on the right) are padded with ones. This doesn't
153
+ // affect the underlying data layout. This is particularly useful for
154
+ // dealing with a peculiarity of the CuDNN API, which is that broadcasting in CuDNN is
155
+ // done in two steps: first, the client code is expected to pad out
156
+ // (the dimensions) input tensors to be the same dimension as the
157
+ // target broadcast, and then second, CuDNN takes care of actually
158
+ // broadcasting size 1 dimensions.
159
+
160
+ void set(const at::Tensor &t, size_t pad = 0);
161
+ void set(const at::Tensor &t, at::MemoryFormat memory_format, size_t pad = 0);
162
+ void set(cudnnDataType_t dataType, IntArrayRef sizes, IntArrayRef strides, size_t pad = 0);
163
+
164
+ void print();
165
+
166
+ private:
167
+ void set(cudnnDataType_t dataType, IntArrayRef sizes, IntArrayRef strides, size_t pad, bool nhwc);
168
+
169
+ void set(cudnnDataType_t dataType, int dim, int* size, int* stride, bool nhwc) {
170
+ fixSizeOneDimStride<int>(dim, size, stride, nhwc);
171
+ AT_CUDNN_CHECK(cudnnSetTensorNdDescriptor(mut_desc(), dataType, dim, size, stride));
172
+ }
173
+ };
174
+
175
+ std::ostream& operator<<(std::ostream & out, const TensorDescriptor& d);
176
+
177
+ class TORCH_CUDA_CPP_API FilterDescriptor : public Descriptor<
178
+ cudnnFilterStruct,
179
+ &cudnnCreateFilterDescriptor,
180
+ &cudnnDestroyFilterDescriptor> {
181
+ public:
182
+ void set(const at::Tensor &t, int64_t pad = 0) {
183
+ set(t, at::MemoryFormat::Contiguous, pad);
184
+ }
185
+
186
+ void set(const at::Tensor &t, const at::MemoryFormat memory_format, int64_t pad = 0);
187
+
188
+ void print();
189
+ private:
190
+ void set(cudnnDataType_t dataType, int dim, int* size, cudnnTensorFormat_t filter_format) {
191
+ AT_CUDNN_CHECK(cudnnSetFilterNdDescriptor(mut_desc(), dataType, filter_format, dim, size));
192
+ }
193
+ };
194
+
195
+ std::ostream& operator<<(std::ostream & out, const FilterDescriptor& d);
196
+
197
+ struct TORCH_CUDA_CPP_API ConvolutionDescriptor
198
+ : public Descriptor<
199
+ cudnnConvolutionStruct,
200
+ &cudnnCreateConvolutionDescriptor,
201
+ &cudnnDestroyConvolutionDescriptor> {
202
+ void set(cudnnDataType_t dataType, int dim, int* pad, int* stride, int * upscale /* aka dilation */, int groups, bool allow_tf32) {
203
+ cudnnDataType_t mathType = dataType;
204
+ if (dataType == CUDNN_DATA_HALF) mathType = CUDNN_DATA_FLOAT;
205
+ AT_CUDNN_CHECK(cudnnSetConvolutionNdDescriptor(mut_desc(), dim, pad, stride, upscale,
206
+ CUDNN_CROSS_CORRELATION, mathType));
207
+ AT_CUDNN_CHECK(cudnnSetConvolutionGroupCount(mut_desc(), groups));
208
+ // See Note [behavior of cudnnFind and cudnnGet]
209
+ AT_CUDNN_CHECK(cudnnSetConvolutionMathType(mut_desc(), CUDNN_DEFAULT_MATH));
210
+ if(dataType == CUDNN_DATA_HALF) {
211
+ AT_CUDNN_CHECK(cudnnSetConvolutionMathType(mut_desc(), CUDNN_TENSOR_OP_MATH));
212
+ } else if (dataType == CUDNN_DATA_FLOAT && !allow_tf32) {
213
+ AT_CUDNN_CHECK(cudnnSetConvolutionMathType(mut_desc(), CUDNN_FMA_MATH));
214
+ }
215
+ }
216
+ };
217
+
218
+ struct TORCH_CUDA_CPP_API SpatialTransformerDescriptor
219
+ : public Descriptor<
220
+ cudnnSpatialTransformerStruct,
221
+ &cudnnCreateSpatialTransformerDescriptor,
222
+ &cudnnDestroySpatialTransformerDescriptor> {
223
+ void set(cudnnDataType_t dataType, int dim, int* size) {
224
+ AT_CUDNN_CHECK(cudnnSetSpatialTransformerNdDescriptor(mut_desc(), CUDNN_SAMPLER_BILINEAR, dataType, dim, size));
225
+ }
226
+ };
227
+
228
+ struct TORCH_CUDA_CPP_API DropoutDescriptor
229
+ : public Descriptor<
230
+ cudnnDropoutStruct,
231
+ &cudnnCreateDropoutDescriptor,
232
+ &cudnnDestroyDropoutDescriptor> {
233
+ at::Tensor state;
234
+
235
+ // Initialize a dropout descriptor's RNG state.
236
+ // WARNING: This function is very expensive, avoid calling this function!
237
+ void initialize_rng(cudnnHandle_t handle, float dropout, long long int seed, const TensorOptions& options) {
238
+ TORCH_INTERNAL_ASSERT(dropout > 0, "dropout must be nonzero; otherwise call set_no_dropout");
239
+ size_t state_size;
240
+ AT_CUDNN_CHECK(cudnnDropoutGetStatesSize(handle, &state_size));
241
+ AT_ASSERT(options.device().type() == kCUDA);
242
+ AT_ASSERT(options.dtype() == kByte);
243
+ state = at::empty({static_cast<int64_t>(state_size)}, options);
244
+ AT_CUDNN_CHECK(cudnnSetDropoutDescriptor(mut_desc(), handle, dropout, state.data_ptr(), state_size, seed));
245
+ }
246
+
247
+ // Restore a dropout descriptor given a dropout probability and existing RNG state.
248
+ void set(cudnnHandle_t handle, float dropout, at::Tensor state_) {
249
+ TORCH_INTERNAL_ASSERT(dropout > 0, "dropout must be nonzero; otherwise call set_no_dropout");
250
+ state = state_;
251
+ void *state_ptr = state.data_ptr();
252
+ size_t state_size = state.size(0);
253
+ // NB: The seed doesn't actually matter, so we give a dummy value
254
+ AT_CUDNN_CHECK(cudnnRestoreDropoutDescriptor(mut_desc(), handle, dropout, state_ptr, state_size, 0 /* seed */));
255
+ }
256
+
257
+ // Restore a dropout descriptor corresponding to no dropout
258
+ void set_no_dropout(cudnnHandle_t handle) {
259
+ // NB: seed doesn't matter when dropout = 0, because no random number
260
+ // initialization actually takes place when there is no dropout.
261
+ // NB: Empirically, cudnnSetDropoutDescriptor is cheap when
262
+ // dropout == 0
263
+ AT_CUDNN_CHECK(cudnnSetDropoutDescriptor(mut_desc(), handle, 0 /* dropout */, nullptr, 0 /* state_size */, 0 /* seed */));
264
+ }
265
+ };
266
+
267
+ struct TORCH_CUDA_CPP_API RNNDescriptor : public Descriptor<
268
+ cudnnRNNStruct,
269
+ &cudnnCreateRNNDescriptor,
270
+ &cudnnDestroyRNNDescriptor> {
271
+ DropoutDescriptor dropout_desc_;
272
+ void set(cudnnHandle_t handle,
273
+ #ifdef USE_CUDNN_RNN_V8_API
274
+ int input_size,
275
+ bool packed,
276
+ #endif
277
+ int hidden_size, int proj_size, int num_layers, DropoutDescriptor&& dropout_desc,
278
+ cudnnRNNInputMode_t input_mode, cudnnDirectionMode_t bidirectional,
279
+ cudnnRNNMode_t mode, cudnnDataType_t datatype, cudnnDataType_t input_type, cudnnRNNAlgo_t algo, bool allow_tf32) {
280
+ dropout_desc_ = std::move(dropout_desc);
281
+ #ifndef USE_CUDNN_RNN_V8_API
282
+ AT_CUDNN_CHECK(cudnnSetRNNDescriptor_v6(
283
+ handle,
284
+ mut_desc(),
285
+ hidden_size,
286
+ num_layers,
287
+ dropout_desc_.desc(),
288
+ input_mode,
289
+ bidirectional,
290
+ mode,
291
+ algo,
292
+ datatype));
293
+ if (proj_size != 0) {
294
+ AT_CUDNN_CHECK(cudnnSetRNNProjectionLayers(
295
+ handle,
296
+ /*rnnDesc=*/mut_desc(),
297
+ /*recProjSize=*/proj_size,
298
+ /*outProjSize=*/0));
299
+ }
300
+ cudaDeviceProp* prop = at::cuda::getCurrentDeviceProperties();
301
+ if (prop->major >= 7) {
302
+ if (input_type == CUDNN_DATA_HALF) {
303
+ cudnnSetRNNMatrixMathType(mut_desc(), CUDNN_TENSOR_OP_MATH);
304
+ }
305
+ else if (input_type == CUDNN_DATA_FLOAT && !allow_tf32) {
306
+ cudnnSetRNNMatrixMathType(mut_desc(), CUDNN_FMA_MATH);
307
+ }
308
+ else {
309
+ // Technically, as the default it's not necessary to explicitly
310
+ // set this.
311
+ cudnnSetRNNMatrixMathType(mut_desc(), CUDNN_DEFAULT_MATH);
312
+ }
313
+ }
314
+ #else
315
+ cudaDeviceProp* prop = at::cuda::getCurrentDeviceProperties();
316
+ auto math_type = CUDNN_DEFAULT_MATH;
317
+ if (prop->major >= 7) {
318
+ if (input_type == CUDNN_DATA_HALF) {
319
+ math_type = CUDNN_TENSOR_OP_MATH;
320
+ } else if (!allow_tf32) {
321
+ math_type = CUDNN_FMA_MATH;
322
+ }
323
+ }
324
+ AT_CUDNN_CHECK(cudnnSetRNNDescriptor_v8(
325
+ mut_desc(),
326
+ algo,
327
+ mode,
328
+ CUDNN_RNN_DOUBLE_BIAS,
329
+ bidirectional,
330
+ input_mode,
331
+ input_type,
332
+ datatype,
333
+ math_type,
334
+ input_size,
335
+ hidden_size,
336
+ proj_size ? proj_size : hidden_size,
337
+ num_layers,
338
+ dropout_desc_.desc(),
339
+ packed ? CUDNN_RNN_PADDED_IO_DISABLED : CUDNN_RNN_PADDED_IO_ENABLED));
340
+ #endif
341
+ }
342
+ };
343
+
344
+ struct TORCH_CUDA_CPP_API CTCLossDescriptor
345
+ : public Descriptor<
346
+ cudnnCTCLossStruct,
347
+ &cudnnCreateCTCLossDescriptor,
348
+ &cudnnDestroyCTCLossDescriptor> {
349
+ void set(cudnnDataType_t datatype) {
350
+ AT_CUDNN_CHECK(cudnnSetCTCLossDescriptor(mut_desc(), datatype));
351
+ }
352
+ void setEx(
353
+ cudnnDataType_t datatype,
354
+ cudnnLossNormalizationMode_t normMode,
355
+ cudnnNanPropagation_t gradMode) {
356
+ AT_CUDNN_CHECK(
357
+ cudnnSetCTCLossDescriptorEx(mut_desc(), datatype, normMode, gradMode));
358
+ }
359
+ };
360
+
361
+ struct TORCH_CUDA_CPP_API ActivationDescriptor
362
+ : public Descriptor<
363
+ cudnnActivationStruct,
364
+ &cudnnCreateActivationDescriptor,
365
+ &cudnnDestroyActivationDescriptor> {
366
+ void set(cudnnActivationMode_t mode) {
367
+ AT_ASSERT(
368
+ mode == CUDNN_ACTIVATION_RELU,
369
+ "TODO: support more cuDNN activation modes");
370
+ AT_CUDNN_CHECK(cudnnSetActivationDescriptor(
371
+ mut_desc(),
372
+ mode,
373
+ cudnnNanPropagation_t::CUDNN_NOT_PROPAGATE_NAN,
374
+ std::numeric_limits<double>::max()));
375
+ }
376
+ };
377
+
378
+ union Constant
379
+ {
380
+ float f;
381
+ double d;
382
+ Constant(cudnnDataType_t dataType, double value) {
383
+ if (dataType == CUDNN_DATA_HALF || dataType == CUDNN_DATA_FLOAT) {
384
+ f = static_cast<float>(value);
385
+ } else {
386
+ d = value;
387
+ }
388
+ }
389
+ };
390
+
391
+ }} // namespace
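
The Descriptor template in the header above is a small RAII wrapper: a unique_ptr whose deleter calls the matching cuDNN destroy function, with the handle created lazily on the first mut_desc() call. A self-contained sketch of the same pattern against a made-up C-style handle API (FooHandle, fooCreate and fooDestroy are placeholders, not cuDNN symbols):

    #include <memory>

    // Hypothetical C API standing in for a cudnnCreate*/cudnnDestroy* pair.
    struct FooHandle { int dummy = 0; };
    int fooCreate(FooHandle** out) { *out = new FooHandle(); return 0; }
    int fooDestroy(FooHandle* h)   { delete h; return 0; }

    // Mirrors DescriptorDeleter: only destroy a handle that was actually created.
    struct FooDeleter {
      void operator()(FooHandle* h) const { if (h != nullptr) fooDestroy(h); }
    };

    // Mirrors Descriptor<T, ctor, dtor>: default-constructs to null and creates
    // the underlying handle lazily the first time mut_desc() is called.
    class FooDescriptor {
     public:
      FooHandle* desc() const { return desc_.get(); }        // read-only; may be nullptr
      FooHandle* mut_desc() { init(); return desc_.get(); }  // ensures the handle exists
     private:
      void init() {
        if (!desc_) {
          FooHandle* raw = nullptr;
          fooCreate(&raw);  // the ATen version wraps this call in AT_CUDNN_CHECK
          desc_.reset(raw);
        }
      }
      std::unique_ptr<FooHandle, FooDeleter> desc_;
    };

    int main() {
      FooDescriptor d;
      (void)d.mut_desc();  // created here; destroyed automatically when d goes out of scope
    }
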
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cudnn/Exceptions.h ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cudnn/Handle.h ADDED
@@ -0,0 +1,9 @@
1
+ #pragma once
2
+
3
+ #include <ATen/cudnn/cudnn-wrapper.h>
4
+ #include <ATen/cuda/ATenCUDAGeneral.h>
5
+
6
+ namespace at { namespace native {
7
+
8
+ TORCH_CUDA_CPP_API cudnnHandle_t getCudnnHandle();
9
+ }} // namespace at::native
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cudnn/Handles.h ADDED
@@ -0,0 +1,2 @@
1
+ #pragma once
2
+ #include <ATen/cudnn/Handle.h>
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cudnn/Types.h ADDED
@@ -0,0 +1,14 @@
1
+ #pragma once
2
+
3
+ #include <ATen/cudnn/cudnn-wrapper.h>
4
+ #include <ATen/Tensor.h>
5
+
6
+ namespace at { namespace native {
7
+
8
+ TORCH_CUDA_CPP_API cudnnDataType_t
9
+ getCudnnDataTypeFromScalarType(const at::ScalarType dtype);
10
+ cudnnDataType_t getCudnnDataType(const at::Tensor& tensor);
11
+
12
+ int64_t cudnn_version();
13
+
14
+ }} // namespace at::native
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cudnn/Utils.h ADDED
@@ -0,0 +1,21 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/Tensor.h>
4
+ #include <ATen/cuda/Exceptions.h>
5
+ #include <ATen/cudnn/cudnn-wrapper.h>
6
+ #include <ATen/cudnn/Handle.h>
7
+
8
+ namespace at { namespace native {
9
+
10
+ // cuDNN has a buggy check for tensor being contiguous (that is, it does
11
+ // not ignore stride for dimension that is equal to 0). This function
12
+ // makes tensors which have zero stride contiguous, by setting the
13
+ // strides to 1 as cuDNN likes.
14
+ inline Tensor contiguousIfZeroInStrides(const Tensor& t) {
15
+ for (auto s : t.strides()) {
16
+ if (s == 0) return t.contiguous();
17
+ }
18
+ return t;
19
+ }
20
+
21
+ }}
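
contiguousIfZeroInStrides above exists because broadcasted (expanded) tensors carry a stride of 0 in the broadcast dimension, which cuDNN's contiguity check does not tolerate. A hedged sketch of the situation, assuming an ATen build where at::ones and Tensor::expand behave as in current PyTorch:

    #include <ATen/ATen.h>

    void demo() {
      // expand() broadcasts without copying, so the expanded dimension gets stride 0.
      at::Tensor t = at::ones({3, 1}).expand({3, 4});  // sizes {3, 4}, strides {1, 0}
      // A zero stride would trip cuDNN, so the helper above falls back to a real copy.
      at::Tensor safe = (t.strides()[1] == 0) ? t.contiguous() : t;
      (void)safe;
    }
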
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cudnn/cudnn-wrapper.h ADDED
@@ -0,0 +1,15 @@
1
+ #pragma once
2
+
3
+ #include <cudnn.h>
4
+
5
+ #define STRINGIFY(x) #x
6
+ #define STRING(x) STRINGIFY(x)
7
+
8
+ #if CUDNN_MAJOR < 6
9
+ #pragma message ("CuDNN v" STRING(CUDNN_MAJOR) " found, but need at least CuDNN v6. You can get the latest version of CuDNN from https://developer.nvidia.com/cudnn or disable CuDNN with USE_CUDNN=0")
10
+ #pragma message "We strongly encourage you to move to 6.0 and above."
11
+ #pragma message "This message is intended to annoy you enough to update."
12
+ #endif
13
+
14
+ #undef STRINGIFY
15
+ #undef STRING
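
The STRINGIFY/STRING pair above is the standard two-level stringification idiom: the extra level of indirection forces CUDNN_MAJOR to be macro-expanded before # turns it into a string literal. A minimal standalone illustration (MY_VERSION is a made-up macro for the example):

    #include <cstdio>

    #define MY_VERSION 9
    #define STRINGIFY(x) #x
    #define STRING(x) STRINGIFY(x)

    int main() {
      std::puts(STRINGIFY(MY_VERSION));  // prints "MY_VERSION": no expansion before #
      std::puts(STRING(MY_VERSION));     // prints "9": expanded first, then stringified
    }
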
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/mps/EmptyTensor.h ADDED
@@ -0,0 +1,29 @@
1
+ // Copyright © 2022 Apple Inc.
2
+
3
+ #pragma once
4
+ #include <ATen/core/TensorBase.h>
5
+
6
+ namespace at::detail {
7
+
8
+ C10_EXPORT TensorBase empty_mps(
9
+ IntArrayRef size,
10
+ c10::optional<ScalarType> dtype_opt,
11
+ c10::optional<Layout> layout_opt,
12
+ c10::optional<Device> device_opt,
13
+ c10::optional<bool> pin_memory_opt,
14
+ c10::optional<c10::MemoryFormat> memory_format_opt);
15
+ C10_EXPORT TensorBase empty_mps(
16
+ IntArrayRef size, const TensorOptions &options);
17
+
18
+ C10_EXPORT TensorBase empty_strided_mps(
19
+ IntArrayRef size,
20
+ IntArrayRef stride,
21
+ ScalarType dtype,
22
+ c10::optional<Device> device_opt);
23
+
24
+ C10_EXPORT TensorBase empty_strided_mps(
25
+ IntArrayRef size,
26
+ IntArrayRef stride,
27
+ const TensorOptions &options);
28
+
29
+ } // namespace at::detail
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/mps/IndexKernels.h ADDED
@@ -0,0 +1,630 @@
1
+ #pragma once
2
+
3
+ namespace at::mps {
4
+
5
+ static const char * indexing_metal_shaders = R"INDEX_METAL(
6
+ #include <metal_stdlib>
7
+ #include <metal_atomic>
8
+
9
+ using namespace metal;
10
+
11
+ #if __METAL_VERSION__ < 300
12
+ struct IndexAB {
13
+ // Allow up to 16 indices
14
+ metal::array<constant void *, 16> indexArray [[ id(0) ]];
15
+ };
16
+ #else
17
+ struct IndexAB {
18
+ constant int64_t* indexArray;
19
+ };
20
+
21
+ #endif
22
+
23
+ template<typename T, typename OffsetsT>
24
+ kernel void index_select(
25
+ #if __METAL_VERSION__ >= 300
26
+ constant IndexAB * indexAB [[buffer(0)]],
27
+ #else
28
+ constant IndexAB & indexAB [[buffer(0)]],
29
+ #endif
30
+ constant void * indexSizes [[buffer(1)]],
31
+ constant void * indexStrides [[buffer(2)]],
32
+ constant OffsetsT * offsets [[buffer(3)]],
33
+ constant void * inputData [[buffer(4)]],
34
+ device void * outputData [[buffer(5)]],
35
+ constant uint32_t & num_indices [[buffer(6)]],
36
+ uint thread_index [[thread_position_in_grid]]) {
37
+ constant int64_t * index_sizes = (constant int64_t *)indexSizes;
38
+ constant int64_t * index_strides = (constant int64_t *)indexStrides;
39
+ int64_t offset = 0;
40
+ for (uint32_t i = 0; i < num_indices; i++) {
41
+ #if __METAL_VERSION__ >= 300
42
+ constant int64_t* indexArray = indexAB[i].indexArray;
43
+ #else
44
+ constant int64_t* indexArray = (constant int64_t*)indexAB.indexArray[i];
45
+ #endif
46
+ int64_t index = indexArray[offsets[thread_index].z / sizeof(int64_t)];
47
+ if (index < 0) {
48
+ index += index_sizes[i];
49
+ }
50
+ offset += index * index_strides[i];
51
+ }
52
+ device T * out = (device T*)((device char*)outputData + offsets[thread_index].x);
53
+ constant T * in = (constant T*)((constant char*)inputData + offsets[thread_index].y + offset);
54
+ *out = *in;
55
+ }
56
+
57
+ template<typename T, typename OffsetsT>
58
+ void index_put_impl(
59
+ #if __METAL_VERSION__ >= 300
60
+ constant IndexAB * indexAB,
61
+ #else
62
+ constant IndexAB & indexAB,
63
+ #endif
64
+ constant int64_t * index_sizes,
65
+ constant int64_t * index_strides,
66
+ constant OffsetsT * offsets,
67
+ constant void * inputData,
68
+ device void * outputData,
69
+ constant uint32_t & num_indices,
70
+ uint thread_index) {
71
+ int64_t offset = 0;
72
+ for (uint32_t i = 0; i < num_indices; i++) {
73
+ #if __METAL_VERSION__ >= 300
74
+ constant int64_t* indexArray = indexAB[i].indexArray;
75
+ #else
76
+ constant int64_t* indexArray = (constant int64_t*)indexAB.indexArray[i];
77
+ #endif
78
+ int64_t index = indexArray[offsets[thread_index].z / sizeof(int64_t)];
79
+
80
+ if (index < 0) {
81
+ index += index_sizes[i];
82
+ }
83
+ offset += index * index_strides[i];
84
+ }
85
+ device T * out = (device T*)((device char*)outputData + offsets[thread_index].x + offset);
86
+ constant T * in = (constant T*)((constant char*)inputData + offsets[thread_index].y);
87
+ *out = *in;
88
+ }
89
+
90
+ template<typename T, typename OffsetsT>
91
+ kernel void index_put_serial(
92
+ #if __METAL_VERSION__ >= 300
93
+ constant IndexAB * indexAB [[buffer(0)]],
94
+ #else
95
+ constant IndexAB & indexAB [[buffer(0)]],
96
+ #endif
97
+ constant void * indexSizes [[buffer(1)]],
98
+ constant void * indexStrides [[buffer(2)]],
99
+ constant OffsetsT * offsets [[buffer(3)]],
100
+ constant void * inputData [[buffer(4)]],
101
+ device void * outputData [[buffer(5)]],
102
+ constant uint32_t & num_indices [[buffer(6)]],
103
+ constant uint * numIters [[buffer(7)]],
104
+ uint thread_index [[thread_position_in_grid]]) {
105
+
106
+ constant int64_t * index_sizes = (constant int64_t *)indexSizes;
107
+ constant int64_t * index_strides = (constant int64_t *)indexStrides;
108
+
109
+ for (uint iter_i = 0; iter_i < *numIters; iter_i++) {
110
+ index_put_impl<T>(indexAB, index_sizes, index_strides, offsets, inputData, outputData, num_indices, iter_i);
111
+ }
112
+ }
113
+
114
+ template<typename T, typename OffsetsT>
115
+ kernel void index_put(
116
+ #if __METAL_VERSION__ >= 300
117
+ constant IndexAB * indexAB [[buffer(0)]],
118
+ #else
119
+ constant IndexAB & indexAB [[buffer(0)]],
120
+ #endif
121
+ constant void * indexSizes [[buffer(1)]],
122
+ constant void * indexStrides [[buffer(2)]],
123
+ constant OffsetsT * offsets [[buffer(3)]],
124
+ constant void * inputData [[buffer(4)]],
125
+ device void * outputData [[buffer(5)]],
126
+ constant uint32_t & num_indices [[buffer(6)]],
127
+ uint thread_index [[thread_position_in_grid]]) {
128
+
129
+ constant int64_t * index_sizes = (constant int64_t *)indexSizes;
130
+ constant int64_t * index_strides = (constant int64_t *)indexStrides;
131
+ index_put_impl<T>(indexAB, index_sizes, index_strides, offsets, inputData, outputData, num_indices, thread_index);
132
+ }
133
+
134
+ #if __METAL_VERSION__ < 300
135
+ #define REGISTER_INDEX_OP(DTYPE_SIZE, IDX_SIZE, DTYPE, INDEX_OP_TYPE, IDX_DTYPE) \
136
+ template \
137
+ [[host_name("index_" #INDEX_OP_TYPE "_" #DTYPE_SIZE "_" #IDX_SIZE)]] \
138
+ kernel void index_ ## INDEX_OP_TYPE<DTYPE, IDX_DTYPE>( \
139
+ constant IndexAB & indexAB [[buffer(0)]], \
140
+ constant void * indexSizes [[buffer(1)]], \
141
+ constant void * indexStrides [[buffer(2)]], \
142
+ constant IDX_DTYPE * offsets [[buffer(3)]], \
143
+ constant void * inputData [[buffer(4)]], \
144
+ device void * outputData [[buffer(5)]], \
145
+ constant uint32_t & num_indices [[buffer(6)]], \
146
+ uint thread_index [[thread_position_in_grid]]);
147
+ #else
148
+ #define REGISTER_INDEX_OP(DTYPE_SIZE, IDX_SIZE, DTYPE, INDEX_OP_TYPE, IDX_DTYPE) \
149
+ template \
150
+ [[host_name("index_" #INDEX_OP_TYPE "_" #DTYPE_SIZE "_" #IDX_SIZE)]] \
151
+ kernel void index_ ## INDEX_OP_TYPE<DTYPE, IDX_DTYPE>( \
152
+ constant IndexAB * indexAB [[buffer(0)]], \
153
+ constant void * indexSizes [[buffer(1)]], \
154
+ constant void * indexStrides [[buffer(2)]], \
155
+ constant IDX_DTYPE * offsets [[buffer(3)]], \
156
+ constant void * inputData [[buffer(4)]], \
157
+ device void * outputData [[buffer(5)]], \
158
+ constant uint32_t & num_indices [[buffer(6)]], \
159
+ uint thread_index [[thread_position_in_grid]]);
160
+ #endif
161
+
162
+ #define REGISTER_INDEX_OP_ALL_DTYPES(INDEX_OP_TYPE) \
163
+ REGISTER_INDEX_OP(8bit, idx32, char, INDEX_OP_TYPE, uint3); \
164
+ REGISTER_INDEX_OP(8bit, idx64, char, INDEX_OP_TYPE, ulong3); \
165
+ REGISTER_INDEX_OP(16bit, idx32, short, INDEX_OP_TYPE, uint3); \
166
+ REGISTER_INDEX_OP(16bit, idx64, short, INDEX_OP_TYPE, ulong3); \
167
+ REGISTER_INDEX_OP(32bit, idx32, int, INDEX_OP_TYPE, uint3); \
168
+ REGISTER_INDEX_OP(32bit, idx64, int, INDEX_OP_TYPE, ulong3); \
169
+ REGISTER_INDEX_OP(64bit, idx32, long, INDEX_OP_TYPE, uint3); \
170
+ REGISTER_INDEX_OP(64bit, idx64, long, INDEX_OP_TYPE, ulong3);
171
+
172
+ REGISTER_INDEX_OP_ALL_DTYPES(select);
173
+ REGISTER_INDEX_OP_ALL_DTYPES(put);
174
+
175
+ #if __METAL_VERSION__ < 300
176
+ #define REGISTER_SINGLE_THREADED_INDEX_OP(DTYPE_SIZE, IDX_SIZE, DTYPE, INDEX_OP_TYPE, IDX_DTYPE) \
177
+ template \
178
+ [[host_name("index_" #INDEX_OP_TYPE "_" #DTYPE_SIZE "_" #IDX_SIZE)]] \
179
+ kernel void index_ ## INDEX_OP_TYPE<DTYPE, IDX_DTYPE>( \
180
+ constant IndexAB & indexAB [[buffer(0)]], \
181
+ constant void * indexSizes [[buffer(1)]], \
182
+ constant void * indexStrides [[buffer(2)]], \
183
+ constant IDX_DTYPE * offsets [[buffer(3)]], \
184
+ constant void * inputData [[buffer(4)]], \
185
+ device void * outputData [[buffer(5)]], \
186
+ constant uint32_t & num_indices [[buffer(6)]], \
187
+ constant uint * numIters [[buffer(7)]], \
188
+ uint thread_index [[thread_position_in_grid]]);
189
+ #else
190
+ #define REGISTER_SINGLE_THREADED_INDEX_OP(DTYPE_SIZE, IDX_SIZE, DTYPE, INDEX_OP_TYPE, IDX_DTYPE) \
191
+ template \
192
+ [[host_name("index_" #INDEX_OP_TYPE "_" #DTYPE_SIZE "_" #IDX_SIZE)]] \
193
+ kernel void index_ ## INDEX_OP_TYPE<DTYPE, IDX_DTYPE>( \
194
+ constant IndexAB * indexAB [[buffer(0)]], \
195
+ constant void * indexSizes [[buffer(1)]], \
196
+ constant void * indexStrides [[buffer(2)]], \
197
+ constant IDX_DTYPE * offsets [[buffer(3)]], \
198
+ constant void * inputData [[buffer(4)]], \
199
+ device void * outputData [[buffer(5)]], \
200
+ constant uint32_t & num_indices [[buffer(6)]], \
201
+ constant uint * numIters [[buffer(7)]], \
202
+ uint thread_index [[thread_position_in_grid]]);
203
+ #endif
204
+
205
+ #define REGISTER_SINGLE_THREADED_INDEX_OP_ALL_DTYPES(INDEX_OP_TYPE) \
206
+ REGISTER_SINGLE_THREADED_INDEX_OP(8bit, idx32, char, INDEX_OP_TYPE, uint3); \
207
+ REGISTER_SINGLE_THREADED_INDEX_OP(8bit, idx64, char, INDEX_OP_TYPE, ulong3); \
208
+ REGISTER_SINGLE_THREADED_INDEX_OP(16bit, idx32, short, INDEX_OP_TYPE, uint3); \
209
+ REGISTER_SINGLE_THREADED_INDEX_OP(16bit, idx64, short, INDEX_OP_TYPE, ulong3); \
210
+ REGISTER_SINGLE_THREADED_INDEX_OP(32bit, idx32, int, INDEX_OP_TYPE, uint3); \
211
+ REGISTER_SINGLE_THREADED_INDEX_OP(32bit, idx64, int, INDEX_OP_TYPE, ulong3); \
212
+ REGISTER_SINGLE_THREADED_INDEX_OP(64bit, idx32, long, INDEX_OP_TYPE, uint3); \
213
+ REGISTER_SINGLE_THREADED_INDEX_OP(64bit, idx64, long, INDEX_OP_TYPE, ulong3);
214
+
215
+ REGISTER_SINGLE_THREADED_INDEX_OP_ALL_DTYPES(put_serial);
216
+
217
+ template<typename StridesT, typename DataT>
218
+ kernel void kernel_index_offsets(constant StridesT * strides [[buffer(0)]],
219
+ device DataT * data_offsets [[buffer(1)]],
220
+ constant uint * iter_shape [[buffer(2)]],
221
+ constant uint & num_dimensions [[buffer(3)]],
222
+ uint thread_index [[thread_position_in_grid]]) {
223
+ data_offsets[thread_index] = 0;
224
+ uint32_t idx = thread_index;
225
+ for (uint32_t dim = 0; dim < num_dimensions; dim++) {
226
+ uint32_t remainder = idx % iter_shape[dim];
227
+ idx /= iter_shape[dim];
228
+
229
+ data_offsets[thread_index] += remainder * DataT(strides[dim]);
230
+ }
231
+ }
232
+
233
+ template
234
+ [[host_name("kernel_index_offsets_32")]]
235
+ kernel void kernel_index_offsets<packed_uint3, uint3>(
236
+ constant packed_uint3 * strides [[buffer(0)]],
237
+ device uint3 * data_offsets [[buffer(1)]],
238
+ constant uint * iter_shape [[buffer(2)]],
239
+ constant uint & num_dimensions [[buffer(3)]],
240
+ uint thread_index [[thread_position_in_grid]]);
241
+
242
+ template
243
+ [[host_name("kernel_index_offsets_64")]]
244
+ kernel void kernel_index_offsets<packed_uint3, ulong3>(
245
+ constant packed_uint3 * strides [[buffer(0)]],
246
+ device ulong3 * data_offsets [[buffer(1)]],
247
+ constant uint * iter_shape [[buffer(2)]],
248
+ constant uint & num_dimensions [[buffer(3)]],
249
+ uint thread_index [[thread_position_in_grid]]);
250
+
251
+ template<typename T, typename E, typename OffsetsT>
252
+ kernel void index_put_accumulate_native_dtypes(
253
+ #if __METAL_VERSION__ >= 300
254
+ constant IndexAB * indexAB [[buffer(0)]],
255
+ #else
256
+ constant IndexAB & indexAB [[buffer(0)]],
257
+ #endif
258
+ constant void * indexSizes [[buffer(1)]],
259
+ constant void * indexStrides [[buffer(2)]],
260
+ constant OffsetsT * offsets [[buffer(3)]],
261
+ constant void * inputData [[buffer(4)]],
262
+ device void * outputData [[buffer(5)]],
263
+ constant uint32_t & num_indices [[buffer(6)]],
264
+ uint thread_index [[thread_position_in_grid]]) {
265
+ constant int64_t * index_sizes = (constant int64_t *)indexSizes;
266
+ constant int64_t * index_strides = (constant int64_t *)indexStrides;
267
+ int64_t offset = 0;
268
+ for (uint32_t i = 0; i < num_indices; i++) {
269
+ #if __METAL_VERSION__ >= 300
270
+ constant int64_t* indexArray = indexAB[i].indexArray;
271
+ #else
272
+ constant int64_t* indexArray = (constant int64_t*)indexAB.indexArray[i];
273
+ #endif
274
+ int64_t index = indexArray[offsets[thread_index].z / sizeof(int64_t)];
275
+ if (index < 0) {
276
+ index += index_sizes[i];
277
+ }
278
+ offset += index * index_strides[i];
279
+ }
280
+ device T * out = (device T*)((device char*)outputData + offsets[thread_index].x + offset);
281
+ constant E * in = (constant E*)((constant char*)inputData + offsets[thread_index].y);
282
+ atomic_fetch_add_explicit(out, *in, memory_order_relaxed);
283
+ }
284
+
285
+ template<typename T>
286
+ __attribute__((__always_inline__)) void atomic_fetch_add_relaxed(device void * addr, T value) {
287
+ device atomic_uint* uintAddr = (device atomic_uint*)addr;
288
+ uint expected = atomic_load_explicit(uintAddr, memory_order_relaxed);
289
+ T updated = as_type<T>(expected) + value;
290
+ while (!atomic_compare_exchange_weak_explicit(uintAddr, &expected, as_type<uint>(updated), memory_order_relaxed, memory_order_relaxed)) {
291
+ updated = as_type<T>(expected) + value;
292
+ }
293
+ }
294
+
295
+ template<typename T, typename OffsetsT>
296
+ kernel void atomic_index_put_accumulate(
297
+ #if __METAL_VERSION__ >= 300
298
+ constant IndexAB * indexAB [[buffer(0)]],
299
+ #else
300
+ constant IndexAB & indexAB [[buffer(0)]],
301
+ #endif
302
+ constant void * indexSizes [[buffer(1)]],
303
+ constant void * indexStrides [[buffer(2)]],
304
+ constant OffsetsT * offsets [[buffer(3)]],
305
+ constant void * inputData [[buffer(4)]],
306
+ device void * outputData [[buffer(5)]],
307
+ constant uint32_t & num_indices [[buffer(6)]],
308
+ uint thread_index [[thread_position_in_grid]]) {
309
+ constant int64_t * index_sizes = (constant int64_t *)indexSizes;
310
+ constant int64_t * index_strides = (constant int64_t *)indexStrides;
311
+ int64_t offset = 0;
312
+ for (uint32_t i = 0; i < num_indices; i++) {
313
+ #if __METAL_VERSION__ >= 300
314
+ constant int64_t* indexArray = indexAB[i].indexArray;
315
+ #else
316
+ constant int64_t* indexArray = (constant int64_t*)indexAB.indexArray[i];
317
+ #endif
318
+ int64_t index = indexArray[offsets[thread_index].z / sizeof(int64_t)];
319
+ if (index < 0) {
320
+ index += index_sizes[i];
321
+ }
322
+ offset += index * index_strides[i];
323
+ }
324
+ device void * out = (device void*)((device char*)outputData + offsets[thread_index].x + offset);
325
+ constant T * in = (constant T*)((constant char*)inputData + offsets[thread_index].y);
326
+ atomic_fetch_add_relaxed<T>(out, *in);
327
+ }
328
+
329
+ template
330
+ [[host_name("index_put_accumulate_32bit_float_idx32")]]
331
+ kernel void atomic_index_put_accumulate<float, uint3>(
332
+ #if __METAL_VERSION__ >= 300
333
+ constant IndexAB * indexAB [[buffer(0)]],
334
+ #else
335
+ constant IndexAB & indexAB [[buffer(0)]],
336
+ #endif
337
+ constant void * indexSizes [[buffer(1)]],
338
+ constant void * indexStrides [[buffer(2)]],
339
+ constant uint3 * offsets [[buffer(3)]],
340
+ constant void * inputData [[buffer(4)]],
341
+ device void * outputData [[buffer(5)]],
342
+ constant uint32_t & num_indices [[buffer(6)]],
343
+ uint thread_index [[thread_position_in_grid]]);
344
+
345
+ template
346
+ [[host_name("index_put_accumulate_32bit_float_idx64")]]
347
+ kernel void atomic_index_put_accumulate<float, ulong3>(
348
+ #if __METAL_VERSION__ >= 300
349
+ constant IndexAB * indexAB [[buffer(0)]],
350
+ #else
351
+ constant IndexAB & indexAB [[buffer(0)]],
352
+ #endif
353
+ constant void * indexSizes [[buffer(1)]],
354
+ constant void * indexStrides [[buffer(2)]],
355
+ constant ulong3 * offsets [[buffer(3)]],
356
+ constant void * inputData [[buffer(4)]],
357
+ device void * outputData [[buffer(5)]],
358
+ constant uint32_t & num_indices [[buffer(6)]],
359
+ uint thread_index [[thread_position_in_grid]]);
360
+
361
+ template
362
+ [[host_name("index_put_accumulate_32bit_int_idx32")]]
363
+ kernel void index_put_accumulate_native_dtypes<atomic_int, int, uint3>(
364
+ #if __METAL_VERSION__ >= 300
365
+ constant IndexAB * indexAB [[buffer(0)]],
366
+ #else
367
+ constant IndexAB & indexAB [[buffer(0)]],
368
+ #endif
369
+ constant void * indexSizes [[buffer(1)]],
370
+ constant void * indexStrides [[buffer(2)]],
371
+ constant uint3 * offsets [[buffer(3)]],
372
+ constant void * inputData [[buffer(4)]],
373
+ device void * outputData [[buffer(5)]],
374
+ constant uint32_t & num_indices [[buffer(6)]],
375
+ uint thread_index [[thread_position_in_grid]]);
376
+
377
+ template
378
+ [[host_name("index_put_accumulate_32bit_int_idx64")]]
379
+ kernel void index_put_accumulate_native_dtypes<atomic_int, int, ulong3>(
380
+ #if __METAL_VERSION__ >= 300
381
+ constant IndexAB * indexAB [[buffer(0)]],
382
+ #else
383
+ constant IndexAB & indexAB [[buffer(0)]],
384
+ #endif
385
+ constant void * indexSizes [[buffer(1)]],
386
+ constant void * indexStrides [[buffer(2)]],
387
+ constant ulong3 * offsets [[buffer(3)]],
388
+ constant void * inputData [[buffer(4)]],
389
+ device void * outputData [[buffer(5)]],
390
+ constant uint32_t & num_indices [[buffer(6)]],
391
+ uint thread_index [[thread_position_in_grid]]);
392
+ )INDEX_METAL";
393
+
394
+ static const char *SCATTER_OPS_TEMPLATE = R"METAL_SCATTER(
395
+ struct __attribute__ ((packed)) packed_uint5{{
396
+ uint32_t x; uint32_t y; uint32_t z; uint32_t w; uint32_t u;
397
+ }};
398
+
399
+ template<typename Y, typename X>
400
+ Y cast(const X x);
401
+
402
+ template<>
403
+ {1} cast<{1}, {0}>(const {0} x) {{
404
+ return {2};
405
+ }}
406
+
407
+ kernel void scatter_kernel_5(uint linear_index [[thread_position_in_grid]],
408
+ constant void * src_ [[buffer(0)]],
409
+ device void * dst_ [[buffer(1)]],
410
+ constant packed_uint5 & size [[buffer(2)]],
411
+ constant packed_uint5 & stride [[buffer(3)]],
412
+ constant uint32_t & numel [[buffer(4)]]) {{
413
+ if (linear_index >= numel) return;
414
+
415
+ constant {0} * src = (constant {0} *)src_;
416
+ device {1} * dst = (device {1} *)dst_;
417
+
418
+ packed_uint5 local_index;
419
+ local_index.x = linear_index / (size.u * size.w * size.z * size.y) % size.x;
420
+ local_index.y = linear_index / (size.u * size.w * size.z) % size.y;
421
+ local_index.z = linear_index / (size.u * size.w) % size.z;
422
+ local_index.w = linear_index / size.u % size.w;
423
+ local_index.u = linear_index % size.u;
424
+
425
+ packed_uint5 strided_index;
426
+ strided_index.x = local_index.x * stride.x;
427
+ strided_index.y = local_index.y * stride.y;
428
+ strided_index.z = local_index.z * stride.z;
429
+ strided_index.w = local_index.w * stride.w;
430
+ strided_index.u = local_index.u * stride.u;
431
+
432
+ dst[strided_index.x + strided_index.y + strided_index.z + strided_index.w + strided_index.u] = cast<{1}>(src[linear_index]);
433
+ }}
434
+
435
+ kernel void scatter_kernel_4(uint linear_index [[thread_position_in_grid]],
436
+ constant void * src_ [[buffer(0)]],
437
+ device void * dst_ [[buffer(1)]],
438
+ constant packed_uint4 & size [[buffer(2)]],
439
+ constant packed_uint4 & stride [[buffer(3)]],
440
+ constant uint32_t & numel [[buffer(4)]]) {{
441
+ if (linear_index >= numel) return;
442
+
443
+ constant {0} * src = (constant {0} *)src_;
444
+ device {1} * dst = (device {1} *)dst_;
445
+
446
+ packed_uint4 local_index;
447
+ local_index.x = linear_index / (size[3] * size[2] * size[1]) % size[0];
448
+ local_index.y = linear_index / (size[3] * size[2]) % size[1];
449
+ local_index.z = linear_index / size[3] % size[2];
450
+ local_index.w = linear_index % size[3];
451
+
452
+ const packed_uint4 strided_index = local_index * stride;
453
+ dst[strided_index.x + strided_index.y + strided_index.z + strided_index.w] = cast<{1}>(src[linear_index]);
454
+ }}
455
+
456
+ kernel void scatter_kernel_3(uint linear_index [[thread_position_in_grid]],
457
+ constant void * src_ [[buffer(0)]],
458
+ device void * dst_ [[buffer(1)]],
459
+ constant packed_uint3 & size [[buffer(2)]],
460
+ constant packed_uint3 & stride [[buffer(3)]],
461
+ constant uint32_t & numel [[buffer(4)]]) {{
462
+ if (linear_index >= numel) return;
463
+
464
+ constant {0} * src = (constant {0} *)src_;
465
+ device {1} * dst = (device {1} *)dst_;
466
+
467
+ packed_uint3 local_index;
468
+ local_index.x = linear_index / (size[2] * size[1]) % size[0];
469
+ local_index.y = linear_index / size[2] % size[1];
470
+ local_index.z = linear_index % size[2];
471
+
472
+ const packed_uint3 strided_index = local_index * stride;
473
+ dst[strided_index.x + strided_index.y + strided_index.z] = cast<{1}>(src[linear_index]);
474
+ }}
475
+
476
+ kernel void scatter_kernel_2(uint linear_index [[thread_position_in_grid]],
477
+ constant void * src_ [[buffer(0)]],
478
+ device void * dst_ [[buffer(1)]],
479
+ constant packed_uint2 & size [[buffer(2)]],
480
+ constant packed_uint2 & stride [[buffer(3)]],
481
+ constant uint32_t & numel [[buffer(4)]]) {{
482
+ if (linear_index >= numel) return;
483
+
484
+ constant {0} * src = (constant {0} *)src_;
485
+ device {1} * dst = (device {1} *)dst_;
486
+
487
+ packed_uint2 local_index;
488
+ local_index.x = linear_index / size[1] % size[0];
489
+ local_index.y = linear_index % size[1];
490
+
491
+ const packed_uint2 strided_index = local_index * stride;
492
+ dst[strided_index.x + strided_index.y] = cast<{1}>(src[linear_index]);
493
+ }}
494
+
495
+ kernel void scatter_kernel_1(uint linear_index [[thread_position_in_grid]],
496
+ constant void * src_ [[buffer(0)]],
497
+ device void * dst_ [[buffer(1)]],
498
+ constant int & size [[buffer(2)]],
499
+ constant int & stride [[buffer(3)]],
500
+ constant uint32_t & numel [[buffer(4)]]) {{
501
+ if (linear_index >= numel) return;
502
+
503
+ constant {0} * src = (constant {0} *)src_;
504
+ device {1} * dst = (device {1} *)dst_;
505
+
506
+ const int local_index = linear_index % size;
507
+ const int strided_index = local_index * stride;
508
+ dst[strided_index] = cast<{1}>(src[linear_index]);
509
+ }}
510
+ )METAL_SCATTER";
511
+
512
+ static const char *GATHER_OPS_TEMPLATE = R"METAL_GATHER(
513
+ struct __attribute__ ((packed)) packed_uint5{{
514
+ uint32_t x; uint32_t y; uint32_t z; uint32_t w; uint32_t u;
515
+ }};
516
+
517
+ template<typename Y, typename X>
518
+ Y cast(const X x);
519
+
520
+ template<>
521
+ {1} cast<{1}, {0}>(const {0} x) {{
522
+ return {2};
523
+ }}
524
+
525
+ kernel void gather_kernel_5(uint linear_index [[thread_position_in_grid]],
526
+ constant void * src_ [[buffer(0)]],
527
+ device void * dst_ [[buffer(1)]],
528
+ constant packed_uint5 & size [[buffer(2)]],
529
+ constant packed_uint5 & stride [[buffer(3)]],
530
+ constant uint32_t & numel [[buffer(4)]]) {{
531
+ if (linear_index >= numel) return;
532
+
533
+ constant {0} * src = (constant {0} *)src_;
534
+ device {1} * dst = (device {1} *)dst_;
535
+
536
+
537
+ packed_uint5 local_index;
538
+ local_index.x = linear_index / (size.u * size.w * size.z * size.y) % size.x;
539
+ local_index.y = linear_index / (size.u * size.w * size.z) % size.y;
540
+ local_index.z = linear_index / (size.u * size.w) % size.z;
541
+ local_index.w = linear_index / size.u % size.w;
542
+ local_index.u = linear_index % size.u;
543
+
544
+ packed_uint5 strided_index;
545
+ strided_index.x = local_index.x * stride.x;
546
+ strided_index.y = local_index.y * stride.y;
547
+ strided_index.z = local_index.z * stride.z;
548
+ strided_index.w = local_index.w * stride.w;
549
+ strided_index.u = local_index.u * stride.u;
550
+
551
+ dst[linear_index] = cast<{1}>(src[strided_index.x + strided_index.y + strided_index.z + strided_index.w + strided_index.u]);
552
+ }}
553
+
554
+ kernel void gather_kernel_4(uint linear_index [[thread_position_in_grid]],
555
+ constant void * src_ [[buffer(0)]],
556
+ device void * dst_ [[buffer(1)]],
557
+ constant packed_uint4 & size [[buffer(2)]],
558
+ constant packed_uint4 & stride [[buffer(3)]],
559
+ constant uint32_t & numel [[buffer(4)]]) {{
560
+ if (linear_index >= numel) return;
561
+
562
+ constant {0} * src = (constant {0} *)src_;
563
+ device {1} * dst = (device {1} *)dst_;
564
+
565
+ packed_uint4 local_index;
566
+ local_index.x = linear_index / (size[3] * size[2] * size[1]) % size[0];
567
+ local_index.y = linear_index / (size[3] * size[2]) % size[1];
568
+ local_index.z = linear_index / size[3] % size[2];
569
+ local_index.w = linear_index % size[3];
570
+
571
+ const packed_uint4 strided_index = local_index * stride;
572
+ dst[linear_index] = cast<{1}>(src[strided_index.x + strided_index.y + strided_index.z + strided_index.w]);
573
+ }}
574
+
575
+ kernel void gather_kernel_3(uint linear_index [[thread_position_in_grid]],
576
+ constant void * src_ [[buffer(0)]],
577
+ device void * dst_ [[buffer(1)]],
578
+ constant packed_uint3 & size [[buffer(2)]],
579
+ constant packed_uint3 & stride [[buffer(3)]],
580
+ constant uint32_t & numel [[buffer(4)]]) {{
581
+ if (linear_index >= numel) return;
582
+
583
+ constant {0} * src = (constant {0} *)src_;
584
+ device {1} * dst = (device {1} *)dst_;
585
+
586
+ packed_uint3 local_index;
587
+ local_index.x = linear_index / (size[2] * size[1]) % size[0];
588
+ local_index.y = linear_index / size[2] % size[1];
589
+ local_index.z = linear_index % size[2];
590
+
591
+ const packed_uint3 strided_index = local_index * stride;
592
+ dst[linear_index] = cast<{1}>(src[strided_index.x + strided_index.y + strided_index.z]);
593
+ }}
594
+
595
+ kernel void gather_kernel_2(uint linear_index [[thread_position_in_grid]],
596
+ constant void * src_ [[buffer(0)]],
597
+ device void * dst_ [[buffer(1)]],
598
+ constant packed_uint2 & size [[buffer(2)]],
599
+ constant packed_uint2 & stride [[buffer(3)]],
600
+ constant uint32_t & numel [[buffer(4)]]) {{
601
+ if (linear_index >= numel) return;
602
+
603
+ constant {0} * src = (constant {0} *)src_;
604
+ device {1} * dst = (device {1} *)dst_;
605
+
606
+ packed_uint2 local_index;
607
+ local_index.x = linear_index / size[1] % size[0];
608
+ local_index.y = linear_index % size[1];
609
+
610
+ const packed_uint2 strided_index = local_index * stride;
611
+ dst[linear_index] = cast<{1}>(src[strided_index.x + strided_index.y]);
612
+ }}
613
+
614
+ kernel void gather_kernel_1(uint linear_index [[thread_position_in_grid]],
615
+ constant void * src_ [[buffer(0)]],
616
+ device void * dst_ [[buffer(1)]],
617
+ constant int & size [[buffer(2)]],
618
+ constant int & stride [[buffer(3)]],
619
+ constant uint32_t & numel [[buffer(4)]]) {{
620
+ if (linear_index >= numel) return;
621
+
622
+ constant {0} * src = (constant {0} *)src_;
623
+ device {1} * dst = (device {1} *)dst_;
624
+
625
+ const int local_index = linear_index % size;
626
+ const int strided_index = local_index * stride;
627
+ dst[linear_index] = cast<{1}>(src[strided_index]);
628
+ }}
629
+ )METAL_GATHER";
630
+ } // namespace at::mps
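
The atomic_index_put_accumulate kernel above emulates a floating-point atomic add with a compare-and-swap loop over the raw 32-bit pattern, since that code path only relies on integer atomics. The same technique in host-side C++20, using std::atomic<uint32_t> and std::bit_cast (a sketch of the idea, not the Metal code itself):

    #include <atomic>
    #include <bit>
    #include <cstdint>

    // Add `value` to the float stored in `slot`, retrying until no other thread
    // has changed the bits in between (mirrors atomic_fetch_add_relaxed above).
    void atomic_add_float(std::atomic<uint32_t>& slot, float value) {
      uint32_t expected = slot.load(std::memory_order_relaxed);
      uint32_t desired = std::bit_cast<uint32_t>(std::bit_cast<float>(expected) + value);
      while (!slot.compare_exchange_weak(expected, desired,
                                         std::memory_order_relaxed,
                                         std::memory_order_relaxed)) {
        // On failure, `expected` holds the freshly observed bits; recompute and retry.
        desired = std::bit_cast<uint32_t>(std::bit_cast<float>(expected) + value);
      }
    }
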
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/mps/MPSAllocator.h ADDED
@@ -0,0 +1,401 @@
1
+ // Copyright © 2022 Apple Inc.
2
+
3
+ #pragma once
4
+
5
+ #include <ATen/mps/MPSAllocatorInterface.h>
6
+ #include <ATen/mps/MPSEvent.h>
7
+ #include <ATen/mps/MPSStream.h>
8
+
9
+ #include <cstdio>
10
+ #include <mutex>
11
+ #include <set>
12
+ #include <unordered_set>
13
+ #include <mach/vm_page_size.h>
14
+ #include <c10/util/flat_hash_map.h>
15
+
16
+ // this implementation is based on CUDACachingAllocator.
17
+ // It utilizes Metal Heaps to improve the performance with buffer allocation.
18
+ // Do not include this header. Use MPSAllocatorInterface.h instead.
19
+ // TODO: Unify the logic with CUDACachingAllocator and remove redundant code.
20
+ namespace at::mps::HeapAllocator {
21
+
22
+ static const size_t kMaxSmallAlloc = MB(1); // largest "small" allocation is 1 MiB
23
+ static const size_t kMinLargeAlloc = MB(10); // allocations between 1 and 10 MiB may use kLargeHeap
24
+ static const size_t kRoundLarge = MB(2); // round up large allocations to 2 MiB
25
+ static const size_t kSmallHeap = MB(8); // "small" allocations are packed in 8 MiB heaps
26
+ static const size_t kLargeHeap = MB(32); // "large" allocations may be packed in 32 MiB heaps
27
+ static const size_t kXLargeHeapD = MB(128); // "extra large" allocations on Discrete devices may be packed in 128 MiB heaps
28
+ static const size_t kXLargeHeapU = MB(1024); // "extra large" allocations on Unified devices may be packed in 1 GiB heaps
29
+ static const size_t kMaxScalarAlloc = (sizeof(int64_t)); // largest "scalar" allocation
30
+
31
+ // buffer pools could be customized with a combination of usage flags
32
+ enum UsageFlags : uint32_t {
33
+ PRIVATE = 0,
34
+ SMALL = (1 << 0), // small heaps have sizes of kSmallHeap, and large ones kLargeHeap
35
+ SHARED = (1 << 1), // shared pools allocated on devices with unified memory; otherwise, private between host/device
36
+ MANAGED = (1 << 2), // managed storage mode
37
+ HAZARD = (1 << 3), // enables Automatic Hazard Tracking for the resources allocated on the pool
38
+ SCALAR = (1 << 4), // used to import CPU scalar values to GPU and use them in MPS Stream
39
+ };
40
+ // debug verbosity flags
41
+ enum DebugVerbosity : uint32_t {
42
+ SILENT = 0,
43
+ PROFILING = (1 << 0), // print generic profiling data for total system memory usage
44
+ ALLOCATIONS = (1 << 1), // print buffer allocations
45
+ RECYCLES = (1 << 2), // print buffer recycling
46
+ RELEASES = (1 << 3), // print buffer releases
47
+ LARGE_ONLY = (1 << 4), // only log large buffer pool transactions
48
+ };
49
+
50
+ struct HeapBlock;
51
+
52
+ struct BufferBlock {
53
+ id<MTLBuffer> buffer;
54
+ void* cpu_ptr = nullptr; // stores the pointer to CPU mapping of a Shared MTLBuffer
55
+ size_t size; // size after alignment
56
+ size_t requested_size; // requested size (before alignment)
57
+ // buffer shape is used for retrieving base of views in cached graphs
58
+ std::vector<int64_t> shape;
59
+ bool in_use = false;
60
+ HeapBlock* heap;
61
+ id_t buf_id;
62
+ // counter used to mark the least recently used buffers as candidates for garbage collection
63
+ uint32_t gc_count = 0;
64
+ uint32_t use_count = 0;
65
+ // counter to assign unique ids to buffer blocks
66
+ static uint64_t buffer_counter;
67
+ // Metal events used to sync GPU/CPU operations on the shared-storage buffers
68
+ MPSEventPtr event;
69
+
70
+ BufferBlock(size_t Size, size_t RequestedSize = 0, const id<MTLBuffer> Buffer = nullptr,
71
+ HeapBlock* Heap = nullptr) :
72
+ buffer(Buffer), size(Size), requested_size(RequestedSize),
73
+ heap(Heap), buf_id(Buffer ? ++buffer_counter : 0) { }
74
+
75
+ static bool Comparator(const BufferBlock* a, const BufferBlock* b) {
76
+ return (a->size != b->size) ? a->size < b->size : (uintptr_t)a->buffer < (uintptr_t)b->buffer;
77
+ }
78
+ static size_t alignUp(size_t Size, size_t Alignment) {
79
+ assert(((Alignment - 1) & Alignment) == 0);
80
+ return ((Size + Alignment - 1) & ~(Alignment - 1));
81
+ }
82
+ uint32_t retainCount() const { return [buffer retainCount]; }
83
+ };
84
+ typedef bool (*BufferComparison)(const BufferBlock*, const BufferBlock*);
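
BufferBlock::alignUp above rounds a size up to a power-of-two alignment with the classic (size + align - 1) & ~(align - 1) trick; the preceding assert enforces that the alignment really is a power of two. A small standalone check of the arithmetic (not ATen code):

    #include <cstddef>

    constexpr std::size_t align_up(std::size_t size, std::size_t alignment) {
      // Valid only for power-of-two alignments, exactly what the assert above guarantees.
      return (size + alignment - 1) & ~(alignment - 1);
    }

    static_assert(align_up(100, 64) == 128);
    static_assert(align_up(128, 64) == 128);   // already-aligned sizes are unchanged
    static_assert(align_up(1, 4096) == 4096);  // page-size rounding, as used for Metal heaps

    int main() {}
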
85
+
86
+ struct BufferPool;
87
+ struct AllocParams {
88
+ AllocParams(size_t Alloc_Size, size_t Requested_Size, BufferPool* Pool) :
89
+ search_key(Alloc_Size), pool(Pool), requested_size(Requested_Size) { }
90
+ size_t size() const { return search_key.size; }
91
+
92
+ BufferBlock search_key;
93
+ BufferPool* pool;
94
+ BufferBlock* buffer_block = nullptr;
95
+ size_t requested_size;
96
+ // true if we exceed the low watermark limit. In this case
97
+ // we apply strategies to relieve the pressure before allocation.
98
+ bool has_memory_pressure = false;
99
+ // true if we're allocating on a unified memory device
100
+ bool has_unified_memory = true;
101
+ };
102
+
103
+ struct HeapBlock {
104
+ id<MTLHeap> heap;
105
+ struct { size_t total, available; } size;
106
+ BufferPool* pool;
107
+ unsigned int n_buffers = 0;
108
+ id_t heap_id;
109
+ // indicates if we split this heap to sub-allocate 'several' buffers (otherwise single buffer)
110
+ bool is_split;
111
+ // counter to assign unique ids to heap blocks
112
+ static uint64_t heap_counter;
113
+
114
+ HeapBlock(size_t Size, const id<MTLHeap> Heap = nullptr, BufferPool *Pool = nullptr) :
115
+ heap(Heap), size({.total = Size, .available = Size}), pool(Pool),
116
+ heap_id(Heap ? ++heap_counter : 0), is_split(true) { }
117
+
118
+ static MTLResourceOptions getOptions(uint32_t usage) {
119
+ // TODO: check the caching performance of write-combined mode
120
+ MTLResourceOptions options = MTLResourceCPUCacheModeDefaultCache;
121
+
122
+ if (usage & UsageFlags::MANAGED)
123
+ options |= MTLResourceStorageModeManaged;
124
+ else if (usage & UsageFlags::SHARED)
125
+ options |= MTLResourceStorageModeShared;
126
+ else
127
+ options |= MTLResourceStorageModePrivate;
128
+
129
+ options |= (usage & UsageFlags::HAZARD) ? MTLResourceHazardTrackingModeTracked : MTLResourceHazardTrackingModeUntracked;
130
+
131
+ return options;
132
+ }
133
+
134
+ static HeapBlock* createHeapBlock(AllocParams& params, id<MTLDevice> device, uint32_t usage) {
135
+ HeapBlock *heapBlock = nullptr;
136
+ bool is_split = true;
137
+ const size_t size = params.size();
138
+ MTLHeapDescriptor *d = [MTLHeapDescriptor new];
139
+ if (d) {
140
+ const size_t kXLargeHeap = params.has_unified_memory ? kXLargeHeapU : kXLargeHeapD;
141
+ if (size <= kMaxSmallAlloc) {
142
+ d.size = kSmallHeap;
143
+ } else if (size < kMinLargeAlloc) {
144
+ d.size = kLargeHeap;
145
+ } else if (size < kXLargeHeap / 2 && !params.has_memory_pressure) {
146
+ d.size = kXLargeHeap;
147
+ } else {
148
+ d.size = kRoundLarge * ((size + kRoundLarge - 1) / kRoundLarge);
149
+ is_split = false;
150
+ }
151
+ d.storageMode = (usage & UsageFlags::SHARED) ? MTLStorageModeShared : MTLStorageModePrivate;
152
+ d.cpuCacheMode = MTLCPUCacheModeDefaultCache;
153
+ // this automatically handles Metal buffer access synchronizations at the
154
+ // cost of slightly lower performance.
155
+ d.hazardTrackingMode = (usage & UsageFlags::HAZARD) ? MTLHazardTrackingModeTracked : MTLHazardTrackingModeUntracked;
156
+ d.resourceOptions = getOptions(usage);
157
+ d.type = MTLHeapTypeAutomatic;
158
+ id<MTLHeap> heap = [device newHeapWithDescriptor: d];
159
+ if (heap) {
160
+ [heap setPurgeableState:MTLPurgeableStateNonVolatile];
161
+ const size_t heap_size = heapAvailableSize(heap);
162
+ heapBlock = new HeapBlock(heap_size, heap, params.pool);
163
+ if (heapBlock) {
164
+ heapBlock->is_split = is_split;
165
+ }
166
+ }
167
+ [d release];
168
+ }
169
+ return heapBlock;
170
+ }
171
+ static bool Comparator(const HeapBlock* a, const HeapBlock* b) {
172
+ return (a->size.available != b->size.available) ? a->size.available < b->size.available :
173
+ (uintptr_t)a->heap < (uintptr_t)b->heap;
174
+ }
175
+ static NSUInteger heapAvailableSize(id<MTLHeap> heap, size_t Alignment = vm_page_size) {
176
+ return [heap maxAvailableSizeWithAlignment:Alignment];
177
+ }
178
+ NSUInteger Size() {
179
+ return [heap size];
180
+ }
181
+ id<MTLBuffer> newMTLBuffer(size_t length, uint32_t usage) {
182
+ id<MTLBuffer> buf = [heap newBufferWithLength:length options:getOptions(usage)];
183
+ if (buf) {
184
+ updateAvailableSize();
185
+ n_buffers++;
186
+ }
187
+ return buf;
188
+ }
189
+ // returns the retainCount before releasing the buffer
190
+ uint32_t releaseMTLBuffer(id<MTLBuffer>& buffer) {
191
+ const uint32_t retainCount = [buffer retainCount];
192
+ [buffer release];
193
+ buffer = nil;
194
+ updateAvailableSize();
195
+ n_buffers--;
196
+ return retainCount;
197
+ }
198
+ // returns the retainCount before releasing the heap
199
+ uint32_t releaseMTLHeap() {
200
+ const uint32_t retainCount = [heap retainCount];
201
+ TORCH_INTERNAL_ASSERT(!n_buffers); // assert if heap isn't empty
202
+ [heap setPurgeableState:MTLPurgeableStateEmpty];
203
+ [heap release];
204
+ heap = nil;
205
+ size.available = 0;
206
+ return retainCount;
207
+ }
208
+ uint32_t retainCount() const { return [heap retainCount]; }
209
+ void updateAvailableSize() { size.available = heapAvailableSize(heap); }
210
+ };
211
+ typedef bool (*HeapComparison)(const HeapBlock*, const HeapBlock*);
212
+
213
+ struct BufferPool {
214
+ enum class Kind {
215
+ PRIVATE_SMALL,
216
+ PRIVATE_LARGE,
217
+ SHARED_SMALL,
218
+ SHARED_LARGE,
219
+ SCALAR,
220
+ };
221
+
222
+ BufferPool(const id<MTLDevice> Device, uint32_t Usage) :
223
+ device(Device), usage(Usage),
224
+ heaps(HeapBlock::Comparator), available_buffers(BufferBlock::Comparator) { }
225
+
226
+ const id<MTLDevice> device;
227
+ // usage flags to customize the pool for various purposes (see UsageFlags enum)
228
+ const uint32_t usage;
229
+ // total number of buffers in the pool
230
+ uint32_t n_buffers = 0;
231
+ // total allocations size on this pool
232
+ size_t allocated_size = 0;
233
+ // total memory available in the pool
234
+ size_t available_size = 0;
235
+ // list of heaps ordered by their "available" (not total) memory size
236
+ std::set<HeapBlock*, HeapComparison> heaps;
237
+ // list of only "available" buffers in the pool (i.e., buffers not in-use)
238
+ std::set<BufferBlock*, BufferComparison> available_buffers;
239
+ // list of buffers in a "limbo" state: they have already been freed on the PyTorch
+ // side, but could not be returned to the pool because command buffers with
+ // retainCount > 1 are still using them. In this state, a buffer is neither ready
+ // to be recycled nor available in the pool. These buffers are returned to the
+ // pool once the command buffers' completionHandler callbacks are called.
245
+ std::unordered_set<BufferBlock*> buffers_pending_free;
246
+ // list of heaps pending size update
247
+ std::unordered_set<HeapBlock*> heaps_pending_update;
248
+ };
249
+
250
+ class MPSHeapAllocatorImpl {
251
+ public:
252
+ explicit MPSHeapAllocatorImpl() :
253
+ m_device(at::mps::MPSDevice::getInstance()->device()),
254
+ m_max_buffer_size([m_device maxBufferLength]),
255
+ m_stream(getDefaultMPSStream()),
256
+ m_event_pool(getMPSEventPool()) {
257
+ init_allocator();
258
+ }
259
+ ~MPSHeapAllocatorImpl() {
260
+ emptyCache();
261
+ }
262
+ // interface exposed to at::Allocator
263
+ id<MTLBuffer> malloc(size_t size, uint32_t usage);
264
+ // frees a buffer and returns it into buffer pool
265
+ void free(void* ptr);
266
+ // releases all the cached buffers and their associated heaps
267
+ void emptyCache();
268
+ // free inactive buffers that are pending to be freed
269
+ void freeInactiveBuffers();
270
+ // returns true if buffer was allocated from the shared pool
271
+ bool isSharedBuffer(const void* ptr);
272
+ // get the requested unaligned size of an MTLBuffer
273
+ ssize_t getUnalignedBufferSize(const void* ptr);
274
+ // set the shape of a base tensor from a view tensor
275
+ void setBufferShape(const void* ptr, const IntArrayRef& shape);
276
+ // retrieve the shape of a base tensor from a view tensor
277
+ IntArrayRef getBufferShape(const void* ptr);
278
+ // get the unique ID of the buffer
279
+ id_t getBufferId(const void* ptr);
280
+ // allocate a buffer from a specialized pool to import CPU scalars into GPU
281
+ id<MTLBuffer> allocScalarBufferWithValue(void* value, size_t size);
282
+ // returns a CPU mapping of the input buffer and its retainCount,
+ // provided it has Shared storage mode and was allocated by MPSAllocator
284
+ std::pair<const void*, uint32_t> getSharedBufferPtr(const void* buffer);
285
+ // records events for a list of MTLBuffers (list is used to lock the mutex once)
286
+ // returns true if it records any event (i.e., if the passed buffers exist and are shared-storage)
287
+ bool recordEvents(c10::ArrayRef<const void*> buffers);
288
+ // waits for the event to signal the completion of GPU execution
289
+ // on the passed shared buffers (list is used to lock the mutex once)
290
+ // returns true if actually waited on any event
291
+ bool waitForEvents(c10::ArrayRef<const void*> buffers);
292
+ // this indicates how far (in Megabytes) the current total allocations are from the
293
+ // low watermark limit which is used to detect if we're under memory pressure
294
+ // This returns zero if we've reached the low watermark limit
295
+ ssize_t getLowWatermarkValue();
296
+ // (see m_low_watermark_ratio for description)
297
+ void setLowWatermarkRatio(double ratio);
298
+ // (see m_high_watermark_ratio for description)
299
+ void setHighWatermarkRatio(double ratio);
300
+ // (see m_low_watermark_limit for description)
301
+ size_t getLowWatermarkLimit() const { return m_low_watermark_limit; }
302
+ // (see m_max_total_allowed_size for description)
303
+ size_t getHighWatermarkLimit() const { return m_max_total_allowed_size; }
304
+ // (see m_total_allocated_memory for description)
305
+ size_t getTotalAllocatedMemory() const { return m_total_allocated_memory; }
306
+ // (see m_current_allocated_memory for description)
307
+ size_t getCurrentAllocatedMemory() const { return m_current_allocated_memory; }
308
+ // total GPU memory allocated in the process by Metal driver; including
309
+ // implicit allocations from MPS/MPSGraph frameworks and MPSHeapAllocatorImpl.
310
+ size_t getDriverAllocatedMemory() const { return current_allocated_size(); }
311
+ // (see enum DebugVerbosity for description)
312
+ uint32_t getDebugVerbosity() const { return m_debug_verbosity; }
313
+ // returns the device that we allocate from
314
+ inline id<MTLDevice> Device() const { return m_device; }
315
+
316
+ // TODO: make a common function to do size unit conversions in PyTorch.
317
+ inline std::string format_size(uint64_t size) const;
318
+
319
+ private:
320
+ // (see m_high_watermark_ratio for description)
321
+ constexpr static double default_high_watermark_ratio = 1.7;
322
+ // we set the allowed upper bound to twice the size of recommendedMaxWorkingSetSize.
323
+ constexpr static double default_high_watermark_upper_bound = 2.0;
324
+ // (see m_low_watermark_ratio for description)
325
+ // on unified memory, we could allocate beyond the recommendedMaxWorkingSetSize
326
+ constexpr static double default_low_watermark_ratio_unified = 1.4;
327
+ constexpr static double default_low_watermark_ratio_discrete = 1.0;
328
+
329
+ const id<MTLDevice> m_device;
330
+ std::recursive_mutex m_mutex;
331
+ // allocated buffers by device pointer
332
+ ska::flat_hash_map<const void*, BufferBlock*> m_allocated_buffers;
333
+ // using a container for pools to simplify iterating them
334
+ ska::flat_hash_map<BufferPool::Kind, std::unique_ptr<BufferPool>> m_pools;
335
+ // total memory allocated by HeapAllocator (including blocks in pools)
336
+ size_t m_total_allocated_memory = 0;
337
+ // currently active memory allocations in use (i.e., blocks not in pools)
338
+ size_t m_current_allocated_memory = 0;
339
+ // max buffer size allowed by Metal
340
+ size_t m_max_buffer_size = 0;
341
+ // maximum total size allowed to be allocated
342
+ size_t m_max_total_allowed_size = 0;
343
+ // high watermark ratio is a hard limit for the total allowed allocations
344
+ // 0. : disables high watermark limit (may cause system failure if system-wide OOM occurs)
345
+ // 1. : recommended maximum allocation size (i.e., device.recommendedMaxWorkingSetSize)
346
+ // >1.: allows limits beyond the device.recommendedMaxWorkingSetSize
347
+ // e.g., value 0.95 means we allocate up to 95% of recommended maximum
348
+ // allocation size; beyond that, the allocations would fail with OOM error.
349
+ double m_high_watermark_ratio;
350
+ // low watermark ratio is a soft limit to attempt limiting memory allocations up to the lower watermark
351
+ // level by garbage collection or by committing command buffers more frequently (a.k.a. adaptive commit).
352
+ // Value between 0 to m_high_watermark_ratio (setting 0.0 disables adaptive commit and garbage collection)
353
+ // e.g., value 0.9 means we 'attempt' to limit allocations up to 90% of recommended maximum
354
+ // allocation size.
355
+ double m_low_watermark_ratio;
356
+ // low watermark size limit (in Bytes) at the time we initialize the allocator
357
+ size_t m_low_watermark_limit;
358
+ // use "PYTORCH_DEBUG_MPS_ALLOCATOR" env-var to set debug verbosity
359
+ uint32_t m_debug_verbosity;
360
+ // default MPS stream
361
+ MPSStream* m_stream;
362
+ // we hold a reference to MPSEventPool so that it is destroyed after MPSAllocator
363
+ std::shared_ptr<MPSEventPool> m_event_pool;
364
+
365
+ void init_allocator();
366
+ void init_buffer_pools();
367
+ HeapBlock* get_free_heap(AllocParams& params);
368
+ bool get_free_buffer(AllocParams& params);
369
+ BufferBlock* get_allocated_buffer_block(const void* ptr);
370
+ BufferBlock* alloc_buffer_block(size_t size, uint32_t usage);
371
+ bool alloc_buffer(AllocParams& params);
372
+ void free_buffer(BufferBlock* buffer_block);
373
+ // returns true if the container heap is also released
374
+ bool release_buffer(BufferBlock* buffer_block, bool remove_empty_heap = true);
375
+ void release_buffers(BufferPool& pool);
376
+ bool release_available_cached_buffers(AllocParams& params);
377
+ bool release_cached_buffers();
378
+ // free unused cached blocks to reclaim GPU memory if memory pressure is high
379
+ void garbage_collect_cached_buffers(AllocParams& params);
380
+ // returns the suitable buffer pool type for the usage or
381
+ // requested/allocated sizes
382
+ BufferPool& get_pool(size_t requested_size, size_t aligned_size, uint32_t usage);
383
+ // returns the aligned allocation size that is optimized
384
+ // for the buffers to get reused frequently
385
+ size_t get_allocation_size(size_t size, uint32_t usage) const;
386
+ // maximum size of device memory available for allocation in current process
387
+ // Note: the recommendedMaxWorkingSetSize is typically 75% of the total system memory.
388
+ size_t max_device_size() const { return [m_device recommendedMaxWorkingSetSize]; }
389
+ // there are implicit allocations from MPS backend, so we need to query the 'device' for
390
+ // total allocated size instead of manually tracking in MPSAllocator
391
+ size_t current_allocated_size() const { return [m_device currentAllocatedSize]; }
392
+
393
+ bool trigger_memory_callbacks(BufferBlock* buffer_block, IMpsAllocatorCallback::EventType event) const {
394
+ for (const auto& name : MPSAllocatorCallbacksRegistry()->Keys()) {
395
+ MPSAllocatorCallbacksRegistry()->Create(name)->executeMPSAllocatorCallback(buffer_block ? buffer_block->buffer : nullptr, event);
396
+ }
397
+ return true;
398
+ }
399
+ };
400
+
401
+ } // namespace at::mps::HeapAllocator
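Not part of the diff — a minimal standalone sketch of the power-of-two rounding that BufferBlock::alignUp above relies on (the free function name align_up and the example values are illustrative only):

#include <cassert>
#include <cstddef>

// round `size` up to the next multiple of `alignment`; alignment must be a power of two
static size_t align_up(size_t size, size_t alignment) {
  assert(((alignment - 1) & alignment) == 0);
  return (size + alignment - 1) & ~(alignment - 1);
}
// e.g. align_up(1000, 256) == 1024, align_up(4096, 4096) == 4096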
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/mps/MPSAllocatorInterface.h ADDED
@@ -0,0 +1,61 @@
+ // Copyright © 2023 Apple Inc.
+
+ #pragma once
+
+ #include <c10/core/Allocator.h>
+ #include <c10/util/Registry.h>
+ #include <ATen/core/ATen_fwd.h>
+
+ #define MB(x) (x * 1048576UL)
+
+ namespace at::mps {
+
+ // this is a public interface to access MPSAllocator.
+ // Do not declare methods that would depend on MPS or Metal frameworks.
+ class IMPSAllocator : public c10::Allocator {
+ public:
+ // see the comments in MPSAllocator.h for the description of these methods.
+ virtual void emptyCache() const = 0;
+ virtual void freeInactiveBuffers() const = 0;
+ virtual ssize_t getUnalignedBufferSize(const void* ptr) const = 0;
+ virtual IntArrayRef getBufferShape(const void* ptr) const = 0;
+ virtual id_t getBufferId(const void* ptr) const = 0;
+ virtual void setBufferShape(const void* ptr, const IntArrayRef& shape) const = 0;
+ virtual bool isSharedBuffer(const void* ptr) const = 0;
+ virtual bool isSharedStorageSupported() const = 0;
+ virtual c10::DataPtr allocScalarBufferWithValue(void* value, size_t size) const = 0;
+ virtual std::string formatSize(size_t size) const = 0;
+ virtual void setLowWatermarkRatio(double ratio) const = 0;
+ virtual void setHighWatermarkRatio(double ratio) const = 0;
+ virtual ssize_t getLowWatermarkValue() const = 0;
+ virtual size_t getLowWatermarkLimit() const = 0;
+ virtual size_t getHighWatermarkLimit() const = 0;
+ virtual size_t getTotalAllocatedMemory() const = 0;
+ virtual size_t getCurrentAllocatedMemory() const = 0;
+ virtual size_t getDriverAllocatedMemory() const = 0;
+ virtual std::pair<const void*, uint32_t> getSharedBufferPtr(const void* ptr) const = 0;
+ virtual bool recordEvents(c10::ArrayRef<const void*> buffers) const = 0;
+ virtual bool waitForEvents(c10::ArrayRef<const void*> buffers) const = 0;
+ };
+
+ class IMpsAllocatorCallback {
+ public:
+ enum class EventType {
+ ALLOCATED, // buffer got allocated to be used immediately
+ RECYCLED, // buffer pulled from free list to be reused
+ FREED, // buffer put to free list for future recycling
+ RELEASED, // buffer memory released
+ ALLOCATION_FAILED // buffer allocation failed
+ };
+ virtual ~IMpsAllocatorCallback() = default;
+ virtual void executeMPSAllocatorCallback(void* ptr, EventType event) = 0;
+ };
+
+ // MPS allocator will execute every registered callback when a block of memory is freed.
+ C10_DECLARE_REGISTRY(MPSAllocatorCallbacksRegistry, IMpsAllocatorCallback);
+ #define REGISTER_MPS_ALLOCATOR_CALLBACK(name, ...) \
+ C10_REGISTER_CLASS(MPSAllocatorCallbacksRegistry, name, __VA_ARGS__);
+
+ IMPSAllocator* getIMPSAllocator(bool sharedAllocator = false);
+
+ } // namespace at::mps
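Not part of the diff — a minimal sketch of how a callback could be registered with the registry declared above, assuming it is compiled inside ATen where this header is available (the class and registration key are hypothetical):

#include <ATen/mps/MPSAllocatorInterface.h>

namespace {
// invoked by the MPS allocator for every buffer event it reports
struct LoggingCallback : public at::mps::IMpsAllocatorCallback {
  void executeMPSAllocatorCallback(void* ptr, EventType event) override {
    if (event == EventType::RELEASED) {
      // react to the buffer's memory being released (e.g., bookkeeping)
    }
  }
};
} // namespace

// no trailing ';' needed: the macro already expands to a statement ending in one
REGISTER_MPS_ALLOCATOR_CALLBACK(logging_callback, LoggingCallback)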
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/mps/MPSDevice.h ADDED
@@ -0,0 +1,85 @@
1
+ // Copyright © 2022 Apple Inc.
2
+
3
+ #pragma once
4
+ #include <c10/core/Allocator.h>
5
+ #include <c10/macros/Macros.h>
6
+ #include <c10/util/Exception.h>
7
+
8
+
9
+ #ifdef __OBJC__
10
+ #include <Foundation/Foundation.h>
11
+ #include <Metal/Metal.h>
12
+ #include <MetalPerformanceShaders/MetalPerformanceShaders.h>
13
+ typedef id<MTLDevice> MTLDevice_t;
14
+ typedef id<MTLLibrary> MTLLibrary_t;
+ typedef id<MTLComputePipelineState> MTLComputePipelineState_t;
17
+ #else
18
+ typedef void* MTLDevice;
19
+ typedef void* MTLDevice_t;
20
+ typedef void* MTLLibrary_t;
+ typedef void* MTLComputePipelineState_t;
23
+ #endif
24
+
25
+ using namespace std;
26
+
27
+ namespace at::mps {
28
+
29
+ // Helper enum to check if an MPSGraph op is supported in a given macOS version
30
+ enum class MacOSVersion : uint32_t {
31
+ MACOS_VER_13_0_PLUS = 0,
32
+ MACOS_VER_13_1_PLUS,
33
+ MACOS_VER_13_2_PLUS,
34
+ MACOS_VER_13_3_PLUS,
35
+ MACOS_VER_14_0_PLUS,
36
+ };
37
+
38
+ //-----------------------------------------------------------------
39
+ // MPSDevice
40
+ //
41
+ // MPSDevice is a singleton class that returns the default device
42
+ //-----------------------------------------------------------------
43
+
44
+ class TORCH_API MPSDevice {
45
+ public:
46
+ /**
47
+ * MPSDevice should not be cloneable.
48
+ */
49
+ MPSDevice(MPSDevice& other) = delete;
50
+ /**
51
+ * MPSDevice should not be assignable.
52
+ */
53
+ void operator=(const MPSDevice&) = delete;
54
+ /**
55
+ * Gets single instance of the Device.
56
+ */
57
+ static MPSDevice* getInstance();
58
+ /**
59
+ * Returns the single device.
60
+ */
61
+ MTLDevice_t device() {
62
+ return _mtl_device;
63
+ }
64
+ /**
65
+ * Returns whether running on Ventura or newer
66
+ */
67
+ bool isMacOS13Plus(MacOSVersion version) const;
68
+
69
+ MTLComputePipelineState_t metalIndexingPSO(const std::string &kernel);
70
+ MTLLibrary_t getMetalIndexingLibrary();
71
+
72
+ ~MPSDevice();
73
+
74
+ private:
75
+ static MPSDevice* _device;
76
+ MTLDevice_t _mtl_device;
77
+ MTLLibrary_t _mtl_indexing_library;
78
+ MPSDevice();
79
+ };
80
+
81
+ TORCH_API bool is_available();
82
+ TORCH_API bool is_macos_13_or_newer(MacOSVersion version = MacOSVersion::MACOS_VER_13_0_PLUS);
83
+ TORCH_API at::Allocator* GetMPSAllocator(bool useSharedAllocator = false);
84
+
85
+ } // namespace at::mps
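Not part of the diff — a minimal sketch of querying the singleton device and the macOS-version helpers declared above, assuming an Objective-C++ translation unit inside ATen (the function names are illustrative only):

#include <ATen/mps/MPSDevice.h>

static bool runs_on_macos_14_or_newer() {
  return at::mps::is_available() &&
         at::mps::is_macos_13_or_newer(at::mps::MacOSVersion::MACOS_VER_14_0_PLUS);
}

static MTLDevice_t current_metal_device() {
  return at::mps::MPSDevice::getInstance()->device();
}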
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/mps/MPSEvent.h ADDED
@@ -0,0 +1,100 @@
1
+ // Copyright © 2023 Apple Inc.
2
+
3
+ #pragma once
4
+
5
+ #include <ATen/mps/MPSStream.h>
6
+ #include <ctime>
7
+ #include <stack>
8
+
9
+ namespace at::mps {
10
+
11
+ // NOTE: don't create instances of this class directly.
12
+ // Use MPSEventPool to acquire instances of MPSEvent.
13
+ class MPSEvent {
14
+ public:
15
+ explicit MPSEvent(id_t ID, MPSStream* stream, bool enable_timing);
16
+ ~MPSEvent();
17
+
18
+ // records an event on the stream
19
+ void record(bool needsLock, bool syncEvent = false);
20
+ // makes all future work submitted to the stream wait for this event.
21
+ bool wait(bool needsLock, bool syncEvent = false);
22
+ // schedules a notifyListener callback for the event.
23
+ bool notify(bool needsLock, MTLSharedEventNotificationBlock block);
24
+ // checks if events are already signaled.
25
+ bool query() const;
26
+ // blocks the CPU thread until all the GPU work that was scheduled
+ // prior to recording this event has completed.
28
+ bool synchronize();
29
+ // resets this event with new parameters in case it gets reused from the event pool
30
+ void reset(MPSStream* stream, bool enable_timing);
31
+ // returns the unique ID of the event instance
32
+ id_t getID() const { return m_id; }
33
+ // returns the completion timestamp of the event
34
+ uint64_t getCompletionTime() const { return m_completion_time; }
35
+ // if already recorded, waits for cpu_sync_cv to be signaled
36
+ void waitForCpuSync();
37
+
38
+ private:
39
+ id_t m_id;
40
+ // enables measuring the completion time of the notifyListener of this event
41
+ bool m_enable_timing;
42
+ uint64_t m_signalCounter = 0;
43
+ MPSStream* m_stream = nullptr;
44
+ MTLSharedEvent_t m_event = nullptr;
45
+ MTLSharedEventListener* m_listener = nullptr;
46
+ // used to sync the events created on this Stream with CPU
47
+ std::mutex m_cpu_sync_mutex{};
48
+ std::condition_variable m_cpu_sync_cv{};
49
+ // CondVar predicate to sync the events created on this Stream with CPU
50
+ bool m_cpu_sync_completed = false;
51
+ // used to compute elapsed time
52
+ uint64_t m_completion_time = 0;
53
+
54
+ void recordLocked(bool syncEvent);
55
+ bool waitLocked(bool syncEvent);
56
+ bool notifyLocked(MTLSharedEventNotificationBlock block);
57
+ void notifyCpuSync();
58
+ static uint64_t getTime() {
59
+ return clock_gettime_nsec_np(CLOCK_MONOTONIC_RAW);
60
+ }
61
+ };
62
+
63
+ typedef std::unique_ptr<MPSEvent, std::function<void(MPSEvent*)>> MPSEventPtr;
64
+
65
+ class MPSEventPool {
66
+ public:
67
+ explicit MPSEventPool(MPSStream* default_stream);
68
+ ~MPSEventPool();
69
+
70
+ MPSEventPtr acquireEvent(bool enable_timing, MPSStream* stream);
71
+ void emptyCache();
72
+
73
+ // these are mainly used for MPSHooks and torch.mps.Event() bindings
74
+ id_t acquireEvent(bool enable_timing);
75
+ void releaseEvent(id_t event_id);
76
+ void recordEvent(id_t event_id, bool syncEvent);
77
+ void waitForEvent(id_t event_id, bool syncEvent);
78
+ void synchronizeEvent(id_t event_id);
79
+ bool queryEvent(id_t event_id);
80
+ // returns elapsed time between two recorded events in milliseconds
81
+ double elapsedTime(id_t start_event_id, id_t end_event_id);
82
+
83
+ private:
84
+ MPSStream* m_default_stream = nullptr;
85
+ std::recursive_mutex m_mutex;
86
+ std::stack<std::unique_ptr<MPSEvent>> m_pool{};
87
+ // dictionary to associate event IDs with event objects
88
+ // used to retain in-use events out of the pool
89
+ // for torch.mps.Event() bindings.
90
+ std::unordered_map<id_t, MPSEventPtr> m_in_use_events{};
91
+ uint64_t m_event_counter = 0;
92
+ std::function<void(MPSEvent*)> m_default_deleter;
93
+
94
+ MPSEvent* getInUseEvent(id_t event_id, bool locked = true);
95
+ };
96
+
97
+ // shared_ptr is used so that MPSEventPool is destroyed after its dependent instances
98
+ std::shared_ptr<MPSEventPool> getMPSEventPool();
99
+
100
+ } // namespace at::mps
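Not part of the diff — a minimal sketch of acquiring a pooled event and recording it on the default stream, assuming an Objective-C++ translation unit inside the MPS backend (the function name is illustrative; the custom deleter of MPSEventPtr presumably returns the event to the pool):

#include <ATen/mps/MPSEvent.h>

static void record_marker_on_default_stream() {
  auto pool = at::mps::getMPSEventPool();
  at::mps::MPSEventPtr event =
      pool->acquireEvent(/*enable_timing=*/false, at::mps::getDefaultMPSStream());
  // record the event on the stream, without forcing a commit
  event->record(/*needsLock=*/true, /*syncEvent=*/false);
  // block the CPU until the GPU work scheduled before the record has completed
  event->synchronize();
}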
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/mps/MPSGeneratorImpl.h ADDED
@@ -0,0 +1,52 @@
+ // Copyright © 2022 Apple Inc.
+
+ #pragma once
+
+ #include <ATen/core/Generator.h>
+ #include <ATen/core/PhiloxRNGEngine.h>
+ #include <c10/core/GeneratorImpl.h>
+ #include <c10/util/Optional.h>
+
+ namespace at {
+ namespace mps::detail {
+
+ static const uint32_t PHILOX_STATE_N = 7;
+ struct rng_data_pod {
+ std::array<uint32_t, PHILOX_STATE_N> state{1};
+ uint64_t seed = default_rng_seed_val;
+ };
+
+ TORCH_API const Generator& getDefaultMPSGenerator();
+ TORCH_API Generator createMPSGenerator(uint64_t seed_val = default_rng_seed_val);
+
+ } // namespace mps::detail
+
+ struct TORCH_API MPSGeneratorImpl : public c10::GeneratorImpl {
+ // Constructors
+ MPSGeneratorImpl(uint64_t seed_in = default_rng_seed_val);
+ ~MPSGeneratorImpl() override = default;
+
+ // MPSGeneratorImpl methods
+ std::shared_ptr<MPSGeneratorImpl> clone() const;
+ void set_current_seed(uint64_t seed) override;
+ void set_offset(uint64_t offset) override;
+ uint64_t get_offset() const override;
+ uint64_t current_seed() const override;
+ uint64_t seed() override;
+ void set_state(const c10::TensorImpl& new_state) override;
+ c10::intrusive_ptr<c10::TensorImpl> get_state() const override;
+ void update_philox_counters();
+
+ void set_engine(at::Philox4_32 engine) { engine_ = engine; };
+ at::Philox4_32 engine() { return engine_; };
+ uint32_t* state_data() { return data_.state.data(); }
+ static DeviceType device_type() { return DeviceType::MPS; };
+
+ private:
+ mps::detail::rng_data_pod data_;
+ at::Philox4_32 engine_;
+
+ MPSGeneratorImpl* clone_impl() const override;
+ };
+
+ } // namespace at
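Not part of the diff — a minimal sketch of reseeding the default MPS generator through the accessor declared above, assuming it is compiled inside ATen (the function name is illustrative; concurrent callers are expected to guard the generator with its mutex):

#include <ATen/mps/MPSGeneratorImpl.h>

static void reseed_default_mps_generator(uint64_t seed) {
  // Generator is a copyable handle that shares the underlying GeneratorImpl
  auto gen = at::mps::detail::getDefaultMPSGenerator();
  gen.set_current_seed(seed);
}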
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/mps/MPSGuardImpl.h ADDED
@@ -0,0 +1,174 @@
1
+ // Copyright © 2022 Apple Inc.
2
+
3
+ #pragma once
4
+ #include <c10/core/impl/DeviceGuardImplInterface.h>
5
+ #include <c10/macros/Macros.h>
6
+ #include <c10/util/Exception.h>
7
+ #include <ATen/Context.h>
8
+ #include <ATen/mps/MPSStream.h>
9
+ #include <ATen/mps/MPSEvent.h>
10
+
11
+ #ifdef __OBJC__
12
+ #include <Foundation/Foundation.h>
13
+ #include <Metal/Metal.h>
14
+ #include <MetalPerformanceShaders/MetalPerformanceShaders.h>
15
+ #endif
16
+
17
+ #include <ATen/Tensor.h>
18
+ #include <c10/core/MemoryFormat.h>
19
+ #include <c10/core/Storage.h>
20
+ #include <c10/core/TensorImpl.h>
21
+ #include <sys/_types/_size_t.h>
22
+ #include <memory>
23
+ #include <c10/core/UndefinedTensorImpl.h>
24
+ #include <c10/util/intrusive_ptr.h>
25
+
26
+
27
+ namespace at::mps {
28
+
29
+ typedef MPSEvent* mpsEvent_t;
30
+
31
+ // TODO: Move the MPSGuardImpl to inherit from NoOpDeviceGuardImpl
32
+ // https://github.com/pytorch/pytorch/issues/77170
33
+ struct TORCH_API MPSGuardImpl final : public c10::impl::DeviceGuardImplInterface {
34
+ static constexpr c10::DeviceType static_type = c10::DeviceType::MPS;
35
+
36
+ // constructor
37
+ MPSGuardImpl() {}
38
+ explicit MPSGuardImpl(c10::DeviceType t) {
39
+ TORCH_INTERNAL_ASSERT(t == c10::DeviceType::MPS);
40
+ }
41
+
42
+ // returns the type
43
+ c10::DeviceType type() const override {
44
+ return c10::DeviceType::MPS;
45
+ }
46
+
47
+ Device exchangeDevice(Device d) const override {
48
+ return Device(c10::DeviceType::MPS, 0);
49
+ }
50
+
51
+ Device getDevice() const override {
52
+ return Device(c10::DeviceType::MPS, 0);
53
+ }
54
+
55
+ c10::optional<Device> uncheckedGetDevice() const noexcept {
56
+ return Device(c10::DeviceType::MPS, 0);
57
+ }
58
+
59
+ void setDevice(Device d) const override {
60
+ TORCH_INTERNAL_ASSERT(d.is_mps());
61
+ }
62
+
63
+ void uncheckedSetDevice(Device d) const noexcept override {
64
+ // TODO: Currently setting only device 0
65
+ }
66
+
67
+ Stream getStream(Device d) const noexcept override {
68
+ return Stream(Stream::DEFAULT, Device(c10::DeviceType::MPS, 0));
69
+ }
70
+
71
+ Stream getDefaultStream(Device d) const override {
72
+ return Stream(Stream::DEFAULT, Device(c10::DeviceType::MPS, 0));
73
+ }
74
+
75
+ // NB: These do NOT set the current device
76
+ Stream exchangeStream(Stream s) const noexcept override {
77
+ return Stream(Stream::DEFAULT, Device(c10::DeviceType::MPS, 0));
78
+ }
79
+ DeviceIndex deviceCount() const noexcept override {
80
+ if (at::hasMPS()) {
81
+ //TODO: extend it for multi-device case
82
+ return 1;
83
+ } else {
84
+ return 0;
85
+ }
86
+ }
87
+
88
+ // Event-related functions
89
+ void createEvent(
90
+ mpsEvent_t* event,
91
+ const EventFlag flag) const;
92
+
93
+ void destroyEvent(
94
+ void* event,
95
+ const DeviceIndex device_index) const noexcept override;
96
+
97
+ void record(
98
+ void** event,
99
+ const Stream& stream,
100
+ const DeviceIndex device_index,
101
+ const EventFlag flag) const override;
102
+
103
+ void block(
104
+ void* event,
105
+ const Stream& stream) const override;
106
+
107
+ bool queryEvent(void* event) const override;
108
+
109
+ };
110
+
111
+ /// A variant of OptionalDeviceGuard that is specialized for MPS.
112
+ struct OptionalMPSGuard {
113
+ explicit OptionalMPSGuard() : guard_() {}
114
+
115
+ explicit OptionalMPSGuard(c10::optional<Device> device_opt)
116
+ : guard_(device_opt) {}
117
+
118
+ /// Set the current MPS device to the passed device index, if it is not
119
+ /// nullopt
120
+ explicit OptionalMPSGuard(c10::optional<DeviceIndex> device_index_opt)
121
+ : guard_(device_index_opt) {}
122
+
123
+ // Copy is not allowed
124
+ OptionalMPSGuard(const OptionalMPSGuard&) = delete;
125
+ OptionalMPSGuard& operator=(const OptionalMPSGuard&) = delete;
126
+ OptionalMPSGuard(OptionalMPSGuard&& other) = delete;
127
+ OptionalMPSGuard& operator=(OptionalMPSGuard&& other) = delete;
128
+
129
+ /// Sets the MPS device to the given device, initializing the guard if it
130
+ /// is not already initialized. Errors if the given device is not a MPS
131
+ /// device.
132
+ void set_device(Device device) {
133
+ guard_.set_device(device);
134
+ }
135
+
136
+ /// Sets the MPS device to the given device, initializing the guard if it is
137
+ /// not already initialized. Errors if the given device is not a MPS device.
138
+ void reset_device(Device device) {
139
+ guard_.reset_device(device);
140
+ }
141
+
142
+ /// Sets the MPS device to the given device index, initializing the guard if
143
+ /// it is not already initialized.
144
+ void set_index(DeviceIndex device_index) {
145
+ guard_.set_index(device_index);
146
+ }
147
+
148
+ /// Returns the device that was set immediately prior to initialization of the
149
+ /// guard, or nullopt if the guard is uninitialized.
150
+ c10::optional<Device> original_device() const {
151
+ return guard_.original_device();
152
+ }
153
+
154
+ /// Returns the most recent device that was set using this device guard,
155
+ /// either from construction, or via set_device, if the guard is initialized,
156
+ /// or nullopt if the guard is uninitialized.
157
+ c10::optional<Device> current_device() const {
158
+ return guard_.current_device();
159
+ }
160
+
161
+ /// Restore the original MPS device, resetting this guard to uninitialized
162
+ /// state.
163
+ void reset() {
164
+ guard_.reset();
165
+ }
166
+
167
+ private:
168
+ c10::impl::InlineOptionalDeviceGuard<MPSGuardImpl> guard_;
169
+ };
170
+
171
+
172
+ C10_REGISTER_GUARD_IMPL(MPS, MPSGuardImpl);
173
+
174
+ } // namespace at::mps
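Not part of the diff — a minimal sketch of using OptionalMPSGuard from above, assuming it is compiled inside ATen (the function name is illustrative; as the TODOs above note, only device index 0 is handled today):

#include <ATen/mps/MPSGuardImpl.h>
#include <c10/util/Optional.h>

static void run_on_mps(c10::optional<c10::Device> maybe_device) {
  // initializes the guard only when a device is actually provided
  at::mps::OptionalMPSGuard guard(maybe_device);
  // ... launch MPS work here ...
}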
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/mps/MPSHooks.h ADDED
@@ -0,0 +1,57 @@
1
+ // Copyright © 2022 Apple Inc.
2
+
3
+ #pragma once
4
+
5
+ #include <ATen/detail/MPSHooksInterface.h>
6
+ #include <ATen/Generator.h>
7
+ #include <ATen/mps/MPSEvent.h>
8
+ #include <c10/util/Optional.h>
9
+
10
+ namespace at::mps {
11
+
12
+ // The real implementation of MPSHooksInterface
13
+ struct MPSHooks : public at::MPSHooksInterface {
14
+ MPSHooks(at::MPSHooksArgs) {}
15
+ void initMPS() const override;
16
+
17
+ // MPSDevice interface
18
+ bool hasMPS() const override;
19
+ bool isOnMacOSorNewer(unsigned major, unsigned minor) const override;
20
+
21
+ // MPSGeneratorImpl interface
22
+ const Generator& getDefaultMPSGenerator() const override;
23
+
24
+ // MPSStream interface
25
+ void deviceSynchronize() const override;
26
+ void commitStream() const override;
27
+ void* getCommandBuffer() const override;
28
+ void* getDispatchQueue() const override;
29
+
30
+ // MPSAllocator interface
31
+ Allocator* getMPSDeviceAllocator() const override;
32
+ void emptyCache() const override;
33
+ size_t getCurrentAllocatedMemory() const override;
34
+ size_t getDriverAllocatedMemory() const override;
35
+ void setMemoryFraction(double ratio) const override;
36
+
37
+ // MPSProfiler interface
38
+ void profilerStartTrace(const std::string& mode, bool waitUntilCompleted) const override;
39
+ void profilerStopTrace() const override;
40
+
41
+ // MPSEvent interface
42
+ uint32_t acquireEvent(bool enable_timing) const override;
43
+ void releaseEvent(uint32_t event_id) const override;
44
+ void recordEvent(uint32_t event_id) const override;
45
+ void waitForEvent(uint32_t event_id) const override;
46
+ void synchronizeEvent(uint32_t event_id) const override;
47
+ bool queryEvent(uint32_t event_id) const override;
48
+ double elapsedTimeOfEvents(uint32_t start_event_id, uint32_t end_event_id) const override;
49
+
50
+ // Compatibility with Accelerator API
51
+ bool hasPrimaryContext(DeviceIndex device_index) const override {
52
+ // When MPS is available, it is always in use for the one device.
53
+ return true;
54
+ }
55
+ };
56
+
57
+ } // namespace at::mps
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/mps/MPSProfiler.h ADDED
@@ -0,0 +1,393 @@
1
+ // Copyright © 2022 Apple Inc.
2
+
3
+ #pragma once
4
+
5
+ #include <ATen/Tensor.h>
6
+ #include <ATen/mps/MPSStream.h>
7
+ #include <ATen/mps/MPSAllocatorInterface.h>
8
+
9
+ #include <os/signpost.h>
10
+ #include <os/log.h>
11
+
12
+ #include <sstream>
13
+ #include <string>
14
+ #include <atomic>
15
+ #include <unordered_map>
16
+ #include <utility>
17
+ #include <ctime>
18
+
19
+ namespace at::mps {
20
+
21
+ namespace Profiler {
22
+
23
+ struct BaseInfo {
24
+ // profiling info types
25
+ enum class Type {
26
+ GRAPH,
27
+ KERNEL,
28
+ COPY,
29
+ CPU_FALLBACK,
30
+ };
31
+
32
+ BaseInfo(Type infoType, uint64_t Id, const uintptr_t Handle) :
33
+ type(infoType), profileId(Id), handle(Handle) { }
34
+ virtual ~BaseInfo() = default;
35
+
36
+ // type of profiling info
37
+ Type type;
38
+ // unique profile ID for execution instances of operations or copies
39
+ uint64_t profileId;
40
+ // ID generated by os_signpost
41
+ // since it's possible to use event and interval-based signposts at the
42
+ // same time, we need separate IDs for each.
43
+ os_signpost_id_t eventSignpostId = 0, intervalSignpostId = 0;
44
+ // accumulated GPU time in ms (obtained from CompletionHandler's "GPUEndTime - GPUStartTime")
45
+ std::atomic<double> totalGpuTime{0.0};
46
+ // accumulated Scheduling time in ms (obtained from CompletionHandler's "KernelEndTime - KernelStartTime")
47
+ std::atomic<double> totalSchedulingTime{0.0};
48
+ // indicates if the operation or copy execution has completed
49
+ std::atomic_bool completed{false};
50
+ // handle used to identify the profile info's instance (usually the pointer)
51
+ const uintptr_t handle;
52
+
53
+ virtual const std::string toString(double gpuTime = 0, double schedulingTime = 0) const;
54
+ // builds a string for a tensor (format: Device:ScalarType[tensor.sizes()])
55
+ static std::string buildTensorString(const Tensor& tensor, bool includeBufferId = false) {
56
+ if (tensor.defined()) {
57
+ std::stringstream tensorStr;
58
+ auto deviceType = tensor.device().type();
59
+ tensorStr << c10::DeviceTypeName(deviceType);
60
+ // see comments for INCLUDE_BUFFER_ID
61
+ if (includeBufferId && deviceType == at::kMPS) {
62
+ id<MTLBuffer> buffer = __builtin_bit_cast(id<MTLBuffer>, tensor.storage().data());
63
+ tensorStr << "(buf#" << (getIMPSAllocator()->getBufferId(buffer))
64
+ << ":" << buffer.retainCount << ")";
65
+ }
66
+ tensorStr << ":"
67
+ << tensor.scalar_type() << tensor.sizes();
68
+ return tensorStr.str();
69
+ } else {
70
+ return "undefined";
71
+ }
72
+ }
73
+ static uint64_t getTime() {
74
+ return clock_gettime_nsec_np(CLOCK_MONOTONIC_RAW);
75
+ }
76
+ };
77
+
78
+ struct OperationInfo : BaseInfo {
79
+ OperationInfo(const void* Handle, bool IsGraph, uint64_t Id, const std::string& StrKey) :
80
+ BaseInfo(IsGraph ? Type::GRAPH : Type::KERNEL, Id, uintptr_t(Handle)), strKey(StrKey) { }
81
+
82
+ uint64_t runCount = 0;
83
+ std::string strKey;
84
+
85
+ const std::string toString(double gpuTime = 0, double schedulingTime = 0) const override;
86
+
87
+ // builds a string for a kernel
88
+ static std::string buildKernelString(const std::string& kernelName,
89
+ const TensorList& tensors,
90
+ bool includeBufferId = false) {
91
+ std::stringstream kernelStr;
92
+ kernelStr << kernelName;
93
+ for (const Tensor& tensor: tensors) {
94
+ kernelStr << ":" << BaseInfo::buildTensorString(tensor, includeBufferId);
95
+ }
96
+ return kernelStr.str();
97
+ }
98
+ };
99
+
100
+ struct CpuFbInfo : BaseInfo {
101
+ CpuFbInfo(uint64_t Id, const std::string& OpName) :
102
+ BaseInfo(Type::CPU_FALLBACK, Id, 0), opName(OpName) { }
103
+
104
+ uint64_t runCount = 0;
105
+ // the current and total overhead of copies in bytes required to convert the Op's
106
+ // input tensors from MPS to CPU and then output from CPU back to MPS
107
+ size_t currentCopyOverhead = 0;
108
+ size_t totalCopyOverhead = 0;
109
+ std::string opName;
110
+ std::string strKey;
111
+ uint64_t startTime = 0;
112
+
113
+ const std::string toString(double gpuTime = 0, double schedulingTime = 0) const override;
114
+
115
+ void updateCopyOverhead(const TensorList& tensors) {
116
+ currentCopyOverhead = 0;
117
+ for (const Tensor& tensor: tensors) {
118
+ if (tensor.defined()) {
119
+ currentCopyOverhead += tensor.nbytes();
120
+ }
121
+ }
122
+ totalCopyOverhead += currentCopyOverhead;
123
+ }
124
+ };
125
+
126
+ struct CopyInfo : BaseInfo {
127
+ enum class Kind {
128
+ MPS_TO_MPS,
129
+ MPS_TO_CPU,
130
+ CPU_TO_MPS,
131
+ };
132
+
133
+ CopyInfo(const void* Handle, size_t Length, uint64_t Id, bool IsNonBlocking, bool UsesBlitter) :
134
+ BaseInfo(Type::COPY, Id, uintptr_t(Handle)), kind(Kind::MPS_TO_MPS),
135
+ length(Length), isNonBlocking(IsNonBlocking), usesBlitter(UsesBlitter) { }
136
+
137
+ Kind kind;
138
+ size_t length;
139
+ bool isNonBlocking;
140
+ bool usesBlitter;
141
+ std::string srcStrKey;
142
+ std::string dstStrKey;
143
+ // for copies that don't use blitters, we measure CPU time
144
+ uint64_t startTime = 0;
145
+
146
+ const std::string toString(double gpuTime = 0, double schedulingTime = 0) const override;
147
+
148
+ static std::string buildTensorString(const void* buffer, const OptionalTensorRef tensor, bool includeBufferId = false);
149
+
150
+ static bool isStorageOnMPS(const void* buffer, const OptionalTensorRef tensor) {
151
+ if (tensor.has_value()) {
152
+ return tensor->device().type() == at::kMPS;
153
+ }
154
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(buffer);
155
+ // getUnalignedBufferSize() returns -1 if input buffer is not on MPS device
156
+ return getIMPSAllocator()->getUnalignedBufferSize(buffer) >= 0;
157
+ }
158
+
159
+ static Kind getCopyKind(const void* srcBuffer, const void* dstBuffer,
160
+ const OptionalTensorRef srcTensor, const OptionalTensorRef dstTensor) {
161
+ const bool isSrcOnMPS = isStorageOnMPS(srcBuffer, srcTensor);
162
+ const bool isDstOnMPS = isStorageOnMPS(dstBuffer, dstTensor);
163
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(isSrcOnMPS || isDstOnMPS);
164
+ if (isSrcOnMPS && !isDstOnMPS) {
165
+ return Kind::MPS_TO_CPU;
166
+ } else if (!isSrcOnMPS && isDstOnMPS) {
167
+ return Kind::CPU_TO_MPS;
168
+ }
169
+ return Kind::MPS_TO_MPS;
170
+ }
171
+ };
172
+
173
+ struct CopyStat : CopyInfo {
174
+ explicit CopyStat(std::string CopyKindStr) :
175
+ CopyInfo(nullptr, 0, 0, false, false), kindStr(std::move(CopyKindStr)) {}
176
+ // total number of copies
177
+ size_t totalCount = 0;
178
+ // number of Scalar copies (i.e., less than sizeof(int64))
179
+ size_t scalarsCount = 0;
180
+ // number of blocking copies (i.e., require syncing to GPU)
181
+ size_t blockingCount = 0;
182
+ // number of copies that used memcpy(), instead of Metal Blit Encoder
183
+ size_t memcpyCount = 0;
184
+ // accumulated GPU time in ms for the scalar copies
185
+ std::atomic<double> scalarsGpuTime{0.0};
186
+ // copy kind in string type
187
+ std::string kindStr;
188
+ };
189
+
190
+ class MPSProfiler {
191
+ public:
192
+ // lower 16 bits used for profiler options
193
+ enum ProfileOptions : uint32_t {
194
+ OPTIONS_NONE = 0,
195
+ // ALL_* means, all signpost types (RUN_OPERATION|BLIT_COPY|CPU_FALLBACK, etc.)
196
+ // (used for convenience to not compute bit flags by OR-ing manually)
197
+ // trace all signpost types using events
198
+ ALL_SIGNPOST_EVENTS = (1 << 0),
199
+ // trace all signpost types using intervals
200
+ ALL_SIGNPOST_INTERVALS = (1 << 1),
201
+ // always wait for command buffer to finish executing after each commit
202
+ WAIT_UNTIL_COMPLETED = (1 << 2),
203
+ // for interval-based signposts, include the scheduling portion of
204
+ // Graph/Kernel/Copy executions as well.
205
+ // if this flag is disabled, only the "GPU run time" is included in the interval,
+ // and not the scheduling time.
207
+ INCLUDE_SCHEDULE_INTERVAL = (1 << 3),
208
+
209
+ // use these if you need to trace signposts types individually (rarely required)
210
+ // trace signpost using intervals
211
+ USE_INTERVALS = (1 << 4),
212
+ // trace signpost by emitting events
213
+ USE_EVENTS = (1 << 5),
214
+ // used for sanity check (Change this when new option added)
215
+ OPTIONS_COUNT = (USE_EVENTS << 1) - 1,
216
+ };
217
+
218
+ // when adding new types, #define the type string in MPSProfiler.mm as well.
219
+ // upper 16 bits used for event types
220
+ enum SignpostTypes : uint32_t {
221
+ SIGNPOST_NONE = 0,
222
+ // trace signposts for PyTorch operation executions
223
+ RUN_OPERATION = (1 << 16),
224
+ // trace signposts for blitter copies
225
+ BLIT_COPY = (1 << 17),
226
+ // trace signposts for ops that fall back on CPU
227
+ CPU_FALLBACK = (1 << 18),
228
+ // used for sanity check (Change this when new type added)
229
+ SIGNPOST_COUNT = (CPU_FALLBACK << 1) - 1,
230
+ };
231
+
232
+ enum LogOptions : uint32_t {
233
+ LOG_NONE = 0,
234
+
235
+ // Info logging options during execution
236
+ // -------------------------------------
237
+ // prints operation info (id/key/run_count) during execution
238
+ OPERATION_INFO = (1 << 0),
239
+ // prints copy info (src/dst tensors/buffers, size, etc.) during execution
240
+ COPY_INFO = (1 << 1),
241
+ // prints CPU Fallback info (id/runCount/opName/copyOverhead) during execution
242
+ CPU_FALLBACK_INFO = (1 << 2),
243
+
244
+ // Profiling Statistics logging options when process terminates
245
+ // ------------------------------------------------------------
246
+ // prints all stats (OPERATION_STATS, COPY_STATS, CPU_FALLBACK_STATS) before process terminates
247
+ // this is convenient to not combine following stats bit flags manually
248
+ ALL_STATS = (1 << 3),
249
+ // prints operation stats (GPU times, run count, etc.) before process terminates
250
+ OPERATION_STATS = (1 << 4),
251
+ // prints copies stats (GPU times, copy kinds, sizes, etc.) before process terminates
252
+ COPY_STATS = (1 << 5),
253
+ // prints CPU Fallback stats (CPU times, run times, size of MPS<->CPU copies
254
+ // for tensors, etc.) before process terminates
255
+ CPU_FALLBACK_STATS = (1 << 6),
256
+
257
+ // Metadata format options when logging the info
258
+ // ---------------------------------------------
259
+ // if enabled, includes GPU run time in metadata (i.e., GPUEndTime-GPUStartTime
260
+ // from Metal Command Buffers) (e.g., [GPU=0.324 ms])
261
+ INCLUDE_GPU_TIME = (1 << 7),
262
+ // if enabled, includes GPU scheduling time in metadata separately
263
+ // (i.e., KernelEndTime-KernelStartTime from Metal Command Buffers)
264
+ // e.g., [GPU=0.324 ms, KRNL=0.036 ms]
265
+ INCLUDE_KERNEL_TIME = (1 << 8),
266
+ // if enabled, includes the unique buffer ID in metadata for the storage
267
+ // of a tensor that was allocated on MPSAllocator. This is useful (along with
268
+ // the EV "PYTORCH_DEBUG_MPS_ALLOCATOR") to identify buffers that are involved
269
+ // with various operations.
270
+ INCLUDE_BUFFER_ID = (1 << 9),
271
+
272
+ // used for sanity check (Change this when new option added)
273
+ LOG_COUNT = (INCLUDE_BUFFER_ID << 1) - 1,
274
+ };
275
+
276
+ explicit MPSProfiler();
277
+ ~MPSProfiler();
278
+
279
+ // the handle is either "MPSGraph*" or "id<MTLComputePipelineState>" for Metal Kernels
280
+ // the beginProfile*() functions return a profileId which is unique per graph/kernel/copy
281
+ uint64_t beginProfileKernel(const void* handle, const std::string& strKey, bool isGraph);
282
+ uint64_t beginProfileKernel(const void* handle, const std::string& kernelName, const TensorList& tensors);
283
+ uint64_t beginProfileCopy(const void* srcBuffer, const void* dstBuffer,
284
+ const OptionalTensorRef srcTensor,
285
+ const OptionalTensorRef dstTensor,
286
+ size_t length, bool isNonBlocking, bool usesBlitter = true);
287
+ uint64_t beginProfileCPUFallback(const std::string& opName, const TensorList& tensors);
288
+ void beginProfileGPUInterval(const void* handle);
289
+
290
+ void endProfileCopy(uint64_t profileId, SyncType syncType);
291
+ void endProfileKernel(const void* handle, SyncType syncType = SyncType::NONE);
292
+ void endProfileCPUFallback(const std::string& opName);
293
+
294
+ // these are used to hook into Python bindings for torch.mps.profiler module.
295
+ // this enables generating OS Signpost traces from MPSProfiler on-demand
296
+ // during runtime (instead of environment variables).
297
+ // The "mode" could be either "interval", "event", or both "interval,event"
298
+ // for interval-based and/or event-based signpost tracing.
299
+ void StartTrace(const string& mode, bool waitUntilCompleted);
300
+ void StopTrace();
301
+
302
+ // convenience functions to indicate whether signpost tracing or
303
+ // logging are enabled for the SignpostTypes
304
+ bool isOperationProfilingEnabled() const {
305
+ return (m_signpost_types & SignpostTypes::RUN_OPERATION) ||
306
+ (m_log_options & (LogOptions::OPERATION_INFO | LogOptions::OPERATION_STATS));
307
+ }
308
+ bool isCopyProfilingEnabled() const {
309
+ return (m_signpost_types & SignpostTypes::BLIT_COPY) ||
310
+ (m_log_options & (LogOptions::COPY_INFO | LogOptions::COPY_STATS));
311
+ }
312
+ bool isCPUFallbackProfilingEnabled() const {
313
+ return (m_signpost_types & SignpostTypes::CPU_FALLBACK) ||
314
+ (m_log_options & (LogOptions::CPU_FALLBACK_INFO | LogOptions::CPU_FALLBACK_STATS));
315
+ }
316
+ bool isSignpostTracingEnabled() const {
317
+ return (m_signpost_types != SignpostTypes::SIGNPOST_NONE);
318
+ }
319
+
320
+ private:
321
+ // indicates what type of signpost types are enabled and traced by MPS profiler.
322
+ uint32_t m_signpost_types = 0;
323
+ uint32_t m_profile_options = 0;
324
+ uint32_t m_log_options = 0;
325
+ uint64_t m_kernel_counter = 0;
326
+ uint64_t m_graph_counter = 0;
327
+ uint64_t m_cpu_fb_counter = 0;
328
+ uint64_t m_copy_counter = 0;
329
+ // technically, it's possible to trace both events and intervals at the same time
330
+ // so we use separate os_log categories for them
331
+ os_log_t m_os_log_events;
332
+ os_log_t m_os_log_intervals;
333
+ // stats logging could run either from destructor or signal handler
334
+ // so this is used to check if logging has already started.
335
+ std::atomic_bool hasLoggedStats{false};
336
+ // indicates there are pending completionHandler callbacks that haven't been called yet.
337
+ std::atomic_bool hasPendingCompletionHandlers{false};
338
+ // used to capture sigint signal to log profiling stats
339
+ static struct sigaction currentSigint, previousSigint;
340
+
341
+ // We use the following lists for two reasons:
342
+ // 1- for interval-based signposts the "begin" point won't be in same function
343
+ // as the "end" point where we need to be able to retrieve signpost's info
344
+ // 2- if Operations info need to be logged when process ends using LogOptions::OPERATION_INFO.
345
+
346
+ // the pointer key for this map is either "MPSGraph*" or "id<MTLComputePipelineState>" for Metal Kernels
347
+ // this list is retained and could be logged along with aggregate profiling numbers when the process ends.
348
+ std::unordered_map<uintptr_t, std::unique_ptr<OperationInfo>> m_op_info_list{};
349
+ // the string key for this map is the op name that we fall back to execute on CPU
350
+ // this list is retained and could be logged along with aggregate profiling numbers when the process ends.
351
+ std::unordered_map<std::string, std::unique_ptr<CpuFbInfo>> m_cpu_fb_info_list{};
352
+ // this list contains the info for copies, and its key is the unique profileId
353
+ // which is generated from m_copy_counter
354
+ // The copyInfo list is not retained.
355
+ std::unordered_map<uint64_t, std::unique_ptr<CopyInfo>> m_copy_info_list{};
356
+ // a short list that contains copy stats
357
+ std::unordered_map<CopyInfo::Kind, std::unique_ptr<CopyStat>> m_copy_stat_list{};
358
+
359
+ void initialize();
360
+ void beginProfileExecution(BaseInfo& info, bool cpuExecution = false);
361
+ void endProfileExecution(BaseInfo& info, os_signpost_id_t event_signpost_id,
362
+ os_signpost_id_t interval_signpost_id,
363
+ double gpuTime, double schedulingTime);
364
+ void addProfilerScheduledHandler(BaseInfo& info);
365
+ void addProfilerCompletedHandler(BaseInfo& info, SyncType syncType);
366
+ void emitSignpostEvent(SignpostTypes signpost_type, os_signpost_id_t signpost_id,
367
+ const std::string& msg) const;
368
+ void beginSignpostInterval(SignpostTypes signpost_type, os_signpost_id_t signpost_id,
369
+ const std::string& msg) const;
370
+ void endSignpostInterval(SignpostTypes signpost_type, os_signpost_id_t signpost_id) const;
371
+
372
+ void updateCopyStats(const CopyInfo& copyInfo, double gpuTime, double schedulingTime);
373
+ // returns true if logging the profiling info "during the execution" is enabled
374
+ bool isProfileInfoLoggingEnabled(BaseInfo::Type infoType, bool isExecutionEnded);
375
+ // logs all the profiling stats that are enabled
376
+ void logProfilingStats();
377
+ // logs kernel profiling stats when the process ends.
378
+ void logOperationsProfilingStats(std::FILE* f) const;
379
+ // logs CPU Fallback profiling stats when the process ends.
380
+ void logCPUFallbackProfilingStats(std::FILE* f) const;
381
+ // logs copy profiling stats when the process ends.
382
+ void logCopyProfilingStats(std::FILE* f) const;
383
+
384
+ os_signpost_id_t generateSignpostId(os_signpost_type_t signpostType, const void* ptr = nullptr);
385
+ static SignpostTypes getSignpostType(BaseInfo::Type infoType);
386
+ static void handleIntSignal(int signal);
387
+ };
388
+
389
+ } // namespace Profiler
390
+
391
+ Profiler::MPSProfiler& getMPSProfiler();
392
+
393
+ } // namespace at::mps
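Not part of the diff — a minimal sketch of bracketing a kernel dispatch with the profiler hooks declared above, assuming an Objective-C++ translation unit inside the MPS backend (pso, kernel_name and tensors are placeholders for the caller's pipeline handle and operands):

#include <ATen/mps/MPSProfiler.h>

static void dispatch_with_profiling(const void* pso,
                                    const std::string& kernel_name,
                                    const at::TensorList& tensors) {
  auto& profiler = at::mps::getMPSProfiler();
  const bool profiled = profiler.isOperationProfilingEnabled();
  if (profiled) {
    // returns a unique profileId per execution instance (unused in this sketch)
    (void)profiler.beginProfileKernel(pso, kernel_name, tensors);
  }
  // ... encode and dispatch the Metal kernel here ...
  if (profiled) {
    profiler.endProfileKernel(pso);
  }
}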
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/mps/MPSStream.h ADDED
@@ -0,0 +1,133 @@
1
+ // Copyright © 2022 Apple Inc.
2
+
3
+ #pragma once
4
+
5
+ #include <cstdint>
6
+ #include <utility>
7
+
8
+ #include <c10/core/DeviceGuard.h>
9
+ #include <c10/util/Exception.h>
10
+ #include <c10/core/Stream.h>
11
+ #include <ATen/mps/MPSDevice.h>
12
+
13
+ #ifdef __OBJC__
14
+ #include <Foundation/Foundation.h>
15
+ #include <Metal/Metal.h>
16
+ #include <MetalPerformanceShaders/MetalPerformanceShaders.h>
17
+ #include <MetalPerformanceShadersGraph/MetalPerformanceShadersGraph.h>
18
+ typedef id<MTLCommandQueue> MTLCommandQueue_t;
19
+ typedef id<MTLCommandBuffer> MTLCommandBuffer_t;
20
+ typedef id<MTLComputeCommandEncoder> MTLComputeCommandEncoder_t;
21
+ typedef id<MTLSharedEvent> MTLSharedEvent_t;
22
+ typedef id<MTLDevice> MTLDevice_t;
23
+ #else
24
+ typedef void* MTLCommandQueue_t;
25
+ typedef void* MTLCommandQueue;
26
+ typedef void* MTLCommandBuffer_t;
27
+ typedef void* MTLCommandBuffer;
28
+ typedef void* MTLComputeCommandEncoder_t;
29
+ typedef void* MTLSharedEvent_t;
30
+ typedef void* dispatch_queue_t;
31
+ typedef void* MTLDevice_t;
32
+ #define nil NULL
33
+ #endif
34
+
35
+
36
+ namespace at::mps {
37
+
38
+ //-----------------------------------------------------------------
39
+ // MPSStream
40
+ //-----------------------------------------------------------------
41
+
42
+ enum class SyncType {
43
+ NONE, // no commit to command buffer
44
+ COMMIT, // commit and flush the command buffer
45
+ COMMIT_AND_WAIT, // flush and wait for command buffer execution to finish
46
+ COMMIT_AND_CONTINUE,// commit and continue with a new underlying command buffer
47
+ COMMIT_ADAPTIVE, // commit adaptively based on available memory
48
+ };
49
+
50
+ class TORCH_API MPSStream
51
+ {
52
+ public:
53
+ enum Unchecked { UNCHECKED };
54
+
55
+ /// Construct a MPSStream from a Stream. This construction is checked,
56
+ /// and will raise an error if the Stream is not, in fact, a MPS stream.
57
+ explicit MPSStream(Stream stream);
58
+
59
+ ~MPSStream();
60
+ MTLCommandQueue_t commandQueue() const { return _commandQueue; };
61
+ dispatch_queue_t queue() const { return _serialQueue; }
62
+
63
+ MPSCommandBuffer* commandBuffer();
64
+ MTLComputeCommandEncoder_t commandEncoder();
65
+ void endKernelCoalescing();
66
+ void synchronize(SyncType syncType);
67
+ void fill(id<MTLBuffer> buffer, uint8_t value, size_t length, size_t offset, SyncType syncType = SyncType::NONE);
68
+ void copy(id<MTLBuffer> srcBuffer, id<MTLBuffer> dstBuffer,
69
+ size_t length, size_t srcOffset, size_t dstOffset,
70
+ uint64_t profileId, SyncType syncType = SyncType::NONE);
71
+ void copy_and_sync(id<MTLBuffer> srcBuffer, id<MTLBuffer> dstBuffer,
72
+ size_t length, size_t srcOffset, size_t dstOffset,
73
+ bool non_blocking, uint64_t profileId);
74
+ void executeMPSGraph(MPSGraph* mpsGraph, NSDictionary* feeds, NSDictionary* results, SyncType syncType = SyncType::NONE);
75
+ void addCompletedHandler(MTLCommandBufferHandler block);
76
+
77
+ /// Get the MPS device index that this stream is associated with.
78
+ c10::DeviceIndex device_index() const { return _stream.device_index(); }
79
+
80
+ MTLCommandQueue_t stream() const { return _commandQueue; };
81
+
82
+ MTLDevice_t device() const { return [_commandQueue device];}
83
+
84
+ /// Explicit conversion to Stream.
85
+ Stream unwrap() const { return _stream; }
86
+
87
+ private:
88
+ Stream _stream;
89
+ MTLCommandQueue_t _commandQueue = nil;
90
+ MPSCommandBuffer* _commandBuffer = nil;
91
+ MPSCommandBuffer* _prevCommandBuffer = nil;
92
+ MTLComputeCommandEncoder_t _commandEncoder = nil;
93
+ MPSGraphExecutionDescriptor *_executionDescriptor = nil;
94
+ MPSGraphCompilationDescriptor *_compilationDescriptor = nil;
95
+ dispatch_queue_t _serialQueue = nullptr;
96
+ // CommitAndContinue is enabled by default
97
+ bool _enableCommitAndContinue = true;
98
+
99
+ // use synchronize() to access any of these commit functions outside MPSStream
100
+ void commit();
101
+ void commitAndWait();
102
+ void commitAndContinue();
103
+ void flush();
104
+ };
105
+
106
+ /**
107
+ * Get the current MPS stream
108
+ */
109
+ TORCH_API MPSStream* getCurrentMPSStream();
110
+
111
+ /**
112
+ * Get the default MPS stream
113
+ */
114
+ TORCH_API MPSStream* getDefaultMPSStream();
115
+
116
+ //-----------------------------------------------------------------
117
+ // MPSStreamImpl
118
+ //-----------------------------------------------------------------
119
+
120
+ class TORCH_API MPSStreamImpl
121
+ {
122
+ public:
123
+ /**
124
+ * Gets single instance of the MPSStream.
125
+ */
126
+ static MPSStream* getInstance();
127
+
128
+ private:
129
+ static MPSStream* _stream;
130
+ MPSStreamImpl();
131
+ };
132
+
133
+ } // namespace at::mps
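A minimal usage sketch of the interface above (the call site is an assumption, not part of this diff): a backend routine fetches the active stream and forces all previously encoded work to finish before results are read back.

    // Sketch under stated assumptions: getCurrentMPSStream() and SyncType are the
    // declarations shown above; the surrounding function is illustrative only.
    #include <ATen/mps/MPSStream.h>

    void wait_for_pending_mps_work() {
      at::mps::MPSStream* stream = at::mps::getCurrentMPSStream();
      // Commit the current command buffer and block until it has executed.
      stream->synchronize(at::mps::SyncType::COMMIT_AND_WAIT);
    }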
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cpu/ChannelShuffleKernel.h ADDED
@@ -0,0 +1,14 @@
1
+ #pragma once
2
+ #include <ATen/native/DispatchStub.h>
3
+ #include <cstdint>
4
+
5
+ namespace at {
6
+ class TensorBase;
7
+ }
8
+
9
+ namespace at { namespace native {
10
+
11
+ using channel_shuffle_fn = void(*)(TensorBase&, const TensorBase&, int64_t);
12
+ DECLARE_DISPATCH(channel_shuffle_fn, channel_shuffle_kernel);
13
+
14
+ }} // at::native
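The header above only declares the dispatch stub; a rough sketch of the matching definition and CPU registration that normally live in the kernel translation units (the impl function name and file split are assumptions):

    // Sketch: DEFINE_DISPATCH / REGISTER_DISPATCH come from ATen/native/DispatchStub.h.
    // In the generic ChannelShuffle .cpp:
    DEFINE_DISPATCH(channel_shuffle_kernel);

    // In the per-ISA CPU kernel .cpp, an implementation is registered:
    static void channel_shuffle_kernel_impl(
        at::TensorBase& output, const at::TensorBase& input, int64_t groups) {
      // reorder channels: view C as (groups, C/groups) and transpose per pixel
    }
    REGISTER_DISPATCH(channel_shuffle_kernel, &channel_shuffle_kernel_impl);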
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cpu/DistributionTemplates.h ADDED
@@ -0,0 +1,369 @@
1
+ #pragma once
2
+
3
+ #include <ATen/CPUApplyUtils.h>
4
+ #include <ATen/Dispatch.h>
5
+ #include <ATen/Dispatch_v2.h>
6
+ #include <ATen/ExpandBase.h>
7
+ #include <ATen/core/DistributionsHelper.h>
8
+ #include <ATen/native/TensorIterator.h>
9
+ #include <ATen/native/cpu/Loops.h>
10
+ #include <limits>
11
+ #include <mutex>
12
+
13
+ #ifdef CPU_CAPABILITY_AVX2
14
+ #include <ATen/native/cpu/avx_mathfun.h>
15
+ #include <c10/util/irange.h>
16
+ #endif
17
+
18
+
19
+ namespace at {
20
+ namespace native {
21
+ namespace templates {
22
+ namespace cpu {
23
+ namespace {
24
+
25
+ // ==================================================== Random ========================================================
26
+
27
+ template<typename RNG>
28
+ void random_from_to_kernel(TensorIteratorBase& iter, uint64_t range, int64_t base, RNG generator) {
29
+ AT_DISPATCH_V2(iter.dtype(), "random_from_to_kernel_cpu", AT_WRAP([&] {
30
+ std::lock_guard<std::mutex> lock(generator->mutex_);
31
+ cpu_serial_kernel(iter, [range, base, generator]() -> scalar_t {
32
+ uniform_int_from_to_distribution<scalar_t> random(range, base);
33
+ return random(generator);
34
+ });
35
+ }), kBool, kHalf, kBFloat16, AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES));
36
+ }
37
+
38
+ // This is a special kernel to handle a single specific case:
39
+ // from(inclusive) = std::numeric_limits<int64_t>::lowest()
40
+ // to(exclusive) = None (= std::numeric_limits<int64_t>::max() + 1)
41
+ template<typename RNG>
42
+ void random_full_64_bits_range_kernel(TensorIteratorBase& iter, RNG generator) {
43
+ AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::BFloat16, iter.dtype(), "random_full_64_bits_range_kernel_cpu", [&] {
44
+ if constexpr (std::is_same<scalar_t, int64_t>::value ||
45
+ std::is_same<scalar_t, double>::value ||
46
+ std::is_same<scalar_t, float>::value ||
47
+ std::is_same<scalar_t, at::BFloat16>::value) {
48
+ std::lock_guard<std::mutex> lock(generator->mutex_);
49
+ cpu_serial_kernel(iter, [generator]() -> scalar_t {
50
+ uniform_int_full_range_distribution<scalar_t> random;
51
+ return random(generator);
52
+ });
53
+ } else {
54
+ TORCH_CHECK(false, "random_full_64_bits_range_kernel_cpu handles only int64, double, float and bfloat16");
55
+ }
56
+ });
57
+ }
58
+
59
+ template<typename RNG>
60
+ struct RandomFromToKernel {
61
+ void operator()(TensorIteratorBase& iter, uint64_t range, int64_t base, c10::optional<Generator> gen) {
62
+ random_from_to_kernel(iter, range, base, check_generator<RNG>(gen));
63
+ }
64
+ void operator()(TensorIteratorBase& iter, c10::optional<Generator> gen) {
65
+ random_full_64_bits_range_kernel(iter, check_generator<RNG>(gen));
66
+ }
67
+ };
68
+
69
+ template<typename RNG>
70
+ void random_kernel(TensorIteratorBase& iter, RNG generator) {
71
+ std::lock_guard<std::mutex> lock(generator->mutex_);
72
+ AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Half, at::ScalarType::BFloat16, at::ScalarType::Bool, iter.dtype(), "random_kernel_cpu", [&] {
73
+ cpu_serial_kernel(iter, [generator]() -> scalar_t {
74
+ uniform_int_distribution<scalar_t> random;
75
+ return random(generator);
76
+ });
77
+ });
78
+ }
79
+
80
+ template<typename RNG>
81
+ struct RandomKernel {
82
+ void operator()(TensorIteratorBase& iter, c10::optional<Generator> gen) {
83
+ random_kernel(iter, check_generator<RNG>(gen));
84
+ }
85
+ };
86
+
87
+ // ==================================================== Normal ========================================================
88
+
89
+ #ifdef CPU_CAPABILITY_AVX2
90
+ static void normal_fill_16_AVX2(float *data,
91
+ const __m256* two_pi,
92
+ const __m256* one,
93
+ const __m256* minus_two,
94
+ const __m256* mean,
95
+ const __m256* std_v) {
96
+ const __m256 u1 = _mm256_sub_ps(*one, _mm256_loadu_ps(data));
97
+ const __m256 u2 = _mm256_loadu_ps(data + 8);
98
+ // sincos256_ps and log256_ps are from avx_mathfun.h
99
+ const __m256 radius = _mm256_sqrt_ps(_mm256_mul_ps(*minus_two, log256_ps(u1)));
100
+ const __m256 theta = _mm256_mul_ps(*two_pi, u2);
101
+ __m256 sintheta, costheta;
102
+ sincos256_ps(theta, &sintheta, &costheta);
103
+ const __m256 n1 = _mm256_mul_ps(radius, costheta);
104
+ const __m256 n2 = _mm256_mul_ps(radius, sintheta);
105
+ _mm256_storeu_ps(data, _mm256_fmadd_ps(n1, *std_v, *mean));
106
+ _mm256_storeu_ps(data + 8, _mm256_fmadd_ps(n2, *std_v, *mean));
107
+ }
108
+
109
+ template<typename RNG>
110
+ void normal_fill_AVX2(const TensorBase &self, const float mean, const float std, RNG generator) {
111
+ float *data = self.data_ptr<float>();
112
+ auto size = self.numel();
113
+ std::lock_guard<std::mutex> lock(generator->mutex_);
114
+ for (const auto i : c10::irange(size)) {
115
+ at::uniform_real_distribution<float> uniform(0, 1);
116
+ data[i] = uniform(generator);
117
+ }
118
+ const __m256 two_pi = _mm256_set1_ps(2.0f * c10::pi<double>);
119
+ const __m256 one = _mm256_set1_ps(1.0f);
120
+ const __m256 minus_two = _mm256_set1_ps(-2.0f);
121
+ const __m256 mean_v = _mm256_set1_ps(mean);
122
+ const __m256 std_v = _mm256_set1_ps(std);
123
+
124
+ for (int64_t i = 0; i < size - 15; i += 16) {
125
+ normal_fill_16_AVX2(data + i, &two_pi, &one, &minus_two, &mean_v, &std_v);
126
+ }
127
+
128
+ if (size % 16 != 0) {
129
+ // Recompute the last 16 values.
130
+ data = data + size - 16;
131
+ for (const auto i : c10::irange(16)) {
132
+ at::uniform_real_distribution<float> uniform(0, 1);
133
+ data[i] = uniform(generator);
134
+ }
135
+ normal_fill_16_AVX2(data, &two_pi, &one, &minus_two, &mean_v, &std_v);
136
+ }
137
+ }
138
+ #endif
139
+
140
+ template <typename scalar_t>
141
+ static void normal_fill_16(scalar_t *data, const scalar_t mean, const scalar_t std) {
142
+ for (const auto j : c10::irange(8)) {
143
+ const scalar_t u1 = 1 - data[j]; // [0, 1) -> (0, 1] for log.
144
+ const scalar_t u2 = data[j + 8];
145
+ const scalar_t radius = std::sqrt(-2 * std::log(u1));
146
+ const scalar_t theta = 2.0f * c10::pi<double> * u2;
147
+ data[j] = radius * std::cos(theta) * std + mean;
148
+ data[j + 8] = radius * std::sin(theta) * std + mean;
149
+ }
150
+ }
151
+
152
+ template <typename scalar_t, typename RNG>
153
+ void normal_fill(const TensorBase &self, const scalar_t mean, const scalar_t std, RNG generator) {
154
+ scalar_t *data = self.data_ptr<scalar_t>();
155
+ auto size = self.numel();
156
+ std::lock_guard<std::mutex> lock(generator->mutex_);
157
+ for (const auto i : c10::irange(size)) {
158
+ at::uniform_real_distribution<scalar_t> uniform(0, 1);
159
+ data[i] = uniform(generator);
160
+ }
161
+
162
+ for (int64_t i = 0; i < size - 15; i += 16) {
163
+ normal_fill_16<scalar_t>(data + i, mean, std);
164
+ }
165
+ if (size % 16 != 0) {
166
+ // Recompute the last 16 values.
167
+ data = data + size - 16;
168
+ for (const auto i : c10::irange(16)) {
169
+ at::uniform_real_distribution<scalar_t> uniform(0, 1);
170
+ data[i] = uniform(generator);
171
+ }
172
+ normal_fill_16<scalar_t>(data, mean, std);
173
+ }
174
+ }
175
+
176
+ template<typename RNG>
177
+ void normal_kernel(const TensorBase &self, double mean, double std, RNG generator) {
178
+ auto size = self.numel();
179
+ if (self.scalar_type() == ScalarType::Float && size >= 16 && self.is_contiguous()) {
180
+ #ifdef CPU_CAPABILITY_AVX2
181
+ normal_fill_AVX2(self, static_cast<float>(mean), static_cast<float>(std), generator);
182
+ #else
183
+ normal_fill(self, static_cast<float>(mean), static_cast<float>(std), generator);
184
+ #endif
185
+ } else {
186
+ AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, self.scalar_type(), "normal_kernel_cpu", [&] {
187
+ if (size >= 16 && self.is_contiguous()) {
188
+ normal_fill<scalar_t>(self, static_cast<scalar_t>(mean), static_cast<scalar_t>(std), generator);
189
+ } else {
190
+ auto iter = TensorIterator::borrowing_nullary_op(self);
191
+ std::lock_guard<std::mutex> lock(generator->mutex_);
192
+ cpu_serial_kernel(iter, [mean, std, generator]() -> scalar_t {
193
+ at::normal_distribution<double> normal(mean, std);
194
+ return static_cast<scalar_t>(normal(generator));
195
+ });
196
+ }
197
+ });
198
+ }
199
+ }
200
+
201
+ template<typename RNG>
202
+ struct NormalKernel {
203
+ void operator()(Tensor& self, double mean, double std, c10::optional<Generator> gen) {
204
+ normal_kernel(self, mean, std, check_generator<RNG>(gen));
205
+ }
206
+ };
207
+
208
+ // ==================================================== Uniform =======================================================
209
+
210
+ template<typename RNG>
211
+ void uniform_kernel(TensorIteratorBase& iter, double from_, double to_, RNG generator) {
212
+ AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "uniform_kernel_cpu", [&]() {
213
+ std::lock_guard<std::mutex> lock(generator->mutex_);
214
+ auto from = static_cast<scalar_t>(from_);
215
+ auto to = static_cast<scalar_t>(to_);
216
+ at::uniform_real_distribution<scalar_t> uniform(from, to);
217
+ cpu_serial_kernel(iter, [&uniform, generator]() -> scalar_t {
218
+ return static_cast<scalar_t>(uniform(generator));
219
+ });
220
+ });
221
+ }
222
+
223
+ template<typename RNG>
224
+ struct UniformKernel {
225
+ void operator()(TensorIteratorBase& iter, double from, double to, c10::optional<Generator> gen) {
226
+ uniform_kernel(iter, from, to, check_generator<RNG>(gen));
227
+ }
228
+ };
229
+
230
+ // ==================================================== Cauchy ========================================================
231
+
232
+ template<typename RNG>
233
+ void cauchy_kernel(TensorIteratorBase& iter, double median, double sigma, RNG generator) {
234
+ AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "cauchy_cpu", [&]() {
235
+ std::lock_guard<std::mutex> lock(generator->mutex_);
236
+ at::cauchy_distribution<double> cauchy(median, sigma);
237
+ cpu_serial_kernel(iter, [&cauchy, generator]() -> scalar_t {
238
+ return static_cast<scalar_t>(cauchy(generator));
239
+ });
240
+ });
241
+ }
242
+
243
+ template<typename RNG>
244
+ struct CauchyKernel {
245
+ void operator()(TensorIteratorBase& iter, double median, double sigma, c10::optional<Generator> gen) {
246
+ cauchy_kernel(iter, median, sigma, check_generator<RNG>(gen));
247
+ }
248
+ };
249
+
250
+ // ================================================== LogNormal =======================================================
251
+
252
+ template<typename RNG>
253
+ void log_normal_kernel(TensorIteratorBase& iter, double mean, double std, RNG generator) {
254
+ AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "log_normal_cpu", [&]() {
255
+ std::lock_guard<std::mutex> lock(generator->mutex_);
256
+ at::lognormal_distribution<double> logNormal(mean, std);
257
+ cpu_serial_kernel(iter, [&logNormal, generator]() -> scalar_t {
258
+ return static_cast<scalar_t>(logNormal(generator));
259
+ });
260
+ });
261
+ }
262
+
263
+ template<typename RNG>
264
+ struct LogNormalKernel {
265
+ void operator()(TensorIteratorBase& iter, double mean, double std, c10::optional<Generator> gen) {
266
+ log_normal_kernel(iter, mean, std, check_generator<RNG>(gen));
267
+ }
268
+ };
269
+
270
+ // =================================================== Geometric ======================================================
271
+
272
+ template<typename RNG>
273
+ void geometric_kernel(TensorIteratorBase& iter, double p, RNG generator) {
274
+ AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "geometric_cpu", [&]() {
275
+ std::lock_guard<std::mutex> lock(generator->mutex_);
276
+ at::geometric_distribution<double> geometric(p);
277
+ cpu_serial_kernel(iter, [&geometric, generator]() -> scalar_t {
278
+ return static_cast<scalar_t>(geometric(generator));
279
+ });
280
+ });
281
+ }
282
+
283
+ template<typename RNG>
284
+ struct GeometricKernel {
285
+ void operator()(TensorIteratorBase& iter, double p, c10::optional<Generator> gen) {
286
+ geometric_kernel(iter, p, check_generator<RNG>(gen));
287
+ }
288
+ };
289
+
290
+ // ================================================== Exponential =====================================================
291
+
292
+ template<typename RNG>
293
+ void exponential_kernel(TensorIteratorBase& iter, double lambda, RNG generator) {
294
+ TORCH_CHECK(isFloatingType(iter.dtype()), "Exponential distribution is a continuous probability distribution. dtype must be a floating point but you specified ", iter.dtype());
295
+ AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "exponential_cpu", [&]() {
296
+ std::lock_guard<std::mutex> lock(generator->mutex_);
297
+ at::exponential_distribution<double> exponential(lambda);
298
+ cpu_serial_kernel(iter, [&exponential, generator]() -> scalar_t {
299
+ return static_cast<scalar_t>(exponential(generator));
300
+ });
301
+ });
302
+ }
303
+
304
+ template<typename RNG>
305
+ struct ExponentialKernel {
306
+ void operator()(TensorIteratorBase& iter, double lambda, c10::optional<Generator> gen) {
307
+ exponential_kernel(iter, lambda, check_generator<RNG>(gen));
308
+ }
309
+ };
310
+
311
+ // ================================================== Bernoulli =======================================================
312
+
313
+ template<typename RNG>
314
+ void bernoulli_kernel(const TensorBase &self, const TensorBase &p_, RNG generator) {
315
+ AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Bool, at::ScalarType::BFloat16, at::ScalarType::Half,
316
+ self.scalar_type(), "bernoulli_tensor_cpu_self_", [&] {
317
+ // See Note [Acquire lock when using random generators]
318
+ std::lock_guard<std::mutex> lock(generator->mutex_);
319
+ using self_t = scalar_t;
320
+ auto p_cpu = p_.to(kCPU);
321
+ auto p = expand_inplace(self, p_cpu);
322
+ auto iter = TensorIteratorConfig()
323
+ .add_output(self)
324
+ .add_input(*p)
325
+ .check_all_same_dtype(false)
326
+ .build();
327
+ if (p->scalar_type() == kDouble) {
328
+ cpu_serial_kernel(iter, [&](const double p_val) -> self_t {
329
+ at::bernoulli_distribution<double> bernoulli(p_val);
330
+ return static_cast<self_t>(bernoulli(generator));
331
+ });
332
+ } else {
333
+ AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::BFloat16, at::ScalarType::Half,
334
+ p->scalar_type(), "bernoulli_tensor_cpu_p_", [&] {
335
+ using p_t = scalar_t;
336
+ cpu_serial_kernel(iter, [&](const p_t p_val) -> self_t {
337
+ at::bernoulli_distribution<float> bernoulli(p_val);
338
+ return static_cast<self_t>(bernoulli(generator));
339
+ });
340
+ });
341
+ }
342
+ });
343
+ }
344
+
345
+ template<typename RNG>
346
+ void bernoulli_kernel(const TensorBase &self, double p, RNG generator) {
347
+ AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Bool, at::ScalarType::BFloat16, at::ScalarType::Half,
348
+ self.scalar_type(), "bernoulli_scalar_cpu_", [&] {
349
+ // See Note [Acquire lock when using random generators]
350
+ std::lock_guard<std::mutex> lock(generator->mutex_);
351
+ auto iter = TensorIterator::borrowing_nullary_op(self);
352
+ cpu_serial_kernel(iter, [p, generator]() -> scalar_t {
353
+ at::bernoulli_distribution<double> bernoulli(p);
354
+ return static_cast<scalar_t>(bernoulli(generator));
355
+ });
356
+ });
357
+ }
358
+
359
+ template<typename RNG>
360
+ struct BernoulliKernel {
361
+ void operator()(const TensorBase &self, double p, c10::optional<Generator> gen) {
362
+ bernoulli_kernel(self, p, check_generator<RNG>(gen));
363
+ }
364
+ void operator()(const TensorBase &self, const TensorBase &p_, c10::optional<Generator> gen) {
365
+ bernoulli_kernel(self, p_, check_generator<RNG>(gen));
366
+ }
367
+ };
368
+
369
+ }}}}}
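These functor templates become concrete CPU kernels once a generator type is plugged in; a compressed sketch of that wiring (the wrapper names are assumptions, and error handling is omitted):

    // Sketch: instantiate the functors above with CPUGeneratorImpl.
    #include <ATen/CPUGeneratorImpl.h>

    namespace at::native {

    void uniform_kernel_cpu(TensorIteratorBase& iter, double from, double to,
                            c10::optional<Generator> gen) {
      templates::cpu::UniformKernel<CPUGeneratorImpl>()(iter, from, to, gen);
    }

    void normal_kernel_cpu(Tensor& self, double mean, double std,
                           c10::optional<Generator> gen) {
      templates::cpu::NormalKernel<CPUGeneratorImpl>()(self, mean, std, gen);
    }

    } // namespace at::native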
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cpu/IndexKernelUtils.h ADDED
@@ -0,0 +1,88 @@
1
+ #pragma once
2
+ #include <ATen/native/TensorIterator.h>
3
+ #include <c10/util/irange.h>
4
+
5
+ namespace at {
6
+ namespace native {
7
+
8
+ namespace {
9
+ static bool is_constant_index(int ntensor, const int64_t* strides) {
10
+ AT_ASSERT(ntensor >= 3);
11
+ for (const auto arg : c10::irange(2, ntensor)) {
12
+ if (strides[arg] != 0) {
13
+ return false;
14
+ }
15
+ }
16
+ return true;
17
+ }
18
+
19
+
20
+ struct Indexer {
21
+ Indexer(int64_t num_indexers, char** indexers, const int64_t* indexer_strides,
22
+ IntArrayRef original_sizes, IntArrayRef original_strides)
23
+ : num_indexers(num_indexers)
24
+ , indexers(indexers)
25
+ , indexer_strides(indexer_strides)
26
+ , original_strides(original_strides.data())
27
+ , original_sizes(original_sizes.data()) {
28
+ AT_ASSERT(static_cast<int64_t>(original_strides.size()) == num_indexers);
29
+ AT_ASSERT(static_cast<int64_t>(original_sizes.size()) == num_indexers);
30
+ }
31
+
32
+ int64_t num_indexers;
33
+ char** indexers;
34
+ const int64_t* indexer_strides;
35
+ const int64_t* original_strides;
36
+ const int64_t* original_sizes;
37
+
38
+ int64_t get(int64_t idx) {
39
+ int64_t offset = 0;
40
+ for (const auto j : c10::irange(num_indexers)) {
41
+ int64_t value = *(int64_t*)&indexers[j][idx * indexer_strides[j]];
42
+ int64_t size = original_sizes[j];
43
+ TORCH_CHECK_INDEX(value >= -size && value < size,
44
+ "index ", value, " is out of bounds for dimension ", j, " with size ", size);
45
+ if (value < 0) {
46
+ value += size;
47
+ }
48
+ offset += value * original_strides[j];
49
+ }
50
+ return offset;
51
+ }
52
+ };
53
+ } // anonymous namespace
54
+
55
+ template <typename scalar_t, typename func_t>
56
+ void cpu_index_kernel(TensorIteratorBase& iter, IntArrayRef index_size, IntArrayRef index_stride,
57
+ const func_t& f, bool serial_execution=false)
58
+ {
59
+ int ntensor = iter.ntensors();
60
+ // When launching the parallel index version, use a grain size smaller than internal::GRAIN_SIZE
61
+ // so that the available threads get a more balanced workload and better cache locality.
62
+ // The grain size here was chosen via op benchmarks to amortize the thread-launch overhead
63
+ const int index_parallel_grain_size = 3000;
64
+ auto loop = [&](char** data, const int64_t* strides, int64_t n) {
65
+ auto indexer = Indexer(ntensor - 2, &data[2], &strides[2], index_size, index_stride);
66
+ char* dst = data[0];
67
+ char* src = data[1];
68
+ if (is_constant_index(ntensor, strides)) {
69
+ // specialization for when every element uses the same index
70
+ int64_t offset = indexer.get(0);
71
+ for (const auto i : c10::irange(n)) {
72
+ f(dst + strides[0] * i, src + strides[1] * i, offset);
73
+ }
74
+ } else {
75
+ for (const auto i : c10::irange(n)) {
76
+ int64_t offset = indexer.get(i);
77
+ f(dst + strides[0] * i, src + strides[1] * i, offset);
78
+ }
79
+ }
80
+ };
81
+ if (serial_execution) {
82
+ iter.serial_for_each(loop, {0, iter.numel()});
83
+ } else {
84
+ iter.for_each(loop, index_parallel_grain_size);
85
+ }
86
+ }
87
+ } // at
88
+ } // native
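cpu_index_kernel leaves the per-element work to the functor f, which receives the destination and source pointers plus the byte offset computed by Indexer::get(); a minimal sketch of the gather-style functor a caller typically passes (scalar_t stands for the dispatched element type):

    // Sketch: copy one element per output position from the indexed location.
    cpu_index_kernel<scalar_t>(iter, index_size, index_stride,
        [](char* dst, char* src, int64_t offset) {
          *reinterpret_cast<scalar_t*>(dst) =
              *reinterpret_cast<scalar_t*>(src + offset);
        });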
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cpu/ReduceUtils.h ADDED
@@ -0,0 +1,238 @@
1
+ #pragma once
2
+
3
+ #include <ATen/Parallel.h>
4
+ #include <ATen/NumericUtils.h>
5
+ #include <ATen/cpu/vec/vec.h>
6
+ #include <ATen/cpu/vec/functional.h>
7
+ #include <ATen/native/ReductionType.h>
8
+ #include <c10/util/irange.h>
9
+ #include <ATen/OpMathType.h>
10
+ #include <ATen/native/cpu/utils.h>
11
+ #include <ATen/OpMathType.h>
12
+
13
+ namespace at::native {
14
+ inline namespace CPU_CAPABILITY {
15
+
16
+ using namespace vec;
17
+
18
+ #define AT_DISPATCH_REDUCTION_TYPES(op, ...) \
19
+ [&] { \
20
+ switch (op) { \
21
+ case ReductionType::SUM: { \
22
+ static constexpr auto reduce = ReductionType::SUM; \
23
+ return __VA_ARGS__(); \
24
+ } \
25
+ case ReductionType::MEAN: { \
26
+ static constexpr auto reduce = ReductionType::MEAN; \
27
+ return __VA_ARGS__(); \
28
+ } \
29
+ case ReductionType::MIN: { \
30
+ static constexpr auto reduce = ReductionType::MIN; \
31
+ return __VA_ARGS__(); \
32
+ } \
33
+ case ReductionType::MAX: { \
34
+ static constexpr auto reduce = ReductionType::MAX; \
35
+ return __VA_ARGS__(); \
36
+ } \
37
+ case ReductionType::PROD: { \
38
+ static constexpr auto reduce = ReductionType::PROD; \
39
+ return __VA_ARGS__(); \
40
+ } \
41
+ } \
42
+ }()
43
+
44
+ template <typename scalar_t, ReductionType reduce>
45
+ inline vec_scalar_t<scalar_t> init_value() {
46
+ using acc_t = vec_scalar_t<scalar_t>;
47
+ acc_t val;
48
+ if (reduce == ReductionType::SUM ||
49
+ reduce == ReductionType::MEAN) {
50
+ val = static_cast<acc_t>(0);
51
+ } else if (reduce == ReductionType::PROD) {
52
+ val = static_cast<acc_t>(1);
53
+ } else if (reduce == ReductionType::MAX) {
54
+ val = -std::numeric_limits<acc_t>::infinity();
55
+ } else {
56
+ TORCH_INTERNAL_ASSERT(reduce == ReductionType::MIN);
57
+ val = std::numeric_limits<acc_t>::infinity();
58
+ }
59
+ return val;
60
+ }
61
+
62
+ template <typename scalar_t, ReductionType reduce>
63
+ inline vec_scalar_t<scalar_t> init_value(const c10::optional<Scalar>& initial) {
64
+ using acc_t = vec_scalar_t<scalar_t>;
65
+ if (initial.has_value()) {
66
+ return initial.value().to<acc_t>();
67
+ } else {
68
+ return init_value<scalar_t, reduce>();
69
+ }
70
+ }
71
+
72
+ template <typename scalar_t>
73
+ inline void init(scalar_t* out, int64_t size, const vec_scalar_t<scalar_t>& val) {
74
+ using Vec = Vectorized<vec_scalar_t<scalar_t>>;
75
+ map<scalar_t>(
76
+ [val](Vec x) { return Vec(val); },
77
+ out,
78
+ out,
79
+ size);
80
+ }
81
+
82
+ template <typename scalar_t, ReductionType reduce>
83
+ inline void init(scalar_t* out, int64_t size, const c10::optional<Scalar>& initial) {
84
+ using acc_t = vec_scalar_t<scalar_t>;
85
+ acc_t val = init_value<scalar_t, reduce>(initial);
86
+ init(out, size, val);
87
+ }
88
+
89
+ // overload with `include_self`, used by scatter_reduce
90
+ template <typename scalar_t, ReductionType reduce>
91
+ inline void init(scalar_t* out, int64_t size, bool include_self = false) {
92
+ using acc_t = vec_scalar_t<scalar_t>;
93
+ if (!include_self) {
94
+ acc_t val = init_value<scalar_t, reduce>();
95
+ init(out, size, val);
96
+ }
97
+ }
98
+
99
+ template <typename scalar_t, ReductionType reduce>
100
+ inline void _init(scalar_t* self_ptr, at::opmath_type<scalar_t>* buffer_ptr, int64_t size, bool include_self) {
101
+ if (!include_self) {
102
+ init<at::opmath_type<scalar_t>, reduce>(buffer_ptr, size, include_self);
103
+ } else {
104
+ vec::convert(self_ptr, buffer_ptr, size);
105
+ }
106
+ }
107
+
108
+ template <typename scalar_t>
109
+ inline typename std::enable_if<!std::is_same<scalar_t, Vec2>::value, scalar_t>::type
110
+ _max(const scalar_t& x, const scalar_t& y) {
111
+ return at::_isnan(y) ? y : std::max(x, y);
112
+ }
113
+
114
+ template <typename scalar_t>
115
+ inline Vectorized<scalar_t> _max(const Vectorized<scalar_t>& x, const Vectorized<scalar_t>& y) {
116
+ // vec::maximum propagates NaN
117
+ return vec::maximum(x, y);
118
+ }
119
+
120
+ template <typename vec_t>
121
+ inline typename std::enable_if<std::is_same<vec_t, Vec2>::value, Vec2>::type
122
+ _max(const vec_t& x, const vec_t& y) {
123
+ // vec::maximum propagates NaN
124
+ return maximum(x, y);
125
+ }
126
+
127
+ template <typename scalar_t>
128
+ inline typename std::enable_if<!std::is_same<scalar_t, Vec2>::value, scalar_t>::type
129
+ _min(const scalar_t& x, const scalar_t& y) {
130
+ return at::_isnan(y) ? y : std::min(x, y);
131
+ }
132
+
133
+ template <typename scalar_t>
134
+ inline Vectorized<scalar_t> _min(const Vectorized<scalar_t>& x, const Vectorized<scalar_t>& y) {
135
+ // vec::minimum propagates NaN
136
+ return vec::minimum(x, y);
137
+ }
138
+
139
+ template <typename vec_t>
140
+ inline typename std::enable_if<std::is_same<vec_t, Vec2>::value, Vec2>::type
141
+ _min(const vec_t& x, const vec_t& y) {
142
+ // vec::minimum propagates NaN
143
+ return minimum(x, y);
144
+ }
145
+
146
+ template <typename scalar_t, typename accumut, typename Op,
147
+ typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
148
+ inline void map_acc(
149
+ const Op& vec_fun,
150
+ accumut* output_data,
151
+ const accumut* input_data,
152
+ const scalar_t* input_data2,
153
+ int64_t size) {
154
+ using Vec = vec::Vectorized<scalar_t>;
155
+ using aVec = vec::Vectorized<accumut>;
156
+ int64_t d = 0;
157
+ constexpr int64_t kVecSize = Vec::size();
158
+ constexpr int64_t kaVecSize = aVec::size();
159
+ for (d = 0; d < size - (size % kVecSize); d += kVecSize) {
160
+ Vec data2_vec = Vec::loadu(input_data2 + d);
161
+ auto [data2_avec0, data2_avec1] = convert_to_float<scalar_t>(data2_vec);
162
+ aVec input_vec0 = aVec::loadu(input_data + d);
163
+ aVec input_vec1 = aVec::loadu(input_data + d + kaVecSize);
164
+ vec_fun(input_vec0, data2_avec0).store(output_data + d);
165
+ vec_fun(input_vec1, data2_avec1).store(output_data + d + kaVecSize);
166
+ }
167
+ if (size - d > 0) {
168
+ int64_t tail_size = size - d;
169
+ Vec data2_vec = Vec::loadu(input_data2 + d, tail_size);
170
+ auto [data2_avec0, data2_avec1] = convert_to_float<scalar_t>(data2_vec);
171
+ if (tail_size > kaVecSize) {
172
+ aVec input_vec0 = aVec::loadu(input_data + d);
173
+ aVec input_vec1 = aVec::loadu(input_data + d + kaVecSize, tail_size - kaVecSize);
174
+ vec_fun(input_vec0, data2_avec0).store(output_data + d);
175
+ vec_fun(input_vec1, data2_avec1).store(output_data + d + kaVecSize, tail_size - kaVecSize);
176
+ } else {
177
+ aVec input_vec0 = aVec::loadu(input_data + d, tail_size);
178
+ vec_fun(input_vec0, data2_avec0).store(output_data + d, tail_size);
179
+ }
180
+ }
181
+ }
182
+
183
+ // for Max and Min, propagate NaN:
184
+ template <typename T, ReductionType reduce>
185
+ inline T update(const T& x, const T& y) {
186
+ if (reduce == ReductionType::SUM ||
187
+ reduce == ReductionType::MEAN) {
188
+ return x + y;
189
+ } else if (reduce == ReductionType::PROD) {
190
+ return x * y;
191
+ } else if (reduce == ReductionType::MAX) {
192
+ return _max(x, y);
193
+ } else {
194
+ TORCH_INTERNAL_ASSERT(reduce == ReductionType::MIN);
195
+ return _min(x, y);
196
+ }
197
+ }
198
+
199
+ template <typename scalar_t, ReductionType reduce>
200
+ inline void update(scalar_t* out, const scalar_t* data, int64_t K) {
201
+ using Vec = vec::Vectorized<vec_scalar_t<scalar_t>>;
202
+ map2<scalar_t>(
203
+ [](Vec x, Vec y) { return update<Vec, reduce>(x, y); },
204
+ out,
205
+ out,
206
+ data,
207
+ K);
208
+ }
209
+
210
+ template <typename scalar_t, ReductionType reduce,
211
+ typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
212
+ inline void update(at::opmath_type<scalar_t>* out, const scalar_t* data, int64_t K) {
213
+ using opmath_t = at::opmath_type<scalar_t>;
214
+ using Vec = vec::Vectorized<opmath_t>;
215
+ map_acc<scalar_t, opmath_t>(
216
+ [](Vec x, Vec y) { return update<Vec, reduce>(x, y); },
217
+ out,
218
+ out,
219
+ data,
220
+ K);
221
+ }
222
+
223
+ template <typename scalar_t, ReductionType reduce>
224
+ inline void write(scalar_t* out, int64_t count, int64_t K) {
225
+ using Vec = vec::Vectorized<vec_scalar_t<scalar_t>>;
226
+ if (reduce == ReductionType::MEAN) {
227
+ if (count > 0) {
228
+ vec::map<scalar_t>(
229
+ [count](Vec x) { return x / Vec(count); },
230
+ out,
231
+ out,
232
+ K);
233
+ }
234
+ }
235
+ }
236
+
237
+ } // namespace CPU_CAPABILITY
238
+ } // namespace at::native
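AT_DISPATCH_REDUCTION_TYPES and the helpers above are meant to be combined so the reduction becomes a compile-time constant; a short sketch of the intended pattern (reduce_op, values, n, and out are illustrative names):

    // Sketch: dispatch on the runtime ReductionType once, then use the constexpr
    // `reduce` it introduces together with init_value()/update().
    AT_DISPATCH_REDUCTION_TYPES(reduce_op, [&]() {
      using acc_t = vec_scalar_t<scalar_t>;
      acc_t acc = init_value<scalar_t, reduce>();      // identity element for `reduce`
      for (const auto i : c10::irange(n)) {
        acc = update<acc_t, reduce>(acc, static_cast<acc_t>(values[i]));
      }
      out[0] = acc;  // for MEAN, write() above additionally divides by the count
    });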
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cpu/int_mm_kernel.h ADDED
@@ -0,0 +1,16 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/Tensor.h>
4
+ #include <ATen/native/DispatchStub.h>
5
+
6
+ namespace at::native {
7
+
8
+ using weight_to_int4pack_fn = void(*)(const Tensor&, const Tensor&, int, int);
9
+ using int4pack_mm_fn = void(*)(const Tensor&, const Tensor&, const Tensor&, int, const Tensor&, int, int);
10
+ using int8pack_mm_fn = void(*)(const Tensor&, const Tensor&, const Tensor&, const Tensor&);
11
+
12
+ DECLARE_DISPATCH(weight_to_int4pack_fn, weight_to_int4pack_stub);
13
+ DECLARE_DISPATCH(int4pack_mm_fn, int4pack_mm_stub);
14
+ DECLARE_DISPATCH(int8pack_mm_fn, int8pack_mm_stub);
15
+
16
+ } // namespace at::native
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cpu/mixed_data_type.h ADDED
@@ -0,0 +1,41 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/Tensor.h>
4
+
5
+ namespace at { namespace native {
6
+
7
+ inline ScalarType first_type() {
8
+ return ScalarType::Undefined;
9
+ }
10
+
11
+ template <typename... Args>
12
+ inline ScalarType first_type(const Tensor& arg, const Args&... parameters) {
13
+ return arg.defined() ? arg.scalar_type() : first_type(parameters...);
14
+ }
15
+
16
+ template <typename... Args>
17
+ inline bool is_mixed_type(const Tensor& input, const Args&... parameters) {
18
+ const auto parameter_type = first_type(parameters...);
19
+ return ((parameter_type != ScalarType::Undefined) &&
20
+ (parameter_type != input.scalar_type()));
21
+ }
22
+
23
+ // currently on CPU, mixed data type is only supported
24
+ // when input is 'BFloat16' or 'Half' and parameters are 'Float'
25
+ inline void check_mixed_data_type(const Tensor& input) {
26
+ TORCH_CHECK(at::isReducedFloatingType(input.scalar_type()),
27
+ "mixed dtype (CPU): all inputs must share same datatype.");
28
+ }
29
+
30
+ template <typename... Args>
31
+ inline void check_mixed_data_type(const Tensor& input, const Tensor& parameter, const Args&... parameters) {
32
+ TORCH_CHECK(!parameter.defined() || parameter.scalar_type() == ScalarType::Float,
33
+ "mixed dtype (CPU): expect parameter to have scalar type of Float");
34
+ check_mixed_data_type(input, parameters...);
35
+ }
36
+
37
+ inline ScalarType param_scalar_type(const Tensor& t, bool is_mixed_type) {
38
+ return is_mixed_type ? ScalarType::Float : t.scalar_type();
39
+ }
40
+
41
+ }} // namespace at::native
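A rough sketch of how these helpers are typically used at the top of a CPU normalization kernel that accepts BFloat16/Half input with Float parameters (the surrounding op and variable names are assumptions):

    // Sketch: detect the reduced-precision-input / Float-parameter combination,
    // validate it, and pick the dtype used for the parameter tensors.
    const bool mixed_type = is_mixed_type(input, weight, bias);
    if (mixed_type) {
      check_mixed_data_type(input, weight, bias);
    }
    const auto param_dtype = param_scalar_type(input, mixed_type);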
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cpu/moments_utils.h ADDED
@@ -0,0 +1,206 @@
1
+ #pragma once
2
+
3
+ #include <array>
4
+ #include <cstring>
5
+ #include <numeric>
6
+ #include <utility>
7
+ #include <vector>
8
+
9
+ #include <ATen/Parallel.h>
10
+ #include <ATen/OpMathType.h>
11
+ #include <ATen/cpu/vec/vec.h>
12
+ #include <ATen/native/cpu/utils.h>
13
+ #include <c10/util/SmallVector.h>
14
+ #include <c10/util/irange.h>
15
+
16
+ namespace at {
17
+ namespace native {
18
+ inline namespace CPU_CAPABILITY {
19
+
20
+ template<typename T> using opmath_t = at::opmath_type<T>;
21
+
22
+ constexpr int64_t kChunkSize = 16;
23
+
24
+ template <typename T>
25
+ void AddMoments(
26
+ int64_t m0_add,
27
+ const T& m1_add,
28
+ const T& m2_add,
29
+ int64_t& m0,
30
+ T& m1,
31
+ T& m2) {
32
+ const int64_t n = m0 + m0_add;
33
+ const T c = n == 0 ? static_cast<T>(0) : static_cast<T>(m0_add) / static_cast<T>(n);
34
+ const T delta = m1_add - m1;
35
+ m1 += c * delta;
36
+ m2 += m2_add + delta * delta * c * static_cast<T>(m0);
37
+ m0 = n;
38
+ }
39
+
40
+ template <typename T>
41
+ C10_ALWAYS_INLINE void AddMomentsVec(
42
+ int64_t m0_add,
43
+ const vec::Vectorized<T>& m1_add,
44
+ const vec::Vectorized<T>& m2_add,
45
+ int64_t& m0,
46
+ vec::Vectorized<T>& m1,
47
+ vec::Vectorized<T>& m2) {
48
+ using Vec = vec::Vectorized<T>;
49
+ const int64_t n = m0 + m0_add;
50
+ const T c = n == 0 ? static_cast<T>(0) : static_cast<T>(m0_add) / static_cast<T>(n);
51
+ const Vec c_vec(c);
52
+ const Vec delta = m1_add - m1;
53
+ m1 += c_vec * delta;
54
+ m2 += m2_add + delta * delta * c_vec * Vec(static_cast<T>(m0));
55
+ m0 = n;
56
+ }
57
+
58
+ template <typename T>
59
+ inline typename std::enable_if<std::is_same<T, opmath_t<T>>::value, void>::type
60
+ UpdateMomentsVec(
61
+ int64_t m0,
62
+ const T* X_ptr,
63
+ const std::array<vec::Vectorized<opmath_t<T>>, kChunkSize>& c_vecs,
64
+ int64_t& m0_stk0,
65
+ vec::Vectorized<opmath_t<T>>& m1_stk0,
66
+ vec::Vectorized<opmath_t<T>>& m2_stk0) {
67
+ using Vec = vec::Vectorized<opmath_t<T>>;
68
+ Vec m1_vec(0);
69
+ Vec m2_vec(0);
70
+ for (const auto j : c10::irange(m0)) {
71
+ const Vec x_vec = Vec::loadu(X_ptr + j * Vec::size());
72
+ const Vec delta_vec = x_vec - m1_vec;
73
+ m1_vec += delta_vec * c_vecs[j];
74
+ m2_vec += delta_vec * (x_vec - m1_vec);
75
+ }
76
+ AddMomentsVec(m0, m1_vec, m2_vec, m0_stk0, m1_stk0, m2_stk0);
77
+ }
78
+
79
+ // each bfloat16/half vector will be converted to two float vectors,
80
+ // and accumulated successively on m1_stk0/m2_stk0.
81
+ template <typename T>
82
+ inline typename std::enable_if<!std::is_same<T, at::opmath_type<T>>::value, void>::type
83
+ UpdateMomentsVec(
84
+ int64_t m0,
85
+ const T* X_ptr,
86
+ const std::array<vec::Vectorized<at::opmath_type<T>>, kChunkSize>& c_vecs,
87
+ int64_t& m0_stk0,
88
+ vec::Vectorized<at::opmath_type<T>>& m1_stk0,
89
+ vec::Vectorized<at::opmath_type<T>>& m2_stk0) {
90
+ using Vec = vec::Vectorized<T>;
91
+ using fVec = vec::Vectorized<at::opmath_type<T>>;
92
+ fVec m1_fvec0(0), m1_fvec1(0);
93
+ fVec m2_fvec0(0), m2_fvec1(0);
94
+ for (const auto j : c10::irange(m0)) {
95
+ const Vec x_bvec = Vec::loadu(X_ptr + j * Vec::size());
96
+ auto [x_fvec0, x_fvec1] = convert_to_float<T>(x_bvec);
97
+ const fVec delta_fvec0 = x_fvec0 - m1_fvec0;
98
+ const fVec delta_fvec1 = x_fvec1 - m1_fvec1;
99
+ m1_fvec0 += delta_fvec0 * c_vecs[j];
100
+ m1_fvec1 += delta_fvec1 * c_vecs[j];
101
+ m2_fvec0 += delta_fvec0 * (x_fvec0 - m1_fvec0);
102
+ m2_fvec1 += delta_fvec1 * (x_fvec1 - m1_fvec1);
103
+ }
104
+ AddMomentsVec(m0, m1_fvec0, m2_fvec0, m0_stk0, m1_stk0, m2_stk0);
105
+ AddMomentsVec(m0, m1_fvec1, m2_fvec1, m0_stk0, m1_stk0, m2_stk0);
106
+ }
107
+
108
+ // Compute rowwise moments with the Welford algorithm and cascaded (pairwise) summation to improve
109
+ // numerical stability.
110
+ // https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
111
+ // https://en.wikipedia.org/wiki/Pairwise_summation
112
+ template <typename T, int64_t kMaxDepth>
113
+ std::pair<opmath_t<T>, opmath_t<T>> RowwiseMomentsImpl(const T* X, int64_t N, int64_t ddof = 0) {
114
+ using math_t = opmath_t<T>;
115
+
116
+ constexpr int64_t kVecSize = vec::Vectorized<T>::size();
117
+ constexpr int64_t kAccVecSize = vec::Vectorized<math_t>::size();
118
+ const int64_t n = N / kVecSize;
119
+ const int64_t m = divup(n, kChunkSize);
120
+ const int64_t depth = utils::CeilLog2(m);
121
+
122
+ using Vec = vec::Vectorized<math_t>;
123
+ const Vec kZeroVec(math_t(0));
124
+ c10::SmallVector<int64_t, kMaxDepth> m0_stk(depth, 0);
125
+ c10::SmallVector<Vec, kMaxDepth> m1_stk(depth, kZeroVec);
126
+ c10::SmallVector<Vec, kMaxDepth> m2_stk(depth, kZeroVec);
127
+
128
+ for (const auto i : c10::irange(m)) {
129
+ const T* X_ptr = X + i * kChunkSize * kVecSize;
130
+ const int64_t m0 = std::min(kChunkSize, n - i * kChunkSize);
131
+ static std::array<Vec, kChunkSize> c_vecs = ([]() {
132
+ std::array<Vec, kChunkSize> result;
133
+ for (const auto i : c10::irange(kChunkSize)) {
134
+ result[i] = Vec(math_t(1) / static_cast<math_t>(i + 1));
135
+ }
136
+ return result;
137
+ })();
138
+ UpdateMomentsVec(m0, X_ptr, c_vecs, m0_stk[0], m1_stk[0], m2_stk[0]);
139
+
140
+ int64_t mask = i + 1;
141
+ for (int64_t j = 1; j < depth && (mask & 1) == 0; ++j) {
142
+ AddMomentsVec(
143
+ m0_stk[j - 1],
144
+ m1_stk[j - 1],
145
+ m2_stk[j - 1],
146
+ m0_stk[j],
147
+ m1_stk[j],
148
+ m2_stk[j]);
149
+ m0_stk[j - 1] = 0;
150
+ m1_stk[j - 1] = kZeroVec;
151
+ m2_stk[j - 1] = kZeroVec;
152
+ mask >>= 1;
153
+ }
154
+ }
155
+ for (const auto i : c10::irange(1, depth)) {
156
+ AddMomentsVec(
157
+ m0_stk[i], m1_stk[i], m2_stk[i], m0_stk[0], m1_stk[0], m2_stk[0]);
158
+ }
159
+
160
+ std::array<math_t, kAccVecSize> m1_arr{};
161
+ std::array<math_t, kAccVecSize> m2_arr{};
162
+ m1_stk[0].store(m1_arr.data());
163
+ m2_stk[0].store(m2_arr.data());
164
+
165
+ int64_t m0 = 0;
166
+ math_t m1 = 0;
167
+ math_t m2 = 0;
168
+ for (int64_t i = n * kVecSize; i < N; ++i) {
169
+ math_t x = static_cast<math_t>(X[i]);
170
+ const math_t delta = x - m1;
171
+ ++m0;
172
+ m1 += delta / static_cast<math_t>(m0);
173
+ m2 += delta * (x - m1);
174
+ }
175
+ // for BFloat16, each lane of m1_arr/m2_arr holds results accumulated over 2*n values
176
+ int64_t m0_add = n * kVecSize / kAccVecSize;
177
+ for (const auto i : c10::irange(kAccVecSize)) {
178
+ AddMoments(m0_add, m1_arr[i], m2_arr[i], m0, m1, m2);
179
+ }
180
+
181
+ return std::make_pair(m1, m2 / static_cast<math_t>(N - ddof));
182
+ }
183
+
184
+ template <typename T>
185
+ std::pair<opmath_t<T>, opmath_t<T>> RowwiseMoments(const T* X, int64_t N, int64_t ddof = 0) {
186
+ using Vec = vec::Vectorized<T>;
187
+ constexpr int64_t kVecSize = Vec::size();
188
+ const int64_t n = N / kVecSize;
189
+ const int64_t m = divup(n, kChunkSize);
190
+ const int64_t depth = utils::CeilLog2(m);
191
+ if (depth <= 4) {
192
+ return RowwiseMomentsImpl<T, 4>(X, N, ddof);
193
+ } else if (depth <= 8) {
194
+ return RowwiseMomentsImpl<T, 8>(X, N, ddof);
195
+ } else if (depth <= 16) {
196
+ return RowwiseMomentsImpl<T, 16>(X, N, ddof);
197
+ } else if (depth <= 32) {
198
+ return RowwiseMomentsImpl<T, 32>(X, N, ddof);
199
+ } else {
200
+ return RowwiseMomentsImpl<T, 64>(X, N, ddof);
201
+ }
202
+ }
203
+
204
+ } // namespace CPU_CAPABILITY
205
+ } // namespace native
206
+ } // namespace at
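RowwiseMoments is the per-row building block a normalization kernel calls; a minimal sketch for one row (the eps term and the layer-norm context are assumptions):

    // Sketch: mean and variance for one contiguous row of length N, then the
    // reciprocal standard deviation a LayerNorm-style kernel would use.
    auto [mean, var] = RowwiseMoments(X + row * N, N);
    auto rstd = opmath_t<T>(1) / std::sqrt(var + eps);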
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cuda/Activation.h ADDED
@@ -0,0 +1,20 @@
1
+ #pragma once
2
+ #include <ATen/native/Activation.h>
3
+ #include <cstdint>
4
+
5
+ namespace at {
6
+ struct TensorIteratorBase;
7
+ class TensorBase;
8
+ }
9
+
10
+ namespace at { namespace native {
11
+
12
+ void launch_glu_backward_kernel(const TensorIteratorBase& iter,
13
+ int64_t gI_stride, int64_t I_stride);
14
+
15
+ void launch_log_sigmoid_forward_kernel(TensorIteratorBase& iter);
16
+
17
+ void GeluCUDAKernelImpl(TensorIteratorBase& it, GeluType approximate);
18
+ void GeluBackwardCUDAKernelImpl(TensorIteratorBase& it, GeluType approximate);
19
+
20
+ }} // namespace at::native
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cuda/CUDAJitLoops.cuh ADDED
@@ -0,0 +1,296 @@
1
+ #pragma once
2
+ #include <ATen/jit_macros.h>
3
+
4
+ // Jiterator functions are guarded behind this macro
5
+ #if AT_USE_JITERATOR()
6
+
7
+ #include <ATen/OpMathType.h>
8
+ #include <ATen/TensorIterator.h>
9
+ #include <ATen/core/Array.h>
10
+ #include <ATen/cuda/CUDAContext.h>
11
+ #include <ATen/cuda/detail/OffsetCalculator.cuh>
12
+ #include <ATen/native/cuda/jit_utils.h>
13
+ #include <ATen/native/cuda/MemoryAccess.cuh>
14
+ #include <ATen/native/cuda/thread_constants.h>
15
+
16
+ #include <ATen/native/cuda/Loops.cuh>
17
+
18
+ #include <c10/macros/Macros.h>
19
+ #include <c10/core/ScalarType.h>
20
+ #include <c10/util/SmallBuffer.h>
21
+
22
+ #include <initializer_list>
23
+ #include <type_traits>
24
+ #include <tuple>
25
+ #include <mutex>
26
+
27
+ namespace at {
28
+ namespace native {
29
+
30
+ template <typename Tuple, std::size_t... I>
31
+ constexpr auto tuple_to_array_helper(Tuple& t, std::index_sequence<I...> seq) {
32
+ constexpr auto size = seq.size();
33
+ (void)t; // warning : unused parameter when tuple is empty.
34
+ return std::array<void*, size>{static_cast<void*>(&std::get<I>(t))...};
35
+ }
36
+
37
+ // Helper function to convert a tuple to std::array<void*, N>
38
+ // for passing the arguments to CUDA Kernel
39
+ // NOTE: We capture tuple by reference,
40
+ // so the pointers in returned array are only valid
41
+ // till tuple is alive.
42
+ template <typename ...Args>
43
+ constexpr auto tuple_to_array(std::tuple<Args...>& extra_args) {
44
+ constexpr auto tuple_size = sizeof...(Args);
45
+ return tuple_to_array_helper(extra_args, std::make_index_sequence<tuple_size>{});
46
+ }
47
+
48
+ struct JittedVecKernelCache {
49
+ // Different kernels are compiled depending on what we're vectorizing up to (1, 2 or 4 elements)
50
+ at::cuda::jit::NvrtcFunction vec1;
51
+ at::cuda::jit::NvrtcFunction vec2;
52
+ at::cuda::jit::NvrtcFunction vec4;
53
+ };
54
+
55
+ struct JittedKernelVariantCache {
56
+ JittedVecKernelCache vec;
57
+ at::cuda::jit::NvrtcFunction noncontiguous;
58
+ at::cuda::jit::NvrtcFunction dynamic_contiguous;
59
+ at::cuda::jit::NvrtcFunction dynamic_noncontiguous;
60
+ };
61
+
62
+ inline c10::SmallBuffer<void*, 64> pack_kernel_args(
63
+ std::initializer_list<void*> args,
64
+ c10::ArrayRef<void*> extra_args) {
65
+ c10::SmallBuffer<void*, 64> ret(args.size() + extra_args.size());
66
+ std::copy(args.begin(), args.end(), ret.data());
67
+ std::copy(extra_args.begin(), extra_args.end(), ret.data() + args.size());
68
+ return ret;
69
+ }
70
+
71
+ template<typename array_t,
72
+ typename inp_calc_t,
73
+ typename out_calc_t,
74
+ typename loader_t,
75
+ typename storer_t>
76
+ void launch_jitted_unrolled_kernel(
77
+ std::mutex &jiterator_mutex,
78
+ at::cuda::jit::NvrtcFunction &fn_cache,
79
+ const at::cuda::jit::KernelDescriptor &desc,
80
+ int64_t N,
81
+ array_t data,
82
+ inp_calc_t ic,
83
+ out_calc_t oc,
84
+ loader_t l,
85
+ storer_t s,
86
+ bool contiguous,
87
+ at::cuda::jit::BinaryFuncVariant scalar_pos,
88
+ void* scalar_val,
89
+ c10::ArrayRef<void*> extra_args) {
90
+
91
+ TORCH_INTERNAL_ASSERT(N > 0 && N <= std::numeric_limits<int32_t>::max());
92
+ //casting result to int is always safe, intermediate is int64 and won't overflow
93
+ const uint32_t grid = (N + block_work_size() - 1) / block_work_size();
94
+
95
+ if (!fn_cache.function) {
96
+ const std::lock_guard<std::mutex> lock{jiterator_mutex};
97
+ if (!fn_cache.function) {
98
+ constexpr bool dynamic_casting = !std::is_same<decltype(l), memory::LoadWithoutCast>() ||
99
+ !std::is_same<decltype(s), memory::StoreWithoutCast>();
100
+ auto code = at::cuda::jit::generate_code(
101
+ desc, contiguous, dynamic_casting, scalar_pos);
102
+ fn_cache = at::cuda::jit::jit_pwise_function(code, desc.name);
103
+ }
104
+ }
105
+
106
+ auto args = pack_kernel_args({&N, &data, &ic, &oc, &l, &s, scalar_val}, extra_args);
107
+ at::cuda::jit::launch_jitted_pwise_function(fn_cache, args.data(), {grid, 1u, 1u},
108
+ {num_threads(), 1u, 1u});
109
+ }
110
+
111
+ template<int arity, typename array_t>
112
+ void launch_jitted_vectorized_kernel(
113
+ std::mutex &jiterator_mutex, JittedVecKernelCache &fn_cache,
114
+ const at::cuda::jit::KernelDescriptor &desc, int64_t N, array_t data,
115
+ at::cuda::jit::BinaryFuncVariant scalar_pos,
116
+ void *scalar_val, c10::ArrayRef<void*> extra_args) {
117
+ TORCH_INTERNAL_ASSERT(N > 0 && N <= std::numeric_limits<int32_t>::max());
118
+ // N is still int64_t for the computation, but it's always safe to cast result to int
119
+ const uint32_t grid = (N + block_work_size() - 1) / block_work_size();
120
+ const int vec_size = at::cuda::jit::can_vectorize_up_to(
121
+ desc, c10::ArrayRef<char*>(data.data, data.size()));
122
+
123
+ // Different kernels are compiled depending on what we're vectorizing up to (1, 2 or 4 elements)
124
+ // fn_ptr is set to the appropriate function based on the vec size and GPU used
125
+ at::cuda::jit::NvrtcFunction* fn_ptr;
126
+ if (vec_size == 4) {
127
+ fn_ptr = &fn_cache.vec4;
128
+ } else if (vec_size == 2) {
129
+ fn_ptr = &fn_cache.vec2;
130
+ } else if (vec_size ==1) {
131
+ fn_ptr = &fn_cache.vec1;
132
+ } else {
133
+ TORCH_INTERNAL_ASSERT(false, "unexpected vec_size for jitter vectorized kernel");
134
+ }
135
+
136
+ bool vectorized = vec_size > 1;
137
+
138
+ if (!fn_ptr->function) {
139
+ const std::lock_guard<std::mutex> lock{jiterator_mutex};
140
+ if (!fn_ptr->function) { // cache miss!
141
+
142
+ // Generates program
143
+ auto code = at::cuda::jit::generate_code(
144
+ desc, /*contiguous=*/true, /*dynamic_casting=*/false,
145
+ scalar_pos, vectorized, vec_size);
146
+ std::string kernel_name = vectorized ? desc.name + "_vectorized" + std::to_string(vec_size) : desc.name;
147
+
148
+ // Acquires the program
149
+ *fn_ptr = at::cuda::jit::jit_pwise_function(code, kernel_name);
150
+ }
151
+ }
152
+
153
+ if (vectorized) {
154
+ auto args = pack_kernel_args({&N, &data, scalar_val}, extra_args);
155
+ at::cuda::jit::launch_jitted_pwise_function(
156
+ *fn_ptr, args.data(), {grid, 1u, 1u}, {num_threads(), 1u, 1u});
157
+ } else {
158
+ // NVCC complains about unused variables l and s.
159
+ // It should be false positive in most cases, so we suppress the warnings.
160
+ #pragma nv_diagnostic push
161
+ #pragma nv_diag_suppress 177
162
+ auto ic = TrivialOffsetCalculator<arity>();
163
+ auto oc = TrivialOffsetCalculator<1>();
164
+ auto l = memory::LoadWithoutCast();
165
+ auto s = memory::StoreWithoutCast();
166
+
167
+ auto args = pack_kernel_args(
168
+ {&N, &data, &ic, &oc, &l, &s, scalar_val}, extra_args);
169
+ at::cuda::jit::launch_jitted_pwise_function(
170
+ *fn_ptr, args.data(), {grid, 1u, 1u}, {num_threads(), 1u, 1u});
171
+ #pragma nv_diagnostic pop
172
+ }
173
+ }
174
+
175
+ template <int arity>
176
+ void jitted_gpu_kernel_generic(
177
+ std::mutex &jiterator_mutex,
178
+ JittedKernelVariantCache &cache,
179
+ const at::cuda::jit::KernelDescriptor &desc,
180
+ at::cuda::jit::BinaryFuncVariant scalar_pos,
181
+ c10::ArrayRef<void*> extra_args,
182
+ TensorIteratorBase& iter,
183
+ const bool dynamic_casting,
184
+ void *scalar_val) {
185
+ TORCH_INTERNAL_ASSERT(iter.can_use_32bit_indexing());
186
+ TORCH_INTERNAL_ASSERT(iter.ninputs() == arity);
187
+ TORCH_INTERNAL_ASSERT(iter.noutputs() == 1);
188
+
189
+ constexpr int ntensors = arity + 1;
190
+ at::detail::Array<char*, ntensors> data;
191
+ for (auto i : c10::irange(ntensors)) {
192
+ data[i] = (char*)iter.data_ptr(i);
193
+ }
194
+
195
+ int64_t numel = iter.numel();
196
+ bool contiguous = iter.is_contiguous();
197
+
198
+ // Decides which of 4 kernel types to launch
199
+ // Variations are:
200
+ // - Case 1: no dynamic casting and contiguous
201
+ // - Case 2: no dynamic casting and noncontiguous
202
+ // - Case 3: dynamic casting and contiguous
203
+ // - Case 4: dynamic casting and noncontiguous
204
+ // These cases align with the non-jitted CUDALoops.cuh cases in gpu_kernel_impl
205
+
206
+ if (!dynamic_casting) {
207
+ if (contiguous) {
208
+ // Case 1: no dynamic casting and contiguous
209
+ launch_jitted_vectorized_kernel<arity>(
210
+ jiterator_mutex, cache.vec, desc,
211
+ numel, data, scalar_pos, scalar_val, extra_args);
212
+ return;
213
+ }
214
+
215
+ // Case 2: no dynamic casting and noncontiguous
216
+ auto input_offset_calculator = make_input_offset_calculator<arity>(iter);
217
+ auto output_offset_calculator = make_output_offset_calculator(iter);
218
+ auto loader = memory::LoadWithoutCast();
219
+ auto storer = memory::StoreWithoutCast();
220
+ launch_jitted_unrolled_kernel(
221
+ jiterator_mutex, cache.noncontiguous, desc, numel, data,
222
+ input_offset_calculator, output_offset_calculator, loader,
223
+ storer, contiguous, scalar_pos, scalar_val, extra_args);
224
+ return;
225
+ }
226
+
227
+ // Cases 3 and 4 are handled below
228
+ // Both require construction of a storer (this asserts 1 output) and one or more loaders
229
+
230
+ // Creates store cast to output (the zeroth tensor in TensorIterator)
231
+ auto storer = memory::StoreWithCast<1>(iter);
232
+
233
+ // Creates load casts from inputs (note offset indexing into the iterators 1...n tensors)
234
+ auto loader = memory::LoadWithCast<arity>(iter);
235
+
236
+ if (contiguous) {
237
+ // Case 3: dynamic casting and contiguous
238
+ auto input_offset_calculator = TrivialOffsetCalculator<arity>();
239
+ auto output_offset_calculator = TrivialOffsetCalculator<1>();
240
+ launch_jitted_unrolled_kernel(
241
+ jiterator_mutex, cache.dynamic_contiguous, desc, numel, data, input_offset_calculator,
242
+ output_offset_calculator, loader, storer, contiguous, scalar_pos, scalar_val, extra_args);
243
+ return;
244
+ }
245
+
246
+ // Case 4: dynamic casting and noncontiguous
247
+ auto input_offset_calculator = make_input_offset_calculator<arity>(iter);
248
+ auto output_offset_calculator = make_output_offset_calculator(iter);
249
+ launch_jitted_unrolled_kernel(
250
+ jiterator_mutex, cache.dynamic_noncontiguous, desc, numel, data, input_offset_calculator,
251
+ output_offset_calculator, loader, storer, contiguous, scalar_pos, scalar_val, extra_args);
252
+ }
253
+
254
+ // NOTE: static to reduce chances of name collision.
255
+ template <
256
+ char const* name,
257
+ typename result_type,
258
+ typename f_inputs_type,
259
+ int arity,
260
+ at::cuda::jit::BinaryFuncVariant scalar_pos =
261
+ at::cuda::jit::BinaryFuncVariant::NoScalar,
262
+ typename... ExtraArgs>
263
+ static void jitted_gpu_kernel_impl(
264
+ TensorIteratorBase& iter,
265
+ const std::string &f,
266
+ const bool dynamic_casting,
267
+ at::opmath_type<f_inputs_type> scalar_val,
268
+ std::tuple<ExtraArgs...> extra_args) {
269
+
270
+ // TODO: Memory use can probably be optimized by re-using kernels across GPUs with
271
+ // the same compute capability
272
+ static std::mutex jiterator_mutex;
273
+ static std::vector<JittedKernelVariantCache> device_caches(c10::cuda::device_count());
274
+
275
+ constexpr int nInputs = arity;
276
+ constexpr int nOutputs = 1; // TODO: Support more than 1 output
277
+ static const auto desc = at::cuda::jit::make_kernel_descriptor<
278
+ result_type, f_inputs_type, ExtraArgs...>(name, f, nInputs, nOutputs);
279
+
280
+ auto &cache = device_caches[iter.device().index()];
281
+ auto extra_args_array = tuple_to_array(extra_args);
282
+ return jitted_gpu_kernel_generic<arity>(
283
+ jiterator_mutex,
284
+ cache,
285
+ desc,
286
+ scalar_pos,
287
+ extra_args_array,
288
+ iter,
289
+ dynamic_casting,
290
+ &scalar_val
291
+ );
292
+ }
293
+
294
+ }} // at::native
295
+
296
+ #endif // AT_USE_JITERATOR()
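jitted_gpu_kernel_impl above takes the operator as NVRTC-compilable source plus a compile-time name; a heavily condensed sketch of such a call (the op string and names are illustrative, and in practice a higher-level wrapper builds the TensorIterator and forwards here):

    // Sketch only: a unary "plus one" op expressed as a source string whose
    // function name matches the compile-time kernel name.
    constexpr char plus_one_name[] = "plus_one";
    const std::string plus_one_src =
        "template <typename T> T plus_one(T a) { return a + T{1}; }";

    at::native::jitted_gpu_kernel_impl<plus_one_name, float, float, /*arity=*/1>(
        iter, plus_one_src, /*dynamic_casting=*/false,
        /*scalar_val=*/0.0f, std::make_tuple());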
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cuda/CUDALoops.cuh ADDED
@@ -0,0 +1,348 @@
1
+ #pragma once
2
+
3
+ // This file provides two functions to help write GPU elementwise kernels:
4
+ //
5
+ // gpu_kernel(TensorIterator iter, <lambda>)
6
+ // gpu_kernel_with_scalars(TensorIterator iter, <lambda>)
7
+ //
8
+ // The gpu_kernel_with_scalars generates specializations that support a
9
+ // single scalar CPU argument, such as from `cuda_tensor + 5`. The CPU scalar
10
+ // is lifted to a kernel parameter instead of copying to device memory.
11
+ // This should be used in conjunction with TensorIterator::allow_cpu_scalars_,
12
+ // which is the default for TensorIterator::binary_op. Otherwise, all inputs
13
+ // and the output must be on the GPU.
14
+ //
15
+ // For example, to write a reciprocal kernel for GPU float Tensors:
16
+ //
17
+ // gpu_kernel(iter, []GPU_LAMBDA(float a) {
18
+ // return 1.0f / a;
19
+ // });
20
+ //
21
+ // To write a multiplication kernel for GPU float Tensors where one argument
22
+ // may be a CPU scalar:
23
+ //
24
+ // gpu_kernel_with_scalars(iter, []GPU_LAMBDA(float a, float b) {
25
+ // return a * b;
26
+ // });
27
+ //
28
+ // See BinaryOpsKernel.cu for the complete implementation
29
+ //
30
+
31
+ #include <iostream>
32
+ #include <tuple>
33
+ #include <type_traits>
34
+
35
+ #include <ATen/core/Array.h>
36
+ #include <ATen/cuda/CUDAContext.h>
37
+ #include <ATen/detail/FunctionTraits.h>
38
+ #include <ATen/native/TensorIterator.h>
39
+ #include <c10/core/DynamicCast.h>
40
+ #include <c10/core/ScalarType.h>
41
+ #include <c10/macros/Macros.h>
42
+ #include <c10/util/TypeCast.h>
43
+
44
+ #ifdef __NVCC__
45
+ #define ASSERT_HOST_DEVICE_LAMBDA(type) \
46
+ static_assert( \
47
+ __nv_is_extended_host_device_lambda_closure_type(type), \
48
+ #type " must be a __host__ __device__ lambda")
49
+ #else
50
+ #define ASSERT_HOST_DEVICE_LAMBDA(type)
51
+ #endif
52
+
53
+ namespace at {
54
+ namespace native {
55
+
56
+ template <int vec_size, typename func_t, typename array_t>
57
+ C10_LAUNCH_BOUNDS_1(num_threads())
58
+ __global__ void vectorized_elementwise_kernel(int N, func_t f, array_t data) {
59
+ using traits = function_traits<func_t>;
60
+ int remaining = N - block_work_size() * blockIdx.x;
61
+
62
+ if (remaining < block_work_size()) { // if this block handles the remainder,
63
+ // just do a naive unrolled loop
64
+ auto input_calc = TrivialOffsetCalculator<traits::arity>();
65
+ auto output_calc = TrivialOffsetCalculator<1>();
66
+ auto loader = memory::LoadWithoutCast();
67
+ auto storer = memory::StoreWithoutCast();
68
+ auto policy = memory::policies::unroll<
69
+ array_t,
70
+ decltype(input_calc),
71
+ decltype(output_calc),
72
+ memory::LoadWithoutCast,
73
+ memory::StoreWithoutCast>(
74
+ data, remaining, input_calc, output_calc, loader, storer);
75
+ elementwise_kernel_helper(f, policy);
76
+ } else { // if this block has a full `block_work_size` data to handle, use
77
+ // vectorized memory access
78
+ elementwise_kernel_helper(
79
+ f, memory::policies::vectorized<vec_size, array_t>(data));
80
+ }
81
+ }
82
+
83
+ template <
84
+ typename func_t,
85
+ typename array_t,
86
+ typename inp_calc_t,
87
+ typename out_calc_t,
88
+ typename loader_t,
89
+ typename storer_t>
90
+ C10_LAUNCH_BOUNDS_1(num_threads())
91
+ __global__ void unrolled_elementwise_kernel(
92
+ int N,
93
+ func_t f,
94
+ array_t data,
95
+ inp_calc_t ic,
96
+ out_calc_t oc,
97
+ loader_t l,
98
+ storer_t s) {
99
+ int remaining = N - block_work_size() * blockIdx.x;
100
+ auto policy = memory::policies::
101
+ unroll<array_t, inp_calc_t, out_calc_t, loader_t, storer_t>(
102
+ data, remaining, ic, oc, l, s);
103
+ elementwise_kernel_helper(f, policy);
104
+ }
105
+
106
+ // this function assumes trivial 1d and no dynamic casting
107
+ template <typename func_t, typename array_t>
108
+ static inline void launch_vectorized_kernel(
109
+ int64_t N,
110
+ const func_t& f,
111
+ array_t data) {
112
+ TORCH_INTERNAL_ASSERT(N > 0 && N <= std::numeric_limits<int32_t>::max());
113
+ using traits = function_traits<func_t>;
114
+ int64_t grid = (N + block_work_size() - 1) / block_work_size();
115
+ auto stream = at::cuda::getCurrentCUDAStream();
116
+ int vec_size = memory::can_vectorize_up_to<func_t>(data);
117
+
118
+ switch (vec_size) {
119
+ case 4:
120
+ vectorized_elementwise_kernel<4, func_t, array_t>
121
+ <<<grid, num_threads(), 0, stream>>>(N, f, data);
122
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
123
+ break;
124
+ case 2:
125
+ vectorized_elementwise_kernel<2, func_t, array_t>
126
+ <<<grid, num_threads(), 0, stream>>>(N, f, data);
127
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
128
+ break;
129
+ case 1: {
130
+ auto input_calc = TrivialOffsetCalculator<traits::arity>();
131
+ auto output_calc = TrivialOffsetCalculator<1>();
132
+ auto loader = memory::LoadWithoutCast();
133
+ auto storer = memory::StoreWithoutCast();
134
+ unrolled_elementwise_kernel<func_t, array_t>
135
+ <<<grid, num_threads(), 0, stream>>>(
136
+ N, f, data, input_calc, output_calc, loader, storer);
137
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
138
+ break;
139
+ }
140
+ default:
141
+ TORCH_INTERNAL_ASSERT(false, "Unexpected vectorization size");
142
+ }
143
+ }
144
+
145
+ template <
146
+ typename func_t,
147
+ typename array_t,
148
+ typename inp_calc_t,
149
+ typename out_calc_t,
150
+ typename loader_t,
151
+ typename storer_t>
152
+ static inline void launch_unrolled_kernel(
153
+ int64_t N,
154
+ const func_t& f,
155
+ array_t data,
156
+ inp_calc_t ic,
157
+ out_calc_t oc,
158
+ loader_t l,
159
+ storer_t s) {
160
+ TORCH_INTERNAL_ASSERT(N > 0 && N <= std::numeric_limits<int32_t>::max());
161
+ int64_t grid = (N + block_work_size() - 1) / block_work_size();
162
+ auto stream = at::cuda::getCurrentCUDAStream();
163
+ unrolled_elementwise_kernel<func_t, array_t>
164
+ <<<grid, num_threads(), 0, stream>>>(N, f, data, ic, oc, l, s);
165
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
166
+ }
167
+
168
+ template <int nt, int vt, typename func_t>
169
+ C10_LAUNCH_BOUNDS_2(nt, 4)
170
+ __global__ void elementwise_kernel(int N, func_t f) {
171
+ int tid = threadIdx.x;
172
+ int nv = nt * vt;
173
+ int idx = nv * blockIdx.x + tid;
174
+ #pragma unroll
175
+ for (int i = 0; i < vt; i++) {
176
+ if (idx < N) {
177
+ f(idx);
178
+ idx += nt;
179
+ }
180
+ }
181
+ }
182
+
183
+ template <int nt, int vt, typename func_t>
184
+ static void launch_legacy_kernel(int64_t N, const func_t& f) {
185
+ TORCH_INTERNAL_ASSERT(N >= 0 && N <= std::numeric_limits<int32_t>::max());
186
+ if (N == 0) {
187
+ return;
188
+ }
189
+ dim3 block(nt);
190
+ dim3 grid((N + block.x * vt - 1) / (block.x * vt));
191
+ auto stream = at::cuda::getCurrentCUDAStream();
192
+ elementwise_kernel<nt, vt, func_t><<<grid, block, 0, stream>>>(N, f);
193
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
194
+ }
195
+
196
+ template <typename traits, typename func_t, typename index_t, size_t... INDEX>
197
+ C10_HOST_DEVICE typename traits::result_type invoke_impl(
198
+ const func_t& f,
199
+ char* const C10_RESTRICT data[],
200
+ const index_t strides[],
201
+ int i,
202
+ std::index_sequence<INDEX...>) {
203
+ (void)strides;
204
+ (void)i;
205
+ return f(c10::load<typename traits::template arg<INDEX>::type>(
206
+ data[INDEX] + i * strides[INDEX])...);
207
+ }
208
+
209
+ template <
210
+ typename func_t,
211
+ typename index_t,
212
+ typename traits = function_traits<func_t>>
213
+ C10_HOST_DEVICE typename traits::result_type invoke(
214
+ const func_t& f,
215
+ char* const C10_RESTRICT data[],
216
+ const index_t strides[],
217
+ int i) {
218
+ using Indices = std::make_index_sequence<traits::arity>;
219
+ return invoke_impl<traits>(f, data, strides, i, Indices{});
220
+ }
221
+
222
+ template <typename traits, typename func_t, typename index_t, size_t... I>
223
+ C10_HOST_DEVICE typename traits::result_type invoke_impl(
224
+ const func_t& f,
225
+ char* const C10_RESTRICT data[],
226
+ const index_t strides[],
227
+ const ScalarType dtypes[],
228
+ int i,
229
+ std::index_sequence<I...>) {
230
+ (void)strides;
231
+ (void)i;
232
+ return f(c10::fetch_and_cast<typename traits::template arg<I>::type>(
233
+ dtypes[I], data[I] + i * strides[I])...);
234
+ }
235
+
236
+ template <
237
+ typename func_t,
238
+ typename index_t,
239
+ typename traits = function_traits<func_t>>
240
+ C10_HOST_DEVICE typename traits::result_type invoke(
241
+ const func_t& f,
242
+ char* const C10_RESTRICT data[],
243
+ const index_t strides[],
244
+ const ScalarType dtypes[],
245
+ int i) {
246
+ using Indices = std::make_index_sequence<traits::arity>;
247
+ return invoke_impl<traits>(f, data, strides, dtypes, i, Indices{});
248
+ }
249
+
250
+ template <typename func_t>
251
+ void gpu_kernel_impl_nocast(TensorIteratorBase& iter, const func_t& f) {
252
+ using traits = function_traits<func_t>;
253
+ using arg0_t = typename traits::result_type;
254
+ constexpr int ntensors = traits::arity + 1;
255
+
256
+ TORCH_INTERNAL_ASSERT(iter.can_use_32bit_indexing());
257
+ TORCH_INTERNAL_ASSERT(iter.ninputs() == traits::arity);
258
+ TORCH_INTERNAL_ASSERT(iter.noutputs() == 1);
259
+ TORCH_INTERNAL_ASSERT(!needs_dynamic_casting<func_t>::check(iter));
260
+
261
+ at::detail::Array<char*, ntensors> data;
262
+ for (int i = 0; i < ntensors; i++) {
263
+ data[i] = (char*)iter.data_ptr(i);
264
+ }
265
+
266
+ int64_t numel = iter.numel();
267
+
268
+ bool contiguous = iter.is_contiguous();
269
+
270
+ if (contiguous) {
271
+ return launch_vectorized_kernel(numel, f, data);
272
+ }
273
+ auto offset_calc = ::make_offset_calculator<traits::arity + 1>(iter);
274
+ constexpr int unroll_factor = sizeof(arg0_t) >= 4 ? 2 : 4;
275
+ launch_legacy_kernel<128, unroll_factor>(numel, [=] GPU_LAMBDA(int idx) {
276
+ auto offsets = offset_calc.get(idx);
277
+ arg0_t* out = (arg0_t*)(data[0] + offsets[0]);
278
+ *out = invoke(f, &data.data[1], &offsets.data[1], 1);
279
+ });
280
+ }
281
+
282
+ template <typename func_t>
283
+ void gpu_kernel_impl(TensorIteratorBase& iter, const func_t& f) {
284
+ if (!needs_dynamic_casting<func_t>::check(iter)) {
285
+ return gpu_kernel_impl_nocast(iter, f);
286
+ }
287
+ using traits = function_traits<func_t>;
288
+ using arg0_t = typename traits::result_type;
289
+ constexpr int ntensors = traits::arity + 1;
290
+
291
+ TORCH_INTERNAL_ASSERT(iter.can_use_32bit_indexing());
292
+ TORCH_INTERNAL_ASSERT(iter.ninputs() == traits::arity);
293
+ TORCH_INTERNAL_ASSERT(iter.noutputs() == 1);
294
+
295
+ at::detail::Array<char*, ntensors> data;
296
+ for (int i = 0; i < ntensors; i++) {
297
+ data[i] = (char*)iter.data_ptr(i);
298
+ }
299
+
300
+ int64_t numel = iter.numel();
301
+
302
+ bool contiguous = iter.is_contiguous();
303
+
304
+ if (contiguous) {
305
+ #ifdef USE_ROCM
306
+ at::detail::Array<ScalarType, ntensors> dtypes;
307
+ auto inner_strides = iter.get_inner_strides();
308
+ at::detail::Array<int, ntensors> strides;
309
+ for (int i = 0; i < ntensors; i++) {
310
+ dtypes[i] = iter.dtype(i);
311
+ strides[i] = inner_strides[i];
312
+ }
313
+ launch_legacy_kernel<512, 1>(numel, [=]GPU_LAMBDA(int idx) {
314
+ void* out = data[0] + strides[0] * idx;
315
+ arg0_t result = invoke(f, &data.data[1], &strides.data[1], &dtypes.data[1], idx);
316
+ c10::cast_and_store<arg0_t>(dtypes[0], out, result);
317
+ });
318
+ #else
319
+ auto loader = memory::LoadWithCast<traits::arity>(iter);
320
+ auto storer = memory::StoreWithCast<1>(iter);
321
+ auto input_offset_calculator = TrivialOffsetCalculator<traits::arity>();
322
+ auto output_offset_calculator = TrivialOffsetCalculator<1>();
323
+ launch_unrolled_kernel(
324
+ numel,
325
+ f,
326
+ data,
327
+ input_offset_calculator,
328
+ output_offset_calculator,
329
+ loader,
330
+ storer);
331
+ #endif
332
+ } else {
333
+ at::detail::Array<ScalarType, ntensors> dtypes;
334
+ for (int i = 0; i < ntensors; i++) {
335
+ dtypes[i] = iter.dtype(i);
336
+ }
337
+ auto offset_calc = ::make_offset_calculator<traits::arity + 1>(iter);
338
+ launch_legacy_kernel<128, 4>(numel, [=] GPU_LAMBDA(int idx) {
339
+ auto offsets = offset_calc.get(idx);
340
+ void* out = data[0] + offsets[0];
341
+ arg0_t result = invoke(f, &data.data[1], &offsets.data[1], &dtypes.data[1], 1);
342
+ c10::cast_and_store<arg0_t>(dtypes[0], out, result);
343
+ });
344
+ }
345
+ }
346
+
347
+ } // namespace native
348
+ } // namespace at
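To connect the pieces above with the usage described in the header comment, here is a hedged sketch of how the reciprocal example would typically be driven in practice: an operator builds a TensorIterator, dispatches on dtype, and hands a GPU_LAMBDA to gpu_kernel (declared in ATen/native/cuda/Loops.cuh, which includes this file). The function name reciprocal_kernel_cuda_sketch and the surrounding assembly are illustrative, not the real kernel implementation.

// Sketch only: dispatch over floating dtypes and call gpu_kernel with a
// __host__ __device__ lambda, mirroring the reciprocal example in the
// header comment. Compile as CUDA (.cu); names marked "sketch" are not
// part of the actual implementation.
#include <ATen/Dispatch.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Loops.cuh>

namespace at { namespace native {

void reciprocal_kernel_cuda_sketch(TensorIteratorBase& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND2(
      kHalf, kBFloat16, iter.common_dtype(), "reciprocal_cuda_sketch", [&] {
        gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
          return static_cast<scalar_t>(1) / a;
        });
      });
}

}} // namespace at::native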
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cuda/CompositeRandomAccessor.h ADDED
@@ -0,0 +1,35 @@
1
+ #pragma once
2
+
3
+ #include <ATen/native/CompositeRandomAccessorCommon.h>
4
+ #include <thrust/tuple.h>
5
+
6
+ namespace at { namespace native {
7
+
8
+ struct TupleInfoCPU {
9
+ template <typename ...Types>
10
+ using tuple = thrust::tuple<Types...>;
11
+
12
+ template <typename ...Types>
13
+ static constexpr auto tie(Types&... args) noexcept {
14
+ return thrust::tie(args...);
15
+ }
16
+ };
17
+
18
+ template <typename KeyAccessor, typename ValueAccessor>
19
+ using CompositeRandomAccessorCPU =
20
+ CompositeRandomAccessor<KeyAccessor, ValueAccessor, TupleInfoCPU>;
21
+
22
+ template <typename Values, typename References>
23
+ void swap(
24
+ references_holder<Values, References> rh1,
25
+ references_holder<Values, References> rh2
26
+ ) {
27
+ return thrust::swap(rh1.data(), rh2.data());
28
+ }
29
+
30
+ template <int N, typename Values, typename References>
31
+ auto get(references_holder<Values, References> rh) -> decltype(thrust::get<N>(rh.data())) {
32
+ return thrust::get<N>(rh.data());
33
+ }
34
+
35
+ }} // namespace at::native
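For orientation, the overloads above exist so that thrust-style swap and get work on the proxy references a composite (key, value) accessor hands out: swapping the proxy swaps both underlying elements, which is what lets key-based algorithms (for example a sort) carry the paired values along. Below is a deliberately simplified, standalone sketch of that proxy-swap idea; KVRef is a made-up stand-in, not the real CompositeRandomAccessor or references_holder machinery.

// Standalone illustration of the proxy-reference trick used by composite
// accessors; everything here is a simplified stand-in.
#include <algorithm>
#include <cstdio>
#include <vector>

struct KVRef {
  int& key;
  float& value;
};

// Swapping two proxies swaps both underlying arrays' elements, so sorting by
// key drags the paired value along.
void swap(KVRef a, KVRef b) {
  std::swap(a.key, b.key);
  std::swap(a.value, b.value);
}

int main() {
  std::vector<int> keys = {3, 1, 2};
  std::vector<float> vals = {30.f, 10.f, 20.f};
  for (size_t i = 0; i < keys.size(); ++i)        // hand-rolled selection sort,
    for (size_t j = i + 1; j < keys.size(); ++j)  // just to exercise swap()
      if (keys[j] < keys[i])
        swap(KVRef{keys[i], vals[i]}, KVRef{keys[j], vals[j]});
  for (size_t i = 0; i < keys.size(); ++i)
    std::printf("%d -> %.0f\n", keys[i], vals[i]);  // 1 -> 10, 2 -> 20, 3 -> 30
  return 0;
}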
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cuda/CuFFTPlanCache.h ADDED
@@ -0,0 +1,494 @@
1
+ #include <ATen/Config.h>
2
+ #include <ATen/core/DimVector.h>
3
+ #include <ATen/cuda/CUDAContext.h>
4
+ #include <ATen/native/cuda/CuFFTUtils.h>
5
+ #include <ATen/native/utils/ParamsHash.h>
6
+ #include <c10/util/accumulate.h>
7
+ #include <c10/util/irange.h>
8
+
9
+ #include <cufft.h>
10
+ #include <cufftXt.h>
11
+
12
+ #include <limits>
13
+ #include <list>
14
+ #include <sstream>
15
+ #include <stdexcept>
16
+ #include <string>
17
+ #include <unordered_map>
18
+
19
+ namespace at { namespace native { namespace detail {
20
+
21
+ // Enum representing the FFT type
22
+ enum class CuFFTTransformType : int8_t {
23
+ C2C, // Complex-to-complex
24
+ R2C, // Real-to-complex
25
+ C2R, // Complex-to-real
26
+ };
27
+
28
+ // This struct is used to let us easily compute hashes of the
29
+ // parameters.
30
+ // It will be the **key** to the plan cache.
31
+ struct CuFFTParams
32
+ {
33
+ int64_t signal_ndim_; // between 1 and max_rank, i.e., 1 <= signal_ndim <= 3
34
+ // These include additional batch dimension as well.
35
+ int64_t sizes_[max_rank + 1];
36
+ int64_t input_strides_[max_rank + 1];
37
+ int64_t output_strides_[max_rank + 1];
38
+ CuFFTTransformType fft_type_;
39
+ ScalarType value_type_;
40
+
41
+ CuFFTParams() = default;
42
+
43
+ CuFFTParams(IntArrayRef in_strides, IntArrayRef out_strides,
44
+ IntArrayRef signal_sizes, CuFFTTransformType fft_type, ScalarType value_type) {
45
+ // Padding bits must be zeroed for hashing
46
+ memset(this, 0, sizeof(*this));
47
+ signal_ndim_ = signal_sizes.size() - 1;
48
+ fft_type_ = fft_type;
49
+ value_type_ = value_type;
50
+
51
+ TORCH_INTERNAL_ASSERT(in_strides.size() == signal_sizes.size());
52
+ TORCH_INTERNAL_ASSERT(out_strides.size() == signal_sizes.size());
53
+ TORCH_INTERNAL_ASSERT(1 <= signal_ndim_ && signal_ndim_ <= max_rank);
54
+
55
+ std::copy(signal_sizes.cbegin(), signal_sizes.cend(), sizes_);
56
+ std::copy(in_strides.cbegin(), in_strides.cend(), input_strides_);
57
+ std::copy(out_strides.cbegin(), out_strides.cend(), output_strides_);
58
+ }
59
+ };
60
+
61
+ static_assert(std::is_trivial<CuFFTParams>::value, "");
62
+
63
+ // Returns true if the transform type has complex input
64
+ inline bool cufft_complex_input(CuFFTTransformType type) {
65
+ switch (type) {
66
+ case CuFFTTransformType::C2C:
67
+ case CuFFTTransformType::C2R:
68
+ return true;
69
+
70
+ case CuFFTTransformType::R2C:
71
+ return false;
72
+ }
73
+ TORCH_INTERNAL_ASSERT(false);
74
+ }
75
+
76
+ // Returns true if the transform type has complex output
77
+ inline bool cufft_complex_output(CuFFTTransformType type) {
78
+ switch (type) {
79
+ case CuFFTTransformType::C2C:
80
+ case CuFFTTransformType::R2C:
81
+ return true;
82
+
83
+ case CuFFTTransformType::C2R:
84
+ return false;
85
+ }
86
+ TORCH_INTERNAL_ASSERT(false);
87
+ }
88
+
89
+ // Create transform type enum from bools representing if input and output are complex
90
+ inline CuFFTTransformType GetCuFFTTransformType(bool complex_input, bool complex_output) {
91
+ if (complex_input && complex_output) {
92
+ return CuFFTTransformType::C2C;
93
+ } else if (complex_input && !complex_output) {
94
+ return CuFFTTransformType::C2R;
95
+ } else if (!complex_input && complex_output) {
96
+ return CuFFTTransformType::R2C;
97
+ }
98
+ TORCH_INTERNAL_ASSERT(false, "Real to real FFTs are not supported");
99
+ }
100
+
101
+
102
+ class CuFFTHandle {
103
+ ::cufftHandle handle_;
104
+ public:
105
+
106
+ CuFFTHandle() {
107
+ CUFFT_CHECK(cufftCreate(&handle_));
108
+ }
109
+
110
+ ::cufftHandle & get() { return handle_; }
111
+ const ::cufftHandle & get() const { return handle_; }
112
+
113
+ ~CuFFTHandle() {
114
+ // Not using fftDestroy() for rocFFT to work around double freeing of handles
115
+ #if !defined(USE_ROCM)
116
+ cufftDestroy(handle_);
117
+ #endif
118
+ }
119
+ };
120
+
121
+ __forceinline__
122
+ static bool is_pow_of_two(int64_t x) {
123
+ return (x & (x - 1)) == 0;
124
+ }
125
+
126
+ using cufft_size_type = long long int;
127
+
128
+ using CuFFTDimVector = c10::SmallVector<cufft_size_type, at::kDimVectorStaticSize>;
129
+
130
+ // Struct representing a tensor in CuFFT's data layout for planning transforms
131
+ // See NOTE [ cuFFT Embedded Strides ].
132
+ struct CuFFTDataLayout {
133
+ CuFFTDimVector embed;
134
+ cufft_size_type stride, dist;
135
+ bool must_clone, simple;
136
+ };
137
+
138
+ // Returns a cufft embedding for a contiguous signal of the given size.
139
+ // e.g. if the input is cloned, this will be the resulting data layout
140
+ // See NOTE [ cuFFT Embedded Strides ].
141
+ inline CuFFTDataLayout cufft_simple_embed(IntArrayRef sizes, bool onesided) {
142
+ CuFFTDataLayout layout;
143
+ layout.simple = true;
144
+ layout.must_clone = false;
145
+ layout.embed.assign(sizes.cbegin() + 1, sizes.cend());
146
+ if (onesided) {
147
+ layout.embed.back() = sizes.back() / 2 + 1;
148
+ }
149
+ layout.stride = 1;
150
+ layout.dist = 1;
151
+ for (const auto& len : layout.embed) {
152
+ layout.dist *= len;
153
+ }
154
+ return layout;
155
+ }
156
+
157
+ // Convert strides to a CuFFT embedded representation.
158
+ // If strides cannot be embedded, returns a simple layout and sets must_clone flag
159
+ // See NOTE [ cuFFT Embedded Strides ].
160
+ inline CuFFTDataLayout as_cufft_embed(IntArrayRef strides, IntArrayRef sizes, bool onesided) {
161
+ const auto signal_ndim = strides.size() - 1;
162
+ CuFFTDataLayout layout;
163
+ auto last_stride = strides[signal_ndim];
164
+ layout.must_clone = (last_stride <= 0);
165
+
166
+ const auto last_dim_size = onesided ?
167
+ sizes[signal_ndim] / 2 + 1 : sizes[signal_ndim];
168
+ const auto signal_numel = c10::multiply_integers(sizes.slice(1, sizes.size() - 2)) * last_dim_size;
169
+
170
+ // Zero stides are not allowed, even if the batch size is one.
171
+ // If that happens just set a dummy case
172
+ if (sizes[0] == 1) {
173
+ layout.dist = signal_numel;
174
+ } else if (strides[0] == 0) {
175
+ layout.must_clone = true;
176
+ } else {
177
+ layout.dist = strides[0];
178
+ }
179
+
180
+ // Calculate the embedding shape, or set must_clone if the strides cannot be embedded
181
+ layout.embed.resize(signal_ndim);
182
+ for (auto i = signal_ndim - 1; !layout.must_clone && i > 0; i--) {
183
+ auto stride = strides[i];
184
+ if (sizes[i] == 1) {
185
+ layout.embed[i] = 1;
186
+ } else if (stride > 0 && stride % last_stride == 0) {
187
+ layout.embed[i] = stride / last_stride;
188
+ last_stride = stride;
189
+ } else {
190
+ layout.must_clone = true;
191
+ }
192
+ }
193
+
194
+ if (layout.must_clone) {
195
+ // If the input needs to be cloned, assume it will be contiguous
196
+ layout = cufft_simple_embed(sizes, onesided);
197
+ layout.must_clone = true;
198
+ } else {
199
+ layout.embed[0] = sizes[1];
200
+ layout.stride = strides[signal_ndim];
201
+ // Determine if layout represents a simple embedding (contiguous data)
202
+ layout.simple = [&] {
203
+ for (const auto i : c10::irange(1, signal_ndim - 1)) {
204
+ if (layout.embed[i] != sizes[i + 1]) {
205
+ return false;
206
+ }
207
+ }
208
+
209
+ return (layout.stride == 1 && layout.dist == signal_numel &&
210
+ layout.embed.back() == last_dim_size);
211
+ }();
212
+ }
213
+ return layout;
214
+ }
215
+
216
+ // This class contains all the information needed to execute a cuFFT plan:
217
+ // 1. the plan
218
+ // 2. whether to clone input before executing the plan
219
+ // 3. the workspace size needed
220
+ //
221
+ // This class will be the **value** in the plan cache.
222
+ // It **owns** the raw plan via a unique_ptr.
223
+ class CuFFTConfig {
224
+ public:
225
+
226
+ // Only move semantics is enought for this class. Although we already use
227
+ // unique_ptr for the plan, still remove copy constructor and assignment op so
228
+ // we don't accidentally copy and take perf hit.
229
+ CuFFTConfig(const CuFFTConfig&) = delete;
230
+ CuFFTConfig& operator=(CuFFTConfig const&) = delete;
231
+
232
+ explicit CuFFTConfig(const CuFFTParams& params):
233
+ CuFFTConfig(
234
+ IntArrayRef(params.input_strides_, params.signal_ndim_ + 1),
235
+ IntArrayRef(params.output_strides_, params.signal_ndim_ + 1),
236
+ IntArrayRef(params.sizes_, params.signal_ndim_ + 1),
237
+ params.fft_type_,
238
+ params.value_type_) {}
239
+
240
+ // For complex types, strides are in units of 2 * element_size(dtype)
241
+ // sizes are for the full signal, including batch size and always two-sided
242
+ CuFFTConfig(IntArrayRef in_strides, IntArrayRef out_strides,
243
+ IntArrayRef sizes, CuFFTTransformType fft_type, ScalarType dtype):
244
+ fft_type_(fft_type), value_type_(dtype) {
245
+
246
+ // signal sizes (excluding batch dim)
247
+ CuFFTDimVector signal_sizes(sizes.begin() + 1, sizes.end());
248
+
249
+ // input batch size
250
+ const int64_t batch = sizes[0];
251
+ const int64_t signal_ndim = sizes.size() - 1;
252
+
253
+ // Since cuFFT has limited non-unit stride support and various constraints, we
254
+ // use a flag to keep track throughout this function to see if we need to
255
+ // input = input.clone();
256
+
257
+ #if defined(USE_ROCM)
258
+ // clone input to avoid issues with hipfft clobering the input and failing tests
259
+ clone_input = true;
260
+ #else
261
+ clone_input = false;
262
+ #endif
263
+
264
+ // For half, base strides on the real part of real-to-complex and
265
+ // complex-to-real transforms are not supported. Since our output is always
266
+ // contiguous, only need to check real-to-complex case.
267
+ if (dtype == ScalarType::Half) {
268
+ // cuFFT on half requires compute capability of at least SM_53
269
+ auto dev_prop = at::cuda::getCurrentDeviceProperties();
270
+ TORCH_CHECK(dev_prop->major >= 5 && !(dev_prop->major == 5 && dev_prop->minor < 3),
271
+ "cuFFT doesn't support signals of half type with compute "
272
+ "capability less than SM_53, but the device containing input half "
273
+ "tensor only has SM_", dev_prop->major, dev_prop->minor);
274
+ for (const auto i : c10::irange(signal_ndim)) {
275
+ TORCH_CHECK(is_pow_of_two(sizes[i + 1]),
276
+ "cuFFT only supports dimensions whose sizes are powers of two when"
277
+ " computing in half precision, but got a signal size of",
278
+ sizes.slice(1));
279
+ }
280
+ clone_input |= in_strides.back() != 1;
281
+ }
282
+
283
+ CuFFTDataLayout in_layout;
284
+ if (clone_input) {
285
+ in_layout = cufft_simple_embed(sizes, fft_type == CuFFTTransformType::C2R);
286
+ } else {
287
+ in_layout = as_cufft_embed(in_strides, sizes, fft_type == CuFFTTransformType::C2R);
288
+ }
289
+ auto out_layout = as_cufft_embed(out_strides, sizes, fft_type == CuFFTTransformType::R2C);
290
+ TORCH_INTERNAL_ASSERT(!out_layout.must_clone, "Out strides cannot be represented as CuFFT embedding");
291
+ clone_input |= in_layout.must_clone;
292
+
293
+ // Check if we can take advantage of simple data layout.
294
+ //
295
+ // See NOTE [ cuFFT Embedded Strides ] in native/cuda/SpectralOps.cu.
296
+
297
+ const bool simple_layout = in_layout.simple && out_layout.simple;
298
+ cudaDataType itype, otype, exec_type;
299
+ const auto complex_input = cufft_complex_input(fft_type);
300
+ const auto complex_output = cufft_complex_output(fft_type);
301
+ if (dtype == ScalarType::Float) {
302
+ itype = complex_input ? CUDA_C_32F : CUDA_R_32F;
303
+ otype = complex_output ? CUDA_C_32F : CUDA_R_32F;
304
+ exec_type = CUDA_C_32F;
305
+ } else if (dtype == ScalarType::Double) {
306
+ itype = complex_input ? CUDA_C_64F : CUDA_R_64F;
307
+ otype = complex_output ? CUDA_C_64F : CUDA_R_64F;
308
+ exec_type = CUDA_C_64F;
309
+ } else if (dtype == ScalarType::Half) {
310
+ itype = complex_input ? CUDA_C_16F : CUDA_R_16F;
311
+ otype = complex_output ? CUDA_C_16F : CUDA_R_16F;
312
+ exec_type = CUDA_C_16F;
313
+ } else {
314
+ TORCH_CHECK(false, "cuFFT doesn't support tensor of type: ", dtype);
315
+ }
316
+
317
+ // disable auto allocation of workspace to use THC allocator
318
+ CUFFT_CHECK(cufftSetAutoAllocation(plan(), /* autoAllocate */ 0));
319
+
320
+ size_t ws_size_t;
321
+
322
+ // make plan
323
+ if (simple_layout) {
324
+ // If with unit-stride, we tell cuFFT by setting inembed == onembed == NULL.
325
+ // In such case, cuFFT ignores istride, ostride, idist, and odist
326
+ // by assuming istride = ostride = 1.
327
+ //
328
+ // See NOTE [ cuFFT Embedded Strides ] in native/cuda/SpectralOps.cu.
329
+ CUFFT_CHECK(cufftXtMakePlanMany(plan(), signal_ndim, signal_sizes.data(),
330
+ /* inembed */ nullptr, /* base_istride */ 1, /* idist */ 1, itype,
331
+ /* onembed */ nullptr, /* base_ostride */ 1, /* odist */ 1, otype,
332
+ batch, &ws_size_t, exec_type));
333
+ } else {
334
+ CUFFT_CHECK(cufftXtMakePlanMany(plan(), signal_ndim, signal_sizes.data(),
335
+ in_layout.embed.data(), in_layout.stride, in_layout.dist, itype,
336
+ out_layout.embed.data(), out_layout.stride, out_layout.dist, otype,
337
+ batch, &ws_size_t, exec_type));
338
+ }
339
+ ws_size = static_cast<int64_t>(ws_size_t);
340
+ }
341
+
342
+ const cufftHandle &plan() const { return plan_ptr.get(); }
343
+
344
+ CuFFTTransformType transform_type() const { return fft_type_; }
345
+ ScalarType data_type() const { return value_type_; }
346
+ bool should_clone_input() const { return clone_input; }
347
+ int64_t workspace_size() const { return ws_size; }
348
+
349
+ private:
350
+ CuFFTHandle plan_ptr;
351
+ bool clone_input;
352
+ int64_t ws_size;
353
+ CuFFTTransformType fft_type_;
354
+ ScalarType value_type_;
355
+ };
356
+
357
+ #if defined(USE_ROCM)
358
+ // Note that the max plan number for CUDA version < 10 has to be 1023
359
+ // due to a bug that fails on the 1024th plan
360
+ constexpr int64_t CUFFT_MAX_PLAN_NUM = 1023;
361
+ constexpr int64_t CUFFT_DEFAULT_CACHE_SIZE = CUFFT_MAX_PLAN_NUM;
362
+ #else
363
+ constexpr int64_t CUFFT_MAX_PLAN_NUM = std::numeric_limits<int64_t>::max();
364
+ // The default max cache size chosen for CUDA version > 10 is arbitrary.
365
+ // This number puts a limit on how big of a plan cache should we maintain by
366
+ // default. Users can always configure it via cufft_set_plan_cache_max_size.
367
+ constexpr int64_t CUFFT_DEFAULT_CACHE_SIZE = 4096;
368
+ #endif
369
+ static_assert(0 <= CUFFT_MAX_PLAN_NUM && CUFFT_MAX_PLAN_NUM <= std::numeric_limits<int64_t>::max(),
370
+ "CUFFT_MAX_PLAN_NUM not in size_t range");
371
+ static_assert(CUFFT_DEFAULT_CACHE_SIZE >= 0 && CUFFT_DEFAULT_CACHE_SIZE <= CUFFT_MAX_PLAN_NUM,
372
+ "CUFFT_DEFAULT_CACHE_SIZE not in [0, CUFFT_MAX_PLAN_NUM] range");
373
+
374
+ // This cache assumes that the mapping from key to value never changes.
375
+ // This is **NOT** thread-safe. Please use a mutex when using it **AND** the
376
+ // value returned from try_emplace_value.
377
+ // The contract of using this cache is that try_emplace_value should only be
378
+ // used when the max_size is positive.
379
+ class CuFFTParamsLRUCache {
380
+ public:
381
+ using kv_t = typename std::pair<CuFFTParams, CuFFTConfig>;
382
+ using map_t = typename std::unordered_map<std::reference_wrapper<CuFFTParams>,
383
+ typename std::list<kv_t>::iterator,
384
+ ParamsHash<CuFFTParams>,
385
+ ParamsEqual<CuFFTParams>>;
386
+ using map_kkv_iter_t = typename map_t::iterator;
387
+
388
+
389
+ CuFFTParamsLRUCache() : CuFFTParamsLRUCache(CUFFT_DEFAULT_CACHE_SIZE) {}
390
+
391
+ CuFFTParamsLRUCache(int64_t max_size) {
392
+ _set_max_size(max_size);
393
+ }
394
+
395
+ CuFFTParamsLRUCache(CuFFTParamsLRUCache&& other) noexcept :
396
+ _usage_list(std::move(other._usage_list)),
397
+ _cache_map(std::move(other._cache_map)),
398
+ _max_size(other._max_size) {}
399
+
400
+ CuFFTParamsLRUCache& operator=(CuFFTParamsLRUCache&& other) noexcept {
401
+ _usage_list = std::move(other._usage_list);
402
+ _cache_map = std::move(other._cache_map);
403
+ _max_size = other._max_size;
404
+ return *this;
405
+ }
406
+
407
+ // If key is in this cache, return the cached config. Otherwise, emplace the
408
+ // config in this cache and return it.
409
+ // Return const reference because CuFFTConfig shouldn't be tampered with once
410
+ // created.
411
+ const CuFFTConfig &lookup(CuFFTParams params) {
412
+ AT_ASSERT(_max_size > 0);
413
+
414
+ map_kkv_iter_t map_it = _cache_map.find(params);
415
+ // Hit, put to list front
416
+ if (map_it != _cache_map.end()) {
417
+ _usage_list.splice(_usage_list.begin(), _usage_list, map_it->second);
418
+ return map_it->second->second;
419
+ }
420
+
421
+ // Miss
422
+ // remove if needed
423
+ if (_usage_list.size() >= _max_size) {
424
+ auto last = _usage_list.end();
425
+ last--;
426
+ _cache_map.erase(last->first);
427
+ _usage_list.pop_back();
428
+ }
429
+
430
+ // construct new plan at list front, then insert into _cache_map
431
+ _usage_list.emplace_front(std::piecewise_construct,
432
+ std::forward_as_tuple(params),
433
+ std::forward_as_tuple(params));
434
+ auto kv_it = _usage_list.begin();
435
+ _cache_map.emplace(std::piecewise_construct,
436
+ std::forward_as_tuple(kv_it->first),
437
+ std::forward_as_tuple(kv_it));
438
+ return kv_it->second;
439
+ }
440
+
441
+ void clear() {
442
+ _cache_map.clear();
443
+ _usage_list.clear();
444
+ }
445
+
446
+ void resize(int64_t new_size) {
447
+ _set_max_size(new_size);
448
+ auto cur_size = _usage_list.size();
449
+ if (cur_size > _max_size) {
450
+ auto delete_it = _usage_list.end();
451
+ for (size_t i = 0; i < cur_size - _max_size; i++) {
452
+ delete_it--;
453
+ _cache_map.erase(delete_it->first);
454
+ }
455
+ _usage_list.erase(delete_it, _usage_list.end());
456
+ }
457
+ }
458
+
459
+ size_t size() const { return _cache_map.size(); }
460
+
461
+ size_t max_size() const noexcept { return _max_size; }
462
+
463
+ std::mutex mutex;
464
+
465
+ private:
466
+ // Only sets size and does value check. Does not resize the data structures.
467
+ void _set_max_size(int64_t new_size) {
468
+ // We check that 0 <= new_size <= CUFFT_MAX_PLAN_NUM here. Since
469
+ // CUFFT_MAX_PLAN_NUM is of type size_t, we need to do non-negativity check
470
+ // first.
471
+ TORCH_CHECK(new_size >= 0,
472
+ "cuFFT plan cache size must be non-negative, but got ", new_size);
473
+ TORCH_CHECK(new_size <= CUFFT_MAX_PLAN_NUM,
474
+ "cuFFT plan cache size can not be larger than ", CUFFT_MAX_PLAN_NUM, ", but got ", new_size);
475
+ _max_size = static_cast<size_t>(new_size);
476
+ }
477
+
478
+ std::list<kv_t> _usage_list;
479
+ map_t _cache_map;
480
+ size_t _max_size;
481
+ };
482
+
483
+ // Since ATen is separated into CPU build and CUDA build, we need a way to call
484
+ // these functions only when CUDA is loaded. We use CUDA hooks for this purpose
485
+ // (at cuda/detail/CUDAHooks.cpp), and call the hooked functions from the actual
486
+ // native function counterparts (at native/SpectralOps.cpp), i.e.,
487
+ // _cufft_get_plan_cache_max_size, _cufft_set_plan_cache_max_size
488
+ // _cufft_get_plan_cache_size, and _cufft_clear_plan_cache.
489
+ int64_t cufft_get_plan_cache_max_size_impl(DeviceIndex device_index);
490
+ void cufft_set_plan_cache_max_size_impl(DeviceIndex device_index, int64_t max_size);
491
+ int64_t cufft_get_plan_cache_size_impl(DeviceIndex device_index);
492
+ void cufft_clear_plan_cache_impl(DeviceIndex device_index);
493
+
494
+ }}} // namespace at::native::detail
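To make the intended call pattern explicit: a caller builds a CuFFTParams key from strides, sizes, transform type and dtype, locks the cache's public mutex, and only uses the returned CuFFTConfig& while the lock is held, per the thread-safety note above. Below is a hedged sketch of that flow; get_plan_cache_for_device and planned_workspace_size_sketch are hypothetical stand-ins, since the real per-device accessor lives elsewhere in ATen.

// Sketch only: intended usage of CuFFTParamsLRUCache::lookup.
// get_plan_cache_for_device is hypothetical; everything else follows the
// declarations in this header.
#include <ATen/native/cuda/CuFFTPlanCache.h>
#include <mutex>

namespace at { namespace native { namespace detail {

CuFFTParamsLRUCache& get_plan_cache_for_device(DeviceIndex device);  // hypothetical accessor

inline int64_t planned_workspace_size_sketch(
    DeviceIndex device,
    IntArrayRef in_strides, IntArrayRef out_strides, IntArrayRef sizes,
    CuFFTTransformType fft_type, ScalarType value_type) {
  CuFFTParams params(in_strides, out_strides, sizes, fft_type, value_type);
  auto& cache = get_plan_cache_for_device(device);
  // lookup() and the returned reference are only safe to use under the mutex.
  std::lock_guard<std::mutex> guard(cache.mutex);
  const CuFFTConfig& config = cache.lookup(params);
  return config.workspace_size();
}

}}} // namespace at::native::detail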
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cuda/CuFFTUtils.h ADDED
@@ -0,0 +1,73 @@
1
+ #pragma once
2
+
3
+ #include <ATen/Config.h>
4
+
5
+ #include <string>
6
+ #include <stdexcept>
7
+ #include <sstream>
8
+ #include <cufft.h>
9
+ #include <cufftXt.h>
10
+
11
+ namespace at { namespace native {
12
+
13
+ // This means that max dim is 3 + 2 = 5 with batch dimension and possible
14
+ // complex dimension
15
+ constexpr int max_rank = 3;
16
+
17
+ static inline std::string _cudaGetErrorEnum(cufftResult error)
18
+ {
19
+ switch (error)
20
+ {
21
+ case CUFFT_SUCCESS:
22
+ return "CUFFT_SUCCESS";
23
+ case CUFFT_INVALID_PLAN:
24
+ return "CUFFT_INVALID_PLAN";
25
+ case CUFFT_ALLOC_FAILED:
26
+ return "CUFFT_ALLOC_FAILED";
27
+ case CUFFT_INVALID_TYPE:
28
+ return "CUFFT_INVALID_TYPE";
29
+ case CUFFT_INVALID_VALUE:
30
+ return "CUFFT_INVALID_VALUE";
31
+ case CUFFT_INTERNAL_ERROR:
32
+ return "CUFFT_INTERNAL_ERROR";
33
+ case CUFFT_EXEC_FAILED:
34
+ return "CUFFT_EXEC_FAILED";
35
+ case CUFFT_SETUP_FAILED:
36
+ return "CUFFT_SETUP_FAILED";
37
+ case CUFFT_INVALID_SIZE:
38
+ return "CUFFT_INVALID_SIZE";
39
+ case CUFFT_UNALIGNED_DATA:
40
+ return "CUFFT_UNALIGNED_DATA";
41
+ case CUFFT_INCOMPLETE_PARAMETER_LIST:
42
+ return "CUFFT_INCOMPLETE_PARAMETER_LIST";
43
+ case CUFFT_INVALID_DEVICE:
44
+ return "CUFFT_INVALID_DEVICE";
45
+ case CUFFT_PARSE_ERROR:
46
+ return "CUFFT_PARSE_ERROR";
47
+ case CUFFT_NO_WORKSPACE:
48
+ return "CUFFT_NO_WORKSPACE";
49
+ case CUFFT_NOT_IMPLEMENTED:
50
+ return "CUFFT_NOT_IMPLEMENTED";
51
+ #if !defined(USE_ROCM)
52
+ case CUFFT_LICENSE_ERROR:
53
+ return "CUFFT_LICENSE_ERROR";
54
+ #endif
55
+ case CUFFT_NOT_SUPPORTED:
56
+ return "CUFFT_NOT_SUPPORTED";
57
+ default:
58
+ std::ostringstream ss;
59
+ ss << "unknown error " << error;
60
+ return ss.str();
61
+ }
62
+ }
63
+
64
+ static inline void CUFFT_CHECK(cufftResult error)
65
+ {
66
+ if (error != CUFFT_SUCCESS) {
67
+ std::ostringstream ss;
68
+ ss << "cuFFT error: " << _cudaGetErrorEnum(error);
69
+ AT_ERROR(ss.str());
70
+ }
71
+ }
72
+
73
+ }} // at::native
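As a closing usage note, CUFFT_CHECK above is meant to wrap every raw cuFFT call so that any non-CUFFT_SUCCESS status is converted into a readable ATen error via _cudaGetErrorEnum. A minimal sketch follows; the function name and plan parameters are arbitrary demo values, not part of this header.

// Minimal sketch: wrap raw cuFFT calls in CUFFT_CHECK so failures throw with
// a readable error string instead of returning a silent status code.
#include <ATen/native/cuda/CuFFTUtils.h>
#include <cufft.h>
#include <cstddef>

namespace at { namespace native {

void cufft_check_demo_sketch() {
  cufftHandle plan;
  CUFFT_CHECK(cufftCreate(&plan));
  // Plan a 1-D single-precision complex-to-complex transform of length 1024,
  // batch 1; these are arbitrary demo values.
  size_t workspace_size = 0;
  CUFFT_CHECK(cufftMakePlan1d(plan, /*nx=*/1024, CUFFT_C2C, /*batch=*/1, &workspace_size));
  CUFFT_CHECK(cufftDestroy(plan));
}

}} // namespace at::native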