applied-ai-018 commited on
Commit
083197a
·
verified ·
1 Parent(s): 89256fc

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/FlushDenormal.h +14 -0
  2. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/Utils.h +10 -0
  3. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/functional.h +4 -0
  4. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/functional_base.h +329 -0
  5. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/functional_bfloat16.h +574 -0
  6. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/intrinsics.h +43 -0
  7. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec.h +47 -0
  8. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/missing_vld1_neon.h +452 -0
  9. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/missing_vst1_neon.h +8 -0
  10. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256.h +289 -0
  11. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_bfloat16.h +1090 -0
  12. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_complex_double.h +431 -0
  13. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_complex_float.h +468 -0
  14. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_double.h +432 -0
  15. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_float.h +565 -0
  16. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_float_neon.h +879 -0
  17. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_int.h +1540 -0
  18. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_qint.h +1327 -0
  19. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_bfloat16_vsx.h +56 -0
  20. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_float_vsx.h +449 -0
  21. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_int16_vsx.h +368 -0
  22. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_int32_vsx.h +298 -0
  23. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_qint32_vsx.h +245 -0
  24. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_qint8_vsx.h +396 -0
  25. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_quint8_vsx.h +407 -0
  26. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vsx_helpers.h +473 -0
  27. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec_base.h +1077 -0
  28. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec_half.h +50 -0
  29. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/AtomicAddFloat.h +37 -0
  30. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/CatKernel.h +12 -0
  31. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/CopyKernel.h +12 -0
  32. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/DistributionTemplates.h +368 -0
  33. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/IsContiguous.h +62 -0
  34. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/ReduceUtils.h +240 -0
  35. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/SoftmaxKernel.h +28 -0
  36. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/SpmmReduceKernel.h +22 -0
  37. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/StackKernel.h +12 -0
  38. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/mixed_data_type.h +41 -0
  39. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/Activation.h +20 -0
  40. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/Copy.h +10 -0
  41. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/Distributions.h +25 -0
  42. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/EmbeddingBackwardKernel.cuh +22 -0
  43. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/IndexKernel.h +16 -0
  44. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/Math.cuh +0 -0
  45. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/MemoryAccess.cuh +385 -0
  46. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/PersistentSoftmax.cuh +401 -0
  47. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/Resize.h +61 -0
  48. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/ScanKernels.h +18 -0
  49. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/Sorting.h +18 -0
  50. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/SortingRadixSelect.cuh +429 -0
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/FlushDenormal.h ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /// Flush-To-Zero and Denormals-Are-Zero mode
2
+ ///
3
+ /// Flush-To-Zero (FTZ) and Denormals-Are-Zero (DAZ) are modes that bypass
4
+ /// IEEE 754 methods of dealing with denormal floating-point numbers on x86-64
5
+ /// and some x86 CPUs. They result in reduced precision for values near zero,
6
+ /// but increased performance.
7
+ ///
8
+ /// See https://software.intel.com/en-us/articles/x87-and-sse-floating-point-assists-in-ia-32-flush-to-zero-ftz-and-denormals-are-zero-daz
9
+
10
+ namespace at::cpu {
11
+
12
+ bool set_flush_denormal(bool on);
13
+
14
+ } // namespace at::cpu
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/Utils.h ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/macros/Export.h>
4
+
5
+ namespace at::cpu {
6
+
7
+ // Detect if CPU support Vector Neural Network Instruction.
8
+ TORCH_API bool is_cpu_support_vnni();
9
+
10
+ } // namespace at::cpu
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/functional.h ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/cpu/vec/functional_base.h>
4
+ #include <ATen/cpu/vec/functional_bfloat16.h>
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/functional_base.h ADDED
@@ -0,0 +1,329 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+
6
+ #include <ATen/cpu/vec/vec.h>
7
+ #include <c10/util/irange.h>
8
+
9
+ namespace at::vec {
10
+
11
+ // slow path
12
+ template <typename scalar_t, typename Op>
13
+ inline scalar_t vec_reduce_all(
14
+ const Op& vec_fun,
15
+ vec::Vectorized<scalar_t> acc_vec,
16
+ int64_t size) {
17
+ using Vec = vec::Vectorized<scalar_t>;
18
+ scalar_t acc_arr[Vec::size()];
19
+ acc_vec.store(acc_arr);
20
+ for (const auto i : c10::irange(1, size)) {
21
+ std::array<scalar_t, Vec::size()> acc_arr_next = {0};
22
+ acc_arr_next[0] = acc_arr[i];
23
+ Vec acc_vec_next = Vec::loadu(acc_arr_next.data());
24
+ acc_vec = vec_fun(acc_vec, acc_vec_next);
25
+ }
26
+ acc_vec.store(acc_arr);
27
+ return acc_arr[0];
28
+ }
29
+
30
+ template <typename scalar_t, typename Op>
31
+ struct VecReduceAllSIMD {
32
+ static inline scalar_t apply(const Op& vec_fun, const Vectorized<scalar_t>& acc_vec) {
33
+ return vec_reduce_all(vec_fun, acc_vec, Vectorized<scalar_t>::size());
34
+ }
35
+ };
36
+
37
+ #if defined(__GNUC__) && (__GNUC__ > 5) && !defined(_MSC_VER) && !defined(C10_MOBILE)
38
+ #if defined(CPU_CAPABILITY_AVX2)
39
+ template <typename Op>
40
+ struct VecReduceAllSIMD<float, Op> {
41
+ static inline float apply(const Op& vec_fun, const Vectorized<float>& acc_vec) {
42
+ using Vec = Vectorized<float>;
43
+ Vec v = acc_vec;
44
+ // 128-bit shuffle
45
+ Vec v1 = _mm256_permute2f128_ps(v, v, 0x1);
46
+ v = vec_fun(v, v1);
47
+ // 64-bit shuffle
48
+ v1 = _mm256_shuffle_ps(v, v, 0x4E);
49
+ v = vec_fun(v, v1);
50
+ // 32-bit shuffle
51
+ v1 = _mm256_shuffle_ps(v, v, 0xB1);
52
+ v = vec_fun(v, v1);
53
+ return _mm256_cvtss_f32(v);
54
+ }
55
+ };
56
+ #endif // defined(CPU_CAPABILITY_AVX2)
57
+ #if defined(CPU_CAPABILITY_AVX512)
58
+ template <typename Op>
59
+ struct VecReduceAllSIMD<float, Op> {
60
+ static inline float apply(const Op& vec_fun, const Vectorized<float>& acc_vec) {
61
+ using Vec = Vectorized<float>;
62
+ Vec v = acc_vec;
63
+ // 256-bit shuffle
64
+ Vec v1 = _mm512_shuffle_f32x4(v, v, 0x4E);
65
+ v = vec_fun(v, v1);
66
+ // 128-bit shuffle
67
+ v1 = _mm512_shuffle_f32x4(v, v, 0xB1);
68
+ v = vec_fun(v, v1);
69
+ // 64-bit shuffle
70
+ v1 = _mm512_shuffle_ps(v, v, 0x4E);
71
+ v = vec_fun(v, v1);
72
+ // 32-bit shuffle
73
+ v1 = _mm512_shuffle_ps(v, v, 0xB1);
74
+ v = vec_fun(v, v1);
75
+ return _mm512_cvtss_f32(v);
76
+ }
77
+ };
78
+ #endif // defined(CPU_CAPABILITY_AVX512)
79
+ #endif // defined(__GNUC__) && (__GNUC__ > 5) && !defined(_MSC_VER) && !defined(C10_MOBILE)
80
+
81
+ template <typename scalar_t, typename Op>
82
+ inline scalar_t vec_reduce_all(const Op& vec_fun, const Vectorized<scalar_t>& acc_vec) {
83
+ return VecReduceAllSIMD<scalar_t, Op>::apply(vec_fun, acc_vec);
84
+ }
85
+
86
+ template <typename scalar_t, typename Op,
87
+ typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0>
88
+ inline scalar_t reduce_all(const Op& vec_fun, const scalar_t* data, int64_t size) {
89
+ using Vec = vec::Vectorized<scalar_t>;
90
+ if (size < Vec::size())
91
+ return vec_reduce_all(vec_fun, Vec::loadu(data, size), size);
92
+ int64_t d = Vec::size();
93
+ Vec acc_vec = Vec::loadu(data);
94
+ for (; d < size - (size % Vec::size()); d += Vec::size()) {
95
+ Vec data_vec = Vec::loadu(data + d);
96
+ acc_vec = vec_fun(acc_vec, data_vec);
97
+ }
98
+ if (size - d > 0) {
99
+ Vec data_vec = Vec::loadu(data + d, size - d);
100
+ acc_vec = Vec::set(acc_vec, vec_fun(acc_vec, data_vec), size - d);
101
+ }
102
+ return vec_reduce_all(vec_fun, acc_vec);
103
+ }
104
+
105
+ // similar to reduce_all, but reduces into two outputs
106
+ template <typename scalar_t, typename Op1, typename Op2,
107
+ typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0>
108
+ inline std::pair<scalar_t, scalar_t> reduce2_all(const Op1& vec_fun1, const Op2& vec_fun2,
109
+ const scalar_t* data, int64_t size) {
110
+ using Vec = vec::Vectorized<scalar_t>;
111
+ if (size < Vec::size()) {
112
+ auto loaded_data = Vec::loadu(data, size);
113
+ return std::pair<scalar_t, scalar_t>(
114
+ vec_reduce_all(vec_fun1, loaded_data, size),
115
+ vec_reduce_all(vec_fun2, loaded_data, size));
116
+ }
117
+ int64_t d = Vec::size();
118
+ Vec acc_vec1 = Vec::loadu(data);
119
+ Vec acc_vec2 = Vec::loadu(data);
120
+ for (; d < size - (size % Vec::size()); d += Vec::size()) {
121
+ Vec data_vec = Vec::loadu(data + d);
122
+ acc_vec1 = vec_fun1(acc_vec1, data_vec);
123
+ acc_vec2 = vec_fun2(acc_vec2, data_vec);
124
+ }
125
+ if (size - d > 0) {
126
+ Vec data_vec = Vec::loadu(data + d, size - d);
127
+ acc_vec1 = Vec::set(acc_vec1, vec_fun1(acc_vec1, data_vec), size - d);
128
+ acc_vec2 = Vec::set(acc_vec2, vec_fun2(acc_vec2, data_vec), size - d);
129
+ }
130
+ return std::pair<scalar_t, scalar_t>(
131
+ vec_reduce_all(vec_fun1, acc_vec1),
132
+ vec_reduce_all(vec_fun2, acc_vec2));
133
+ }
134
+
135
+ template <typename scalar_t, typename MapOp, typename ReduceOp,
136
+ typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0>
137
+ inline scalar_t map_reduce_all(
138
+ const MapOp& map_fun,
139
+ const ReduceOp& red_fun,
140
+ const scalar_t* data,
141
+ int64_t size) {
142
+ using Vec = vec::Vectorized<scalar_t>;
143
+ if (size < Vec::size())
144
+ return vec_reduce_all(red_fun, map_fun(Vec::loadu(data, size)), size);
145
+ int64_t d = Vec::size();
146
+ Vec acc_vec = map_fun(Vec::loadu(data));
147
+ for (; d < size - (size % Vec::size()); d += Vec::size()) {
148
+ Vec data_vec = Vec::loadu(data + d);
149
+ data_vec = map_fun(data_vec);
150
+ acc_vec = red_fun(acc_vec, data_vec);
151
+ }
152
+ if (size - d > 0) {
153
+ Vec data_vec = Vec::loadu(data + d, size - d);
154
+ data_vec = map_fun(data_vec);
155
+ acc_vec = Vec::set(acc_vec, red_fun(acc_vec, data_vec), size - d);
156
+ }
157
+ return vec_reduce_all(red_fun, acc_vec);
158
+ }
159
+
160
+ template <typename scalar_t, typename MapOp, typename ReduceOp,
161
+ typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0>
162
+ inline scalar_t map2_reduce_all(
163
+ const MapOp& map_fun,
164
+ const ReduceOp& red_fun,
165
+ const scalar_t* data,
166
+ const scalar_t* data2,
167
+ int64_t size) {
168
+ using Vec = vec::Vectorized<scalar_t>;
169
+ if (size < Vec::size()) {
170
+ Vec data_vec = Vec::loadu(data, size);
171
+ Vec data2_vec = Vec::loadu(data2, size);
172
+ data_vec = map_fun(data_vec, data2_vec);
173
+ return vec_reduce_all(red_fun, data_vec, size);
174
+ }
175
+ int64_t d = Vec::size();
176
+ Vec acc_vec = map_fun(Vec::loadu(data), Vec::loadu(data2));
177
+ for (; d < size - (size % Vec::size()); d += Vec::size()) {
178
+ Vec data_vec = Vec::loadu(data + d);
179
+ Vec data2_vec = Vec::loadu(data2 + d);
180
+ data_vec = map_fun(data_vec, data2_vec);
181
+ acc_vec = red_fun(acc_vec, data_vec);
182
+ }
183
+ if (size - d > 0) {
184
+ Vec data_vec = Vec::loadu(data + d, size - d);
185
+ Vec data2_vec = Vec::loadu(data2 + d, size - d);
186
+ data_vec = map_fun(data_vec, data2_vec);
187
+ acc_vec = Vec::set(acc_vec, red_fun(acc_vec, data_vec), size - d);
188
+ }
189
+ return vec_reduce_all(red_fun, acc_vec);
190
+ }
191
+
192
+ template <typename scalar_t, typename MapOp, typename ReduceOp,
193
+ typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0>
194
+ inline scalar_t map3_reduce_all(
195
+ const MapOp& map_fun,
196
+ const ReduceOp& red_fun,
197
+ const scalar_t* data,
198
+ const scalar_t* data2,
199
+ const scalar_t* data3,
200
+ int64_t size) {
201
+ using Vec = vec::Vectorized<scalar_t>;
202
+ if (size < Vec::size()) {
203
+ Vec data_vec = Vec::loadu(data, size);
204
+ Vec data2_vec = Vec::loadu(data2, size);
205
+ Vec data3_vec = Vec::loadu(data3, size);
206
+ data_vec = map_fun(data_vec, data2_vec, data3_vec);
207
+ return vec_reduce_all(red_fun, data_vec, size);
208
+ }
209
+
210
+ int64_t d = Vec::size();
211
+ Vec acc_vec = map_fun(Vec::loadu(data), Vec::loadu(data2), Vec::loadu(data3));
212
+ for (; d < size - (size % Vec::size()); d += Vec::size()) {
213
+ Vec data_vec = Vec::loadu(data + d);
214
+ Vec data2_vec = Vec::loadu(data2 + d);
215
+ Vec data3_vec = Vec::loadu(data3 + d);
216
+ data_vec = map_fun(data_vec, data2_vec, data3_vec);
217
+ acc_vec = red_fun(acc_vec, data_vec);
218
+ }
219
+ if (size - d > 0) {
220
+ Vec data_vec = Vec::loadu(data + d, size - d);
221
+ Vec data2_vec = Vec::loadu(data2 + d, size - d);
222
+ Vec data3_vec = Vec::loadu(data3 + d, size - d);
223
+ data_vec = map_fun(data_vec, data2_vec, data3_vec);
224
+ acc_vec = Vec::set(acc_vec, red_fun(acc_vec, data_vec), size - d);
225
+ }
226
+ return vec_reduce_all(red_fun, acc_vec);
227
+ }
228
+
229
+ template <typename scalar_t, typename Op,
230
+ typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0>
231
+ inline void map(
232
+ const Op& vec_fun,
233
+ scalar_t* output_data,
234
+ const scalar_t* input_data,
235
+ int64_t size) {
236
+ using Vec = vec::Vectorized<scalar_t>;
237
+ int64_t d = 0;
238
+ for (; d < size - (size % Vec::size()); d += Vec::size()) {
239
+ Vec output_vec = vec_fun(Vec::loadu(input_data + d));
240
+ output_vec.store(output_data + d);
241
+ }
242
+ if (size - d > 0) {
243
+ Vec output_vec = vec_fun(Vec::loadu(input_data + d, size - d));
244
+ output_vec.store(output_data + d, size - d);
245
+ }
246
+ }
247
+
248
+ template <typename scalar_t, typename Op,
249
+ typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0>
250
+ inline void map2(
251
+ const Op& vec_fun,
252
+ scalar_t* output_data,
253
+ const scalar_t* input_data,
254
+ const scalar_t* input_data2,
255
+ int64_t size) {
256
+ using Vec = vec::Vectorized<scalar_t>;
257
+ int64_t d = 0;
258
+ for (; d < size - (size % Vec::size()); d += Vec::size()) {
259
+ Vec data_vec = Vec::loadu(input_data + d);
260
+ Vec data_vec2 = Vec::loadu(input_data2 + d);
261
+ Vec output_vec = vec_fun(data_vec, data_vec2);
262
+ output_vec.store(output_data + d);
263
+ }
264
+ if (size - d > 0) {
265
+ Vec data_vec = Vec::loadu(input_data + d, size - d);
266
+ Vec data_vec2 = Vec::loadu(input_data2 + d, size - d);
267
+ Vec output_vec = vec_fun(data_vec, data_vec2);
268
+ output_vec.store(output_data + d, size - d);
269
+ }
270
+ }
271
+
272
+ template <typename scalar_t, typename Op,
273
+ typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0>
274
+ inline void map3(
275
+ const Op& vec_fun,
276
+ scalar_t* output_data,
277
+ const scalar_t* input_data1,
278
+ const scalar_t* input_data2,
279
+ const scalar_t* input_data3,
280
+ int64_t size) {
281
+ using Vec = vec::Vectorized<scalar_t>;
282
+ int64_t d = 0;
283
+ for (; d < size - (size % Vec::size()); d += Vec::size()) {
284
+ Vec data_vec1 = Vec::loadu(input_data1 + d);
285
+ Vec data_vec2 = Vec::loadu(input_data2 + d);
286
+ Vec data_vec3 = Vec::loadu(input_data3 + d);
287
+ Vec output_vec = vec_fun(data_vec1, data_vec2, data_vec3);
288
+ output_vec.store(output_data + d);
289
+ }
290
+ if (size - d > 0) {
291
+ Vec data_vec1 = Vec::loadu(input_data1 + d, size - d);
292
+ Vec data_vec2 = Vec::loadu(input_data2 + d, size - d);
293
+ Vec data_vec3 = Vec::loadu(input_data3 + d, size - d);
294
+ Vec output_vec = vec_fun(data_vec1, data_vec2, data_vec3);
295
+ output_vec.store(output_data + d, size - d);
296
+ }
297
+ }
298
+
299
+ template <typename scalar_t, typename Op,
300
+ typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0>
301
+ inline void map4(
302
+ const Op& vec_fun,
303
+ scalar_t* output_data,
304
+ const scalar_t* input_data1,
305
+ const scalar_t* input_data2,
306
+ const scalar_t* input_data3,
307
+ const scalar_t* input_data4,
308
+ int64_t size) {
309
+ using Vec = vec::Vectorized<scalar_t>;
310
+ int64_t d = 0;
311
+ for (; d < size - (size % Vec::size()); d += Vec::size()) {
312
+ Vec data_vec1 = Vec::loadu(input_data1 + d);
313
+ Vec data_vec2 = Vec::loadu(input_data2 + d);
314
+ Vec data_vec3 = Vec::loadu(input_data3 + d);
315
+ Vec data_vec4 = Vec::loadu(input_data4 + d);
316
+ Vec output_vec = vec_fun(data_vec1, data_vec2, data_vec3, data_vec4);
317
+ output_vec.store(output_data + d);
318
+ }
319
+ if (size - d > 0) {
320
+ Vec data_vec1 = Vec::loadu(input_data1 + d, size - d);
321
+ Vec data_vec2 = Vec::loadu(input_data2 + d, size - d);
322
+ Vec data_vec3 = Vec::loadu(input_data3 + d, size - d);
323
+ Vec data_vec4 = Vec::loadu(input_data4 + d, size - d);
324
+ Vec output_vec = vec_fun(data_vec1, data_vec2, data_vec3, data_vec4);
325
+ output_vec.store(output_data + d, size - d);
326
+ }
327
+ }
328
+
329
+ } // namespace at::vec
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/functional_bfloat16.h ADDED
@@ -0,0 +1,574 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+
6
+ #include <ATen/cpu/vec/vec.h>
7
+
8
+ namespace at::vec {
9
+
10
+ // BFloat16 specification
11
+ template <typename scalar_t> struct VecScalarType { using type = scalar_t; };
12
+ template <> struct VecScalarType<BFloat16> { using type = float; };
13
+ template <> struct VecScalarType<Half> { using type = float; };
14
+
15
+ // This is different from at::acc_type since we only need to specialize BFloat16
16
+ template <typename scalar_t>
17
+ using vec_scalar_t = typename VecScalarType<scalar_t>::type;
18
+
19
+ // Vector conversion between float and bfloat16/half
20
+ template <typename scalar_t,
21
+ typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
22
+ inline std::tuple<Vectorized<float>, Vectorized<float>> convert_to_float(const Vectorized<scalar_t>&);
23
+
24
+ template <>
25
+ inline std::tuple<Vectorized<float>, Vectorized<float>> convert_to_float<BFloat16> (const Vectorized<BFloat16>& a) {
26
+ return convert_bfloat16_float(a);
27
+ }
28
+
29
+ template <>
30
+ inline std::tuple<Vectorized<float>, Vectorized<float>> convert_to_float<Half> (const Vectorized<Half>& a) {
31
+ return convert_half_float(a);
32
+ }
33
+
34
+ template <typename scalar_t,
35
+ typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
36
+ inline Vectorized<scalar_t> convert_from_float(const Vectorized<float>&, const Vectorized<float>&);
37
+
38
+ template <>
39
+ inline Vectorized<BFloat16> convert_from_float<BFloat16>(const Vectorized<float>& a, const Vectorized<float>& b) {
40
+ return convert_float_bfloat16(a, b);
41
+ }
42
+
43
+ template <>
44
+ inline Vectorized<Half> convert_from_float<Half>(const Vectorized<float>& a, const Vectorized<float>& b) {
45
+ return convert_float_half(a, b);
46
+ }
47
+
48
+ // Note that we already have specialized member of Vectorized<scalar_t> for BFloat16
49
+ // so the following functions would run smoothly:
50
+ // using Vec = Vectorized<BFloat16>;
51
+ // Vec one = Vec(BFloat16(1));
52
+ // vec::map([](Vec x) { return one / (one + x.exp()); }, y_ptr, x_ptr, N);
53
+ //
54
+ // Then why we still need to specialize "functional"?
55
+ // If we do specialization at Vectorized<> level, the above example would need 3 pairs of
56
+ // conversion of bf16->fp32/fp32->bf16, each for ".exp()", "+" and "/".
57
+ // If we do specialization at vec::map<>() level, we have only 1 pair of conversion
58
+ // of bf16->fp32/fp32->bf16, for the input and output BFloat16 vector only.
59
+ //
60
+ // The following BFloat16 functionality will only do data type conversion for input
61
+ // and output vector (reduce functionality will only convert the final scalar back to bf16).
62
+ // Compared to Vectorized<> specialization,
63
+ // 1. better performance since we have less data type conversion;
64
+ // 2. less rounding error since immediate results are kept in fp32;
65
+ // 3. accumulation done on data type of fp32.
66
+ //
67
+ // If you plan to extend this file, please ensure adding unit tests at
68
+ // aten/src/ATen/test/vec_test_all_types.cpp
69
+ //
70
+ template <typename scalar_t, typename Op,
71
+ typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
72
+ inline scalar_t reduce_all(const Op& vec_fun, const scalar_t* data, int64_t size) {
73
+ using bVec = vec::Vectorized<scalar_t>;
74
+ using fVec = vec::Vectorized<float>;
75
+ if (size < bVec::size()) {
76
+ bVec data_bvec = bVec::loadu(data, size);
77
+ fVec data_fvec0, data_fvec1;
78
+ std::tie(data_fvec0, data_fvec1) = convert_to_float<scalar_t>(data_bvec);
79
+ if (size > fVec::size()) {
80
+ data_fvec0 = fVec::set(data_fvec0, vec_fun(data_fvec0, data_fvec1), size - fVec::size());
81
+ return vec_reduce_all<float>(vec_fun, data_fvec0, fVec::size());
82
+ } else {
83
+ return vec_reduce_all<float>(vec_fun, data_fvec0, size);
84
+ }
85
+ }
86
+ int64_t d = bVec::size();
87
+ bVec acc_bvec = bVec::loadu(data);
88
+ fVec acc_fvec0, acc_fvec1;
89
+ std::tie(acc_fvec0, acc_fvec1) = convert_to_float<scalar_t>(acc_bvec);
90
+ for (; d < size - (size % bVec::size()); d += bVec::size()) {
91
+ bVec data_bvec = bVec::loadu(data + d);
92
+ fVec data_fvec0, data_fvec1;
93
+ std::tie(data_fvec0, data_fvec1) = convert_to_float<scalar_t>(data_bvec);
94
+ acc_fvec0 = vec_fun(acc_fvec0, data_fvec0);
95
+ acc_fvec1 = vec_fun(acc_fvec1, data_fvec1);
96
+ }
97
+ if (size - d > 0) {
98
+ bVec data_bvec = bVec::loadu(data + d, size - d);
99
+ fVec data_fvec0, data_fvec1;
100
+ std::tie(data_fvec0, data_fvec1) = convert_to_float<scalar_t>(data_bvec);
101
+ if (size - d > fVec::size()) {
102
+ acc_fvec0 = vec_fun(acc_fvec0, data_fvec0);
103
+ acc_fvec1 = fVec::set(acc_fvec1, vec_fun(acc_fvec1, data_fvec1), size - d - fVec::size());
104
+ } else {
105
+ acc_fvec0 = fVec::set(acc_fvec0, vec_fun(acc_fvec0, data_fvec0), size - d);
106
+ }
107
+ }
108
+ acc_fvec0 = vec_fun(acc_fvec0, acc_fvec1);
109
+ return vec_reduce_all<float>(vec_fun, acc_fvec0);
110
+ }
111
+
112
+ template <typename scalar_t, typename Op1, typename Op2,
113
+ typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
114
+ inline std::pair<scalar_t, scalar_t> reduce2_all(const Op1& vec_fun1, const Op2& vec_fun2,
115
+ const scalar_t* data, int64_t size) {
116
+ using bVec = vec::Vectorized<scalar_t>;
117
+ using fVec = vec::Vectorized<float>;
118
+ if (size < bVec::size()) {
119
+ bVec data_bvec = bVec::loadu(data, size);
120
+ fVec data_fvec0, data_fvec1;
121
+ std::tie(data_fvec0, data_fvec1) = convert_to_float<scalar_t>(data_bvec);
122
+ if (size > fVec::size()) {
123
+ fVec acc1_fvec = fVec::set(data_fvec0, vec_fun1(data_fvec0, data_fvec1), size - fVec::size());
124
+ fVec acc2_fvec = fVec::set(data_fvec0, vec_fun2(data_fvec0, data_fvec1), size - fVec::size());
125
+ return std::pair<scalar_t, scalar_t>(
126
+ vec_reduce_all<float>(vec_fun1, acc1_fvec, fVec::size()),
127
+ vec_reduce_all<float>(vec_fun2, acc2_fvec, fVec::size()));
128
+ } else {
129
+ return std::pair<scalar_t, scalar_t>(
130
+ vec_reduce_all<float>(vec_fun1, data_fvec0, size),
131
+ vec_reduce_all<float>(vec_fun2, data_fvec0, size));
132
+ }
133
+ }
134
+ int64_t d = bVec::size();
135
+ bVec acc_bvec = bVec::loadu(data);
136
+ fVec acc1_fvec0, acc1_fvec1;
137
+ std::tie(acc1_fvec0, acc1_fvec1) = convert_to_float<scalar_t>(acc_bvec);
138
+ fVec acc2_fvec0, acc2_fvec1;
139
+ std::tie(acc2_fvec0, acc2_fvec1) = convert_to_float<scalar_t>(acc_bvec);
140
+ for (; d < size - (size % bVec::size()); d += bVec::size()) {
141
+ bVec data_bvec = bVec::loadu(data + d);
142
+ fVec data_fvec0, data_fvec1;
143
+ std::tie(data_fvec0, data_fvec1) = convert_to_float<scalar_t>(data_bvec);
144
+ acc1_fvec0 = vec_fun1(acc1_fvec0, data_fvec0);
145
+ acc1_fvec1 = vec_fun1(acc1_fvec1, data_fvec1);
146
+ acc2_fvec0 = vec_fun2(acc2_fvec0, data_fvec0);
147
+ acc2_fvec1 = vec_fun2(acc2_fvec1, data_fvec1);
148
+ }
149
+ if (size - d > 0) {
150
+ bVec data_bvec = bVec::loadu(data + d, size - d);
151
+ fVec data_fvec0, data_fvec1;
152
+ std::tie(data_fvec0, data_fvec1) = convert_to_float<scalar_t>(data_bvec);
153
+ if (size - d > fVec::size()) {
154
+ acc1_fvec0 = vec_fun1(acc1_fvec0, data_fvec0);
155
+ acc1_fvec1 = fVec::set(acc1_fvec1, vec_fun1(acc1_fvec1, data_fvec1), size - d - fVec::size());
156
+ acc2_fvec0 = vec_fun2(acc2_fvec0, data_fvec0);
157
+ acc2_fvec1 = fVec::set(acc2_fvec1, vec_fun2(acc2_fvec1, data_fvec1), size - d - fVec::size());
158
+ } else {
159
+ acc1_fvec0 = fVec::set(acc1_fvec0, vec_fun1(acc1_fvec0, data_fvec0), size - d);
160
+ acc2_fvec0 = fVec::set(acc2_fvec0, vec_fun2(acc2_fvec0, data_fvec0), size - d);
161
+ }
162
+ }
163
+ acc1_fvec0 = vec_fun1(acc1_fvec0, acc1_fvec1);
164
+ acc2_fvec0 = vec_fun2(acc2_fvec0, acc2_fvec1);
165
+ return std::pair<scalar_t, scalar_t>(
166
+ vec_reduce_all<float>(vec_fun1, acc1_fvec0),
167
+ vec_reduce_all<float>(vec_fun2, acc2_fvec0));
168
+ }
169
+
170
+ template <typename scalar_t, typename MapOp, typename ReduceOp,
171
+ typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
172
+ inline scalar_t map_reduce_all(
173
+ const MapOp& map_fun,
174
+ const ReduceOp& red_fun,
175
+ const scalar_t* data,
176
+ int64_t size) {
177
+ using bVec = vec::Vectorized<scalar_t>;
178
+ using fVec = vec::Vectorized<float>;
179
+ if (size < bVec::size()) {
180
+ bVec data_bvec = bVec::loadu(data, size);
181
+ fVec data_fvec0, data_fvec1;
182
+ std::tie(data_fvec0, data_fvec1) = convert_to_float<scalar_t>(data_bvec);
183
+ if (size > fVec::size()) {
184
+ data_fvec0 = map_fun(data_fvec0);
185
+ data_fvec1 = map_fun(data_fvec1);
186
+ data_fvec0 = fVec::set(data_fvec0, red_fun(data_fvec0, data_fvec1), size - fVec::size());
187
+ return vec_reduce_all<float>(red_fun, data_fvec0, fVec::size());
188
+ } else {
189
+ data_fvec0 = map_fun(data_fvec0);
190
+ return vec_reduce_all<float>(red_fun, data_fvec0, size);
191
+ }
192
+ }
193
+ int64_t d = bVec::size();
194
+ bVec acc_bvec = bVec::loadu(data);
195
+ fVec acc_fvec0, acc_fvec1;
196
+ std::tie(acc_fvec0, acc_fvec1) = convert_to_float<scalar_t>(acc_bvec);
197
+ acc_fvec0 = map_fun(acc_fvec0);
198
+ acc_fvec1 = map_fun(acc_fvec1);
199
+ for (; d < size - (size % bVec::size()); d += bVec::size()) {
200
+ bVec data_bvec = bVec::loadu(data + d);
201
+ fVec data_fvec0, data_fvec1;
202
+ std::tie(data_fvec0, data_fvec1) = convert_to_float<scalar_t>(data_bvec);
203
+ data_fvec0 = map_fun(data_fvec0);
204
+ data_fvec1 = map_fun(data_fvec1);
205
+ acc_fvec0 = red_fun(acc_fvec0, data_fvec0);
206
+ acc_fvec1 = red_fun(acc_fvec1, data_fvec1);
207
+ }
208
+ if (size - d > 0) {
209
+ bVec data_bvec = bVec::loadu(data + d, size - d);
210
+ fVec data_fvec0, data_fvec1;
211
+ std::tie(data_fvec0, data_fvec1) = convert_to_float<scalar_t>(data_bvec);
212
+ if (size - d > fVec::size()) {
213
+ data_fvec0 = map_fun(data_fvec0);
214
+ data_fvec1 = map_fun(data_fvec1);
215
+ acc_fvec0 = red_fun(acc_fvec0, data_fvec0);
216
+ acc_fvec1 = fVec::set(acc_fvec1, red_fun(acc_fvec1, data_fvec1), size - d - fVec::size());
217
+ } else {
218
+ data_fvec0 = map_fun(data_fvec0);
219
+ acc_fvec0 = fVec::set(acc_fvec0, red_fun(acc_fvec0, data_fvec0), size - d);
220
+ }
221
+ }
222
+ acc_fvec0 = red_fun(acc_fvec0, acc_fvec1);
223
+ return vec_reduce_all<float>(red_fun, acc_fvec0);
224
+ }
225
+
226
+ template <typename scalar_t, typename MapOp, typename ReduceOp,
227
+ typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
228
+ inline scalar_t map2_reduce_all(
229
+ const MapOp& map_fun,
230
+ const ReduceOp& red_fun,
231
+ const scalar_t* data,
232
+ const scalar_t* data2,
233
+ int64_t size) {
234
+ using bVec = vec::Vectorized<scalar_t>;
235
+ using fVec = vec::Vectorized<float>;
236
+ if (size < bVec::size()) {
237
+ bVec data_bvec = bVec::loadu(data, size);
238
+ fVec data_fvec0, data_fvec1;
239
+ std::tie(data_fvec0, data_fvec1) = convert_to_float<scalar_t>(data_bvec);
240
+ bVec data2_bvec = bVec::loadu(data2, size);
241
+ fVec data2_fvec0, data2_fvec1;
242
+ std::tie(data2_fvec0, data2_fvec1) = convert_to_float<scalar_t>(data2_bvec);
243
+ if (size > fVec::size()) {
244
+ data_fvec0 = map_fun(data_fvec0, data2_fvec0);
245
+ data_fvec1 = map_fun(data_fvec1, data2_fvec1);
246
+ data_fvec0 = fVec::set(data_fvec0, red_fun(data_fvec0, data_fvec1), size - fVec::size());
247
+ return vec_reduce_all<float>(red_fun, data_fvec0, fVec::size());
248
+ } else {
249
+ data_fvec0 = map_fun(data_fvec0, data2_fvec0);
250
+ return vec_reduce_all<float>(red_fun, data_fvec0, size);
251
+ }
252
+ }
253
+ int64_t d = bVec::size();
254
+ bVec acc_bvec = bVec::loadu(data);
255
+ fVec acc_fvec0, acc_fvec1;
256
+ std::tie(acc_fvec0, acc_fvec1) = convert_to_float<scalar_t>(acc_bvec);
257
+ bVec acc2_bvec = bVec::loadu(data2);
258
+ fVec acc2_fvec0, acc2_fvec1;
259
+ std::tie(acc2_fvec0, acc2_fvec1) = convert_to_float<scalar_t>(acc2_bvec);
260
+ acc_fvec0 = map_fun(acc_fvec0, acc2_fvec0);
261
+ acc_fvec1 = map_fun(acc_fvec1, acc2_fvec1);
262
+ for (; d < size - (size % bVec::size()); d += bVec::size()) {
263
+ bVec data_bvec = bVec::loadu(data + d);
264
+ fVec data_fvec0, data_fvec1;
265
+ std::tie(data_fvec0, data_fvec1) = convert_to_float<scalar_t>(data_bvec);
266
+ bVec data2_bvec = bVec::loadu(data2 + d);
267
+ fVec data2_fvec0, data2_fvec1;
268
+ std::tie(data2_fvec0, data2_fvec1) = convert_to_float<scalar_t>(data2_bvec);
269
+ data_fvec0 = map_fun(data_fvec0, data2_fvec0);
270
+ data_fvec1 = map_fun(data_fvec1, data2_fvec1);
271
+ acc_fvec0 = red_fun(acc_fvec0, data_fvec0);
272
+ acc_fvec1 = red_fun(acc_fvec1, data_fvec1);
273
+ }
274
+ if (size - d > 0) {
275
+ bVec data_bvec = bVec::loadu(data + d, size - d);
276
+ fVec data_fvec0, data_fvec1;
277
+ std::tie(data_fvec0, data_fvec1) = convert_to_float<scalar_t>(data_bvec);
278
+ bVec data2_bvec = bVec::loadu(data2 + d, size - d);
279
+ fVec data2_fvec0, data2_fvec1;
280
+ std::tie(data2_fvec0, data2_fvec1) = convert_to_float<scalar_t>(data2_bvec);
281
+ if (size - d > fVec::size()) {
282
+ data_fvec0 = map_fun(data_fvec0, data2_fvec0);
283
+ data_fvec1 = map_fun(data_fvec1, data2_fvec1);
284
+ acc_fvec0 = red_fun(acc_fvec0, data_fvec0);
285
+ acc_fvec1 = fVec::set(acc_fvec1, red_fun(acc_fvec1, data_fvec1), size - d - fVec::size());
286
+ } else {
287
+ data_fvec0 = map_fun(data_fvec0, data2_fvec0);
288
+ acc_fvec0 = fVec::set(acc_fvec0, red_fun(acc_fvec0, data_fvec0), size - d);
289
+ }
290
+ }
291
+ acc_fvec0 = red_fun(acc_fvec0, acc_fvec1);
292
+ return vec_reduce_all<float>(red_fun, acc_fvec0);
293
+ }
294
+
295
+ template <typename scalar_t, typename MapOp, typename ReduceOp,
296
+ typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
297
+ inline scalar_t map3_reduce_all(
298
+ const MapOp& map_fun,
299
+ const ReduceOp& red_fun,
300
+ const scalar_t* data,
301
+ const scalar_t* data2,
302
+ const scalar_t* data3,
303
+ int64_t size) {
304
+ using bVec = vec::Vectorized<scalar_t>;
305
+ using fVec = vec::Vectorized<float>;
306
+ if (size < bVec::size()) {
307
+ bVec data_bvec = bVec::loadu(data, size);
308
+ fVec data_fvec0, data_fvec1;
309
+ std::tie(data_fvec0, data_fvec1) = convert_to_float<scalar_t>(data_bvec);
310
+ bVec data2_bvec = bVec::loadu(data2, size);
311
+ fVec data2_fvec0, data2_fvec1;
312
+ std::tie(data2_fvec0, data2_fvec1) = convert_to_float<scalar_t>(data2_bvec);
313
+ bVec data3_bvec = bVec::loadu(data3, size);
314
+ fVec data3_fvec0, data3_fvec1;
315
+ std::tie(data3_fvec0, data3_fvec1) = convert_to_float<scalar_t>(data3_bvec);
316
+ if (size > fVec::size()) {
317
+ data_fvec0 = map_fun(data_fvec0, data2_fvec0, data3_fvec0);
318
+ data_fvec1 = map_fun(data_fvec1, data2_fvec1, data3_fvec1);
319
+ data_fvec0 = fVec::set(data_fvec0, red_fun(data_fvec0, data_fvec1), size - fVec::size());
320
+ return vec_reduce_all<float>(red_fun, data_fvec0, fVec::size());
321
+ } else {
322
+ data_fvec0 = map_fun(data_fvec0, data2_fvec0, data3_fvec0);
323
+ return vec_reduce_all<float>(red_fun, data_fvec0, size);
324
+ }
325
+ }
326
+ int64_t d = bVec::size();
327
+ bVec acc_bvec = bVec::loadu(data);
328
+ fVec acc_fvec0, acc_fvec1;
329
+ std::tie(acc_fvec0, acc_fvec1) = convert_to_float<scalar_t>(acc_bvec);
330
+ bVec acc2_bvec = bVec::loadu(data2);
331
+ fVec acc2_fvec0, acc2_fvec1;
332
+ std::tie(acc2_fvec0, acc2_fvec1) = convert_to_float<scalar_t>(acc2_bvec);
333
+ bVec acc3_bvec = bVec::loadu(data3);
334
+ fVec acc3_fvec0, acc3_fvec1;
335
+ std::tie(acc3_fvec0, acc3_fvec1) = convert_to_float<scalar_t>(acc3_bvec);
336
+ acc_fvec0 = map_fun(acc_fvec0, acc2_fvec0, acc3_fvec0);
337
+ acc_fvec1 = map_fun(acc_fvec1, acc2_fvec1, acc3_fvec1);
338
+ for (; d < size - (size % bVec::size()); d += bVec::size()) {
339
+ bVec data_bvec = bVec::loadu(data + d);
340
+ fVec data_fvec0, data_fvec1;
341
+ std::tie(data_fvec0, data_fvec1) = convert_to_float<scalar_t>(data_bvec);
342
+ bVec data2_bvec = bVec::loadu(data2 + d);
343
+ fVec data2_fvec0, data2_fvec1;
344
+ std::tie(data2_fvec0, data2_fvec1) = convert_to_float<scalar_t>(data2_bvec);
345
+ bVec data3_bvec = bVec::loadu(data3 + d);
346
+ fVec data3_fvec0, data3_fvec1;
347
+ std::tie(data3_fvec0, data3_fvec1) = convert_to_float<scalar_t>(data3_bvec);
348
+ data_fvec0 = map_fun(data_fvec0, data2_fvec0, data3_fvec0);
349
+ data_fvec1 = map_fun(data_fvec1, data2_fvec1, data3_fvec1);
350
+ acc_fvec0 = red_fun(acc_fvec0, data_fvec0);
351
+ acc_fvec1 = red_fun(acc_fvec1, data_fvec1);
352
+ }
353
+ if (size - d > 0) {
354
+ bVec data_bvec = bVec::loadu(data + d, size - d);
355
+ fVec data_fvec0, data_fvec1;
356
+ std::tie(data_fvec0, data_fvec1) = convert_to_float<scalar_t>(data_bvec);
357
+ bVec data2_bvec = bVec::loadu(data2 + d, size - d);
358
+ fVec data2_fvec0, data2_fvec1;
359
+ std::tie(data2_fvec0, data2_fvec1) = convert_to_float<scalar_t>(data2_bvec);
360
+ bVec data3_bvec = bVec::loadu(data3 + d, size - d);
361
+ fVec data3_fvec0, data3_fvec1;
362
+ std::tie(data3_fvec0, data3_fvec1) = convert_to_float<scalar_t>(data3_bvec);
363
+ if (size - d > fVec::size()) {
364
+ data_fvec0 = map_fun(data_fvec0, data2_fvec0, data3_fvec0);
365
+ data_fvec1 = map_fun(data_fvec1, data2_fvec1, data3_fvec1);
366
+ acc_fvec0 = red_fun(acc_fvec0, data_fvec0);
367
+ acc_fvec1 = fVec::set(acc_fvec1, red_fun(acc_fvec1, data_fvec1), size - d - fVec::size());
368
+ } else {
369
+ data_fvec0 = map_fun(data_fvec0, data2_fvec0, data3_fvec0);
370
+ acc_fvec0 = fVec::set(acc_fvec0, red_fun(acc_fvec0, data_fvec0), size - d);
371
+ }
372
+ }
373
+ acc_fvec0 = red_fun(acc_fvec0, acc_fvec1);
374
+ return vec_reduce_all<float>(red_fun, acc_fvec0);
375
+ }
376
+
377
+ template <typename scalar_t, typename Op,
378
+ typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
379
+ inline void map(
380
+ const Op& vec_fun,
381
+ scalar_t* output_data,
382
+ const scalar_t* input_data,
383
+ int64_t size) {
384
+ using bVec = vec::Vectorized<scalar_t>;
385
+ using fVec = vec::Vectorized<float>;
386
+ int64_t d = 0;
387
+ for (; d < size - (size % bVec::size()); d += bVec::size()) {
388
+ bVec data_bvec = bVec::loadu(input_data + d);
389
+ fVec data_fvec0, data_fvec1;
390
+ std::tie(data_fvec0, data_fvec1) = convert_to_float<scalar_t>(data_bvec);
391
+ fVec output_fvec0 = vec_fun(data_fvec0);
392
+ fVec output_fvec1 = vec_fun(data_fvec1);
393
+ bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
394
+ output_bvec.store(output_data + d);
395
+ }
396
+ if (size - d > 0) {
397
+ bVec data_bvec = bVec::loadu(input_data + d, size - d);
398
+ fVec data_fvec0, data_fvec1;
399
+ std::tie(data_fvec0, data_fvec1) = convert_to_float<scalar_t>(data_bvec);
400
+ fVec output_fvec0 = vec_fun(data_fvec0);
401
+ fVec output_fvec1 = vec_fun(data_fvec1);
402
+ bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
403
+ output_bvec.store(output_data + d, size - d);
404
+ }
405
+ }
406
+
407
+ template <typename scalar_t, typename Op,
408
+ typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
409
+ inline void map(
410
+ const Op& vec_fun,
411
+ scalar_t* output_data,
412
+ const float* input_data,
413
+ int64_t size) {
414
+ using bVec = vec::Vectorized<scalar_t>;
415
+ using fVec = vec::Vectorized<float>;
416
+ int64_t d = 0;
417
+ for (; d < size - (size % bVec::size()); d += bVec::size()) {
418
+ fVec data_fvec0 = fVec::loadu(input_data + d);
419
+ fVec data_fvec1 = fVec::loadu(input_data + d + fVec::size());
420
+ fVec output_fvec0 = vec_fun(data_fvec0);
421
+ fVec output_fvec1 = vec_fun(data_fvec1);
422
+ bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
423
+ output_bvec.store(output_data + d);
424
+ }
425
+ if (size - d > 0) {
426
+ fVec data_fvec0, data_fvec1;
427
+ if (size - d > fVec::size()) {
428
+ data_fvec0 = fVec::loadu(input_data + d);
429
+ data_fvec1 = fVec::loadu(input_data + d + fVec::size(), size - d - fVec::size());
430
+ } else {
431
+ // choose to align with behaviour of bVec::loadu(ptr, size),
432
+ // which leaves data_fvec1 uninitialized
433
+ data_fvec0 = fVec::loadu(input_data + d, size - d);
434
+ }
435
+ fVec output_fvec0 = vec_fun(data_fvec0);
436
+ fVec output_fvec1 = vec_fun(data_fvec1);
437
+ bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
438
+ output_bvec.store(output_data + d, size - d);
439
+ }
440
+ }
441
+
442
+ template <typename scalar_t, typename Op,
443
+ typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
444
+ inline void map2(
445
+ const Op& vec_fun,
446
+ scalar_t* output_data,
447
+ const scalar_t* input_data,
448
+ const scalar_t* input_data2,
449
+ int64_t size) {
450
+ using bVec = vec::Vectorized<scalar_t>;
451
+ using fVec = vec::Vectorized<float>;
452
+ int64_t d = 0;
453
+ for (; d < size - (size % bVec::size()); d += bVec::size()) {
454
+ bVec data_bvec = bVec::loadu(input_data + d);
455
+ fVec data_fvec0, data_fvec1;
456
+ std::tie(data_fvec0, data_fvec1) = convert_to_float<scalar_t>(data_bvec);
457
+ bVec data2_bvec = bVec::loadu(input_data2 + d);
458
+ fVec data2_fvec0, data2_fvec1;
459
+ std::tie(data2_fvec0, data2_fvec1) = convert_to_float<scalar_t>(data2_bvec);
460
+ fVec output_fvec0 = vec_fun(data_fvec0, data2_fvec0);
461
+ fVec output_fvec1 = vec_fun(data_fvec1, data2_fvec1);
462
+ bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
463
+ output_bvec.store(output_data + d);
464
+ }
465
+ if (size - d > 0) {
466
+ bVec data_bvec = bVec::loadu(input_data + d, size - d);
467
+ fVec data_fvec0, data_fvec1;
468
+ std::tie(data_fvec0, data_fvec1) = convert_to_float<scalar_t>(data_bvec);
469
+ bVec data2_bvec = bVec::loadu(input_data2 + d, size - d);
470
+ fVec data2_fvec0, data2_fvec1;
471
+ std::tie(data2_fvec0, data2_fvec1) = convert_to_float<scalar_t>(data2_bvec);
472
+ fVec output_fvec0 = vec_fun(data_fvec0, data2_fvec0);
473
+ fVec output_fvec1 = vec_fun(data_fvec1, data2_fvec1);
474
+ bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
475
+ output_bvec.store(output_data + d, size - d);
476
+ }
477
+ }
478
+
479
+ template <typename scalar_t, typename Op,
480
+ typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
481
+ inline void map3(
482
+ const Op& vec_fun,
483
+ scalar_t* output_data,
484
+ const scalar_t* input_data1,
485
+ const scalar_t* input_data2,
486
+ const scalar_t* input_data3,
487
+ int64_t size) {
488
+ using bVec = vec::Vectorized<scalar_t>;
489
+ using fVec = vec::Vectorized<float>;
490
+ int64_t d = 0;
491
+ for (; d < size - (size % bVec::size()); d += bVec::size()) {
492
+ bVec data1_bvec = bVec::loadu(input_data1 + d);
493
+ fVec data1_fvec0, data1_fvec1;
494
+ std::tie(data1_fvec0, data1_fvec1) = convert_to_float<scalar_t>(data1_bvec);
495
+ bVec data2_bvec = bVec::loadu(input_data2 + d);
496
+ fVec data2_fvec0, data2_fvec1;
497
+ std::tie(data2_fvec0, data2_fvec1) = convert_to_float<scalar_t>(data2_bvec);
498
+ bVec data3_bvec = bVec::loadu(input_data3 + d);
499
+ fVec data3_fvec0, data3_fvec1;
500
+ std::tie(data3_fvec0, data3_fvec1) = convert_to_float<scalar_t>(data3_bvec);
501
+ fVec output_fvec0 = vec_fun(data1_fvec0, data2_fvec0, data3_fvec0);
502
+ fVec output_fvec1 = vec_fun(data1_fvec1, data2_fvec1, data3_fvec1);
503
+ bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
504
+ output_bvec.store(output_data + d);
505
+ }
506
+ if (size - d > 0) {
507
+ bVec data1_bvec = bVec::loadu(input_data1 + d, size - d);
508
+ fVec data1_fvec0, data1_fvec1;
509
+ std::tie(data1_fvec0, data1_fvec1) = convert_to_float<scalar_t>(data1_bvec);
510
+ bVec data2_bvec = bVec::loadu(input_data2 + d, size - d);
511
+ fVec data2_fvec0, data2_fvec1;
512
+ std::tie(data2_fvec0, data2_fvec1) = convert_to_float<scalar_t>(data2_bvec);
513
+ bVec data3_bvec = bVec::loadu(input_data3 + d, size - d);
514
+ fVec data3_fvec0, data3_fvec1;
515
+ std::tie(data3_fvec0, data3_fvec1) = convert_to_float<scalar_t>(data3_bvec);
516
+ fVec output_fvec0 = vec_fun(data1_fvec0, data2_fvec0, data3_fvec0);
517
+ fVec output_fvec1 = vec_fun(data1_fvec1, data2_fvec1, data3_fvec1);
518
+ bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
519
+ output_bvec.store(output_data + d, size - d);
520
+ }
521
+ }
522
+
523
+ template <typename scalar_t, typename Op,
524
+ typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
525
+ inline void map4(
526
+ const Op& vec_fun,
527
+ scalar_t* output_data,
528
+ const scalar_t* input_data1,
529
+ const scalar_t* input_data2,
530
+ const scalar_t* input_data3,
531
+ const scalar_t* input_data4,
532
+ int64_t size) {
533
+ using bVec = vec::Vectorized<scalar_t>;
534
+ using fVec = vec::Vectorized<float>;
535
+ int64_t d = 0;
536
+ for (; d < size - (size % bVec::size()); d += bVec::size()) {
537
+ bVec data1_bvec = bVec::loadu(input_data1 + d);
538
+ fVec data1_fvec0, data1_fvec1;
539
+ std::tie(data1_fvec0, data1_fvec1) = convert_to_float<scalar_t>(data1_bvec);
540
+ bVec data2_bvec = bVec::loadu(input_data2 + d);
541
+ fVec data2_fvec0, data2_fvec1;
542
+ std::tie(data2_fvec0, data2_fvec1) = convert_to_float<scalar_t>(data2_bvec);
543
+ bVec data3_bvec = bVec::loadu(input_data3 + d);
544
+ fVec data3_fvec0, data3_fvec1;
545
+ std::tie(data3_fvec0, data3_fvec1) = convert_to_float<scalar_t>(data3_bvec);
546
+ bVec data4_bvec = bVec::loadu(input_data4 + d);
547
+ fVec data4_fvec0, data4_fvec1;
548
+ std::tie(data4_fvec0, data4_fvec1) = convert_to_float<scalar_t>(data4_bvec);
549
+ fVec output_fvec0 = vec_fun(data1_fvec0, data2_fvec0, data3_fvec0, data4_fvec0);
550
+ fVec output_fvec1 = vec_fun(data1_fvec1, data2_fvec1, data3_fvec1, data4_fvec1);
551
+ bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
552
+ output_bvec.store(output_data + d);
553
+ }
554
+ if (size - d > 0) {
555
+ bVec data1_bvec = bVec::loadu(input_data1 + d, size - d);
556
+ fVec data1_fvec0, data1_fvec1;
557
+ std::tie(data1_fvec0, data1_fvec1) = convert_to_float<scalar_t>(data1_bvec);
558
+ bVec data2_bvec = bVec::loadu(input_data2 + d, size - d);
559
+ fVec data2_fvec0, data2_fvec1;
560
+ std::tie(data2_fvec0, data2_fvec1) = convert_to_float<scalar_t>(data2_bvec);
561
+ bVec data3_bvec = bVec::loadu(input_data3 + d, size - d);
562
+ fVec data3_fvec0, data3_fvec1;
563
+ std::tie(data3_fvec0, data3_fvec1) = convert_to_float<scalar_t>(data3_bvec);
564
+ bVec data4_bvec = bVec::loadu(input_data4 + d, size - d);
565
+ fVec data4_fvec0, data4_fvec1;
566
+ std::tie(data4_fvec0, data4_fvec1) = convert_to_float<scalar_t>(data4_bvec);
567
+ fVec output_fvec0 = vec_fun(data1_fvec0, data2_fvec0, data3_fvec0, data4_fvec0);
568
+ fVec output_fvec1 = vec_fun(data1_fvec1, data2_fvec1, data3_fvec1, data4_fvec1);
569
+ bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
570
+ output_bvec.store(output_data + d, size - d);
571
+ }
572
+ }
573
+
574
+ } // namespace at::vec
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/intrinsics.h ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #if defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__))
3
+ /* GCC or clang-compatible compiler, targeting x86/x86-64 */
4
+ #include <x86intrin.h>
5
+ #elif defined(__clang__) && (defined(__ARM_NEON__) || defined(__aarch64__))
6
+ /* Clang-compatible compiler, targeting arm neon */
7
+ #include <arm_neon.h>
8
+ #elif defined(_MSC_VER)
9
+ /* Microsoft C/C++-compatible compiler */
10
+ #include <intrin.h>
11
+ #if _MSC_VER <= 1900
12
+ #define _mm256_extract_epi64(X, Y) (_mm_extract_epi64(_mm256_extractf128_si256(X, Y >> 1), Y % 2))
13
+ #define _mm256_extract_epi32(X, Y) (_mm_extract_epi32(_mm256_extractf128_si256(X, Y >> 2), Y % 4))
14
+ #define _mm256_extract_epi16(X, Y) (_mm_extract_epi16(_mm256_extractf128_si256(X, Y >> 3), Y % 8))
15
+ #define _mm256_extract_epi8(X, Y) (_mm_extract_epi8(_mm256_extractf128_si256(X, Y >> 4), Y % 16))
16
+ #endif
17
+ #elif defined(__GNUC__) && (defined(__ARM_NEON__) || defined(__aarch64__))
18
+ /* GCC-compatible compiler, targeting ARM with NEON */
19
+ #include <arm_neon.h>
20
+ #if defined (MISSING_ARM_VLD1)
21
+ #include <ATen/cpu/vec/vec256/missing_vld1_neon.h>
22
+ #elif defined (MISSING_ARM_VST1)
23
+ #include <ATen/cpu/vec/vec256/missing_vst1_neon.h>
24
+ #endif
25
+ #elif defined(__GNUC__) && defined(__IWMMXT__)
26
+ /* GCC-compatible compiler, targeting ARM with WMMX */
27
+ #include <mmintrin.h>
28
+ #elif defined(__s390x__)
29
+ // targets Z/architecture
30
+ // we will include vecintrin later
31
+ #elif (defined(__GNUC__) || defined(__xlC__)) && \
32
+ (defined(__VEC__) || defined(__ALTIVEC__))
33
+ /* XLC or GCC-compatible compiler, targeting PowerPC with VMX/VSX */
34
+ #include <altivec.h>
35
+ /* We need to undef those tokens defined by <altivec.h> to avoid conflicts
36
+ with the C++ types. => Can still use __bool/__vector */
37
+ #undef bool
38
+ #undef vector
39
+ #undef pixel
40
+ #elif defined(__GNUC__) && defined(__SPE__)
41
+ /* GCC-compatible compiler, targeting PowerPC with SPE */
42
+ #include <spe.h>
43
+ #endif
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec.h ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #if defined(CPU_CAPABILITY_AVX512)
4
+ #include <ATen/cpu/vec/vec512/vec512.h>
5
+ #else
6
+ #include <ATen/cpu/vec/vec256/vec256.h>
7
+ #endif
8
+
9
+ namespace at::vec {
10
+ // See Note [CPU_CAPABILITY namespace]
11
+ inline namespace CPU_CAPABILITY {
12
+
13
+ inline Vectorized<bool> convert_to_bool(Vectorized<int8_t> x) {
14
+ __at_align__ bool buffer[x.size()];
15
+ x.ne(Vectorized<int8_t>(0)).store(buffer);
16
+
17
+ Vectorized<bool> ret;
18
+ static_assert(x.size() == ret.size(), "");
19
+ std::memcpy(ret, buffer, ret.size() * sizeof(bool));
20
+ return ret;
21
+ }
22
+
23
+ template <>
24
+ inline Vectorized<bool> Vectorized<bool>::loadu(const void* ptr) {
25
+ // See NOTE [Loading boolean values]
26
+ return convert_to_bool(Vectorized<int8_t>::loadu(ptr));
27
+ }
28
+
29
+ template <>
30
+ inline Vectorized<bool> Vectorized<bool>::loadu(const void* ptr, int64_t count) {
31
+ // See NOTE [Loading boolean values]
32
+ return convert_to_bool(Vectorized<int8_t>::loadu(ptr, count));
33
+ }
34
+
35
+ template <typename VT>
36
+ struct VecHoldType { using hold_type = typename VT::value_type; };
37
+
38
+ template <>
39
+ struct VecHoldType<Vectorized<BFloat16>> { using hold_type = BFloat16; };
40
+
41
+ template <>
42
+ struct VecHoldType<Vectorized<Half>> {using hold_type = Half; };
43
+
44
+ template <typename VT>
45
+ using vechold_type = typename VecHoldType<VT>::hold_type;
46
+
47
+ }} // namespace at::vec::CPU_CAPABILITY
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/missing_vld1_neon.h ADDED
@@ -0,0 +1,452 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Workaround for missing vld1_*_x2 and vst1_*_x2 intrinsics in gcc-7. */
2
+
3
+ __extension__ extern __inline uint8x8x2_t
4
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5
+ vld1_u8_x2 (const uint8_t *__a)
6
+ {
7
+ uint8x8x2_t ret;
8
+ asm volatile("ld1 {%S0.8b - %T0.8b}, %1" : "=w" (ret) : "Q"(*__a));
9
+ return ret;
10
+ }
11
+
12
+ __extension__ extern __inline int8x8x2_t
13
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14
+ vld1_s8_x2 (const int8_t *__a)
15
+ {
16
+ int8x8x2_t ret;
17
+ asm volatile("ld1 {%S0.8b - %T0.8b}, %1" : "=w" (ret) : "Q"(*__a));
18
+ return ret;
19
+ }
20
+
21
+ __extension__ extern __inline uint16x4x2_t
22
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23
+ vld1_u16_x2 (const uint16_t *__a)
24
+ {
25
+ uint16x4x2_t ret;
26
+ asm volatile("ld1 {%S0.4h - %T0.4h}, %1" : "=w" (ret) : "Q"(*__a));
27
+ return ret;
28
+ }
29
+
30
+ __extension__ extern __inline int16x4x2_t
31
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32
+ vld1_s16_x2 (const int16_t *__a)
33
+ {
34
+ int16x4x2_t ret;
35
+ asm volatile("ld1 {%S0.4h - %T0.4h}, %1" : "=w" (ret) : "Q"(*__a));
36
+ return ret;
37
+ }
38
+
39
+ __extension__ extern __inline uint32x2x2_t
40
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
41
+ vld1_u32_x2 (const uint32_t *__a)
42
+ {
43
+ uint32x2x2_t ret;
44
+ asm volatile("ld1 {%S0.2s - %T0.2s}, %1" : "=w" (ret) : "Q"(*__a));
45
+ return ret;
46
+ }
47
+
48
+ __extension__ extern __inline int32x2x2_t
49
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
50
+ vld1_s32_x2 (const int32_t *__a)
51
+ {
52
+ int32x2x2_t ret;
53
+ asm volatile("ld1 {%S0.2s - %T0.2s}, %1" : "=w" (ret) : "Q"(*__a));
54
+ return ret;
55
+ }
56
+
57
+ __extension__ extern __inline uint64x1x2_t
58
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
59
+ vld1_u64_x2 (const uint64_t *__a)
60
+ {
61
+ uint64x1x2_t ret;
62
+ asm volatile("ld1 {%S0.1d - %T0.1d}, %1" : "=w" (ret) : "Q"(*__a));
63
+ return ret;
64
+ }
65
+
66
+ __extension__ extern __inline int64x1x2_t
67
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
68
+ vld1_s64_x2 (const int64_t *__a)
69
+ {
70
+ int64x1x2_t ret;
71
+ __builtin_aarch64_simd_oi __o;
72
+ asm volatile("ld1 {%S0.1d - %T0.1d}, %1" : "=w" (ret) : "Q"(*__a));
73
+ return ret;
74
+ }
75
+
76
+ __extension__ extern __inline float16x4x2_t
77
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
78
+ vld1_f16_x2 (const float16_t *__a)
79
+ {
80
+ float16x4x2_t ret;
81
+ asm volatile("ld1 {%S0.4h - %T0.4h}, %1" : "=w" (ret) : "Q"(*__a));
82
+ return ret;
83
+ }
84
+
85
+ __extension__ extern __inline float32x2x2_t
86
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
87
+ vld1_f32_x2 (const float32_t *__a)
88
+ {
89
+ float32x2x2_t ret;
90
+ asm volatile("ld1 {%S0.2s - %T0.2s}, %1" : "=w" (ret) : "Q"(*__a));
91
+ return ret;
92
+ }
93
+
94
+ __extension__ extern __inline float64x1x2_t
95
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
96
+ vld1_f64_x2 (const float64_t *__a)
97
+ {
98
+ float64x1x2_t ret;
99
+ asm volatile("ld1 {%S0.1d - %T0.1d}, %1" : "=w" (ret) : "Q"(*__a));
100
+ return ret;
101
+ }
102
+
103
+ __extension__ extern __inline poly8x8x2_t
104
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
105
+ vld1_p8_x2 (const poly8_t *__a)
106
+ {
107
+ poly8x8x2_t ret;
108
+ asm volatile("ld1 {%S0.8b - %T0.8b}, %1" : "=w" (ret) : "Q"(*__a));
109
+ return ret;
110
+ }
111
+
112
+ __extension__ extern __inline poly16x4x2_t
113
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
114
+ vld1_p16_x2 (const poly16_t *__a)
115
+ {
116
+ poly16x4x2_t ret;
117
+ asm volatile("ld1 {%S0.4h - %T0.4h}, %1" : "=w" (ret) : "Q"(*__a));
118
+ return ret;
119
+ }
120
+
121
+ __extension__ extern __inline poly64x1x2_t
122
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
123
+ vld1_p64_x2 (const poly64_t *__a)
124
+ {
125
+ poly64x1x2_t ret;
126
+ asm volatile("ld1 {%S0.1d - %T0.1d}, %1" : "=w" (ret) : "Q"(*__a));
127
+ return ret;
128
+ }
129
+
130
+ __extension__ extern __inline uint8x16x2_t
131
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
132
+ vld1q_u8_x2 (const uint8_t *__a)
133
+ {
134
+ uint8x16x2_t ret;
135
+ asm volatile("ld1 {%S0.16b - %T0.16b}, %1" : "=w" (ret) : "Q"(*__a));
136
+ return ret;
137
+ }
138
+
139
+ __extension__ extern __inline int8x16x2_t
140
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
141
+ vld1q_s8_x2 (const int8_t *__a)
142
+ {
143
+ int8x16x2_t ret;
144
+ asm volatile("ld1 {%S0.16b - %T0.16b}, %1" : "=w" (ret) : "Q"(*__a));
145
+ return ret;
146
+ }
147
+
148
+ __extension__ extern __inline uint16x8x2_t
149
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
150
+ vld1q_u16_x2 (const uint16_t *__a)
151
+ {
152
+ uint16x8x2_t ret;
153
+ asm volatile("ld1 {%S0.8h - %T0.8h}, %1" : "=w" (ret) : "Q"(*__a));
154
+ return ret;
155
+ }
156
+
157
+ __extension__ extern __inline int16x8x2_t
158
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
159
+ vld1q_s16_x2 (const int16_t *__a)
160
+ {
161
+ int16x8x2_t ret;
162
+ asm volatile("ld1 {%S0.8h - %T0.8h}, %1" : "=w" (ret) : "Q"(*__a));
163
+ return ret;
164
+ }
165
+
166
+ __extension__ extern __inline uint32x4x2_t
167
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
168
+ vld1q_u32_x2 (const uint32_t *__a)
169
+ {
170
+ uint32x4x2_t ret;
171
+ asm volatile("ld1 {%S0.4s - %T0.4s}, %1" : "=w" (ret) : "Q"(*__a));
172
+ return ret;
173
+ }
174
+
175
+ __extension__ extern __inline int32x4x2_t
176
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
177
+ vld1q_s32_x2 (const int32_t *__a)
178
+ {
179
+ int32x4x2_t ret;
180
+ asm volatile("ld1 {%S0.4s - %T0.4s}, %1" : "=w" (ret) : "Q"(*__a));
181
+ return ret;
182
+ }
183
+
184
+ __extension__ extern __inline uint64x2x2_t
185
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
186
+ vld1q_u64_x2 (const uint64_t *__a)
187
+ {
188
+ uint64x2x2_t ret;
189
+ asm volatile("ld1 {%S0.2d - %T0.2d}, %1" : "=w" (ret) : "Q"(*__a));
190
+ return ret;
191
+ }
192
+
193
+ __extension__ extern __inline int64x2x2_t
194
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
195
+ vld1q_s64_x2 (const int64_t *__a)
196
+ {
197
+ int64x2x2_t ret;
198
+ asm volatile("ld1 {%S0.2d - %T0.2d}, %1" : "=w" (ret) : "Q"(*__a));
199
+ return ret;
200
+ }
201
+
202
+ __extension__ extern __inline float16x8x2_t
203
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
204
+ vld1q_f16_x2 (const float16_t *__a)
205
+ {
206
+ float16x8x2_t ret;
207
+ asm volatile("ld1 {%S0.8h - %T0.8h}, %1" : "=w" (ret) : "Q"(*__a));
208
+ return ret;
209
+ }
210
+
211
+ __extension__ extern __inline float32x4x2_t
212
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
213
+ vld1q_f32_x2 (const float32_t *__a)
214
+ {
215
+ float32x4x2_t ret;
216
+ asm volatile("ld1 {%S0.4s - %T0.4s}, %1" : "=w" (ret) : "Q"(*__a));
217
+ return ret;
218
+ }
219
+
220
+ __extension__ extern __inline float64x2x2_t
221
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
222
+ vld1q_f64_x2 (const float64_t *__a)
223
+ {
224
+ float64x2x2_t ret;
225
+ asm volatile("ld1 {%S0.2d - %T0.2d}, %1" : "=w" (ret) : "Q"(*__a));
226
+ return ret;
227
+ }
228
+
229
+ __extension__ extern __inline poly8x16x2_t
230
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
231
+ vld1q_p8_x2 (const poly8_t *__a)
232
+ {
233
+ poly8x16x2_t ret;
234
+ asm volatile("ld1 {%S0.16b - %T0.16b}, %1" : "=w" (ret) : "Q"(*__a));
235
+ return ret;
236
+ }
237
+
238
+ __extension__ extern __inline poly16x8x2_t
239
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
240
+ vld1q_p16_x2 (const poly16_t *__a)
241
+ {
242
+ poly16x8x2_t ret;
243
+ asm volatile("ld1 {%S0.8h - %T0.8h}, %1" : "=w" (ret) : "Q"(*__a));
244
+ return ret;
245
+ }
246
+
247
+ __extension__ extern __inline poly64x2x2_t
248
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
249
+ vld1q_p64_x2 (const poly64_t *__a)
250
+ {
251
+ poly64x2x2_t ret;
252
+ asm volatile("ld1 {%S0.2d - %T0.2d}, %1" : "=w" (ret) : "Q"(*__a));
253
+ return ret;
254
+ }
255
+
256
+ /* vst1x2 */
257
+
258
+ __extension__ extern __inline void
259
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
260
+ vst1_s64_x2 (int64_t * __a, int64x1x2_t val)
261
+ {
262
+ asm volatile("st1 {%S1.1d - %T1.1d}, %0" : "=Q" (*__a) : "w" (val));
263
+ }
264
+
265
+ __extension__ extern __inline void
266
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
267
+ vst1_u64_x2 (uint64_t * __a, uint64x1x2_t val)
268
+ {
269
+ asm volatile("st1 {%S1.1d - %T1.1d}, %0" : "=Q" (*__a) : "w" (val));
270
+ }
271
+
272
+ __extension__ extern __inline void
273
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
274
+ vst1_f64_x2 (float64_t * __a, float64x1x2_t val)
275
+ {
276
+ asm volatile("st1 {%S1.1d - %T1.1d}, %0" : "=Q" (*__a) : "w" (val));
277
+ }
278
+
279
+ __extension__ extern __inline void
280
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
281
+ vst1_s8_x2 (int8_t * __a, int8x8x2_t val)
282
+ {
283
+ asm volatile("st1 {%S1.8b - %T1.8b}, %0" : "=Q" (*__a) : "w" (val));
284
+ }
285
+
286
+ __extension__ extern __inline void
287
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
288
+ vst1_p8_x2 (poly8_t * __a, poly8x8x2_t val)
289
+ {
290
+ asm volatile("st1 {%S1.8b - %T1.8b}, %0" : "=Q" (*__a) : "w" (val));
291
+ }
292
+
293
+ __extension__ extern __inline void
294
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
295
+ vst1_s16_x2 (int16_t * __a, int16x4x2_t val)
296
+ {
297
+ asm volatile("st1 {%S1.4h - %T1.4h}, %0" : "=Q" (*__a) : "w" (val));
298
+ }
299
+
300
+ __extension__ extern __inline void
301
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
302
+ vst1_p16_x2 (poly16_t * __a, poly16x4x2_t val)
303
+ {
304
+ asm volatile("st1 {%S1.4h - %T1.4h}, %0" : "=Q" (*__a) : "w" (val));
305
+ }
306
+
307
+ __extension__ extern __inline void
308
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
309
+ vst1_s32_x2 (int32_t * __a, int32x2x2_t val)
310
+ {
311
+ asm volatile("st1 {%S1.2s - %T1.2s}, %0" : "=Q" (*__a) : "w" (val));
312
+ }
313
+
314
+ __extension__ extern __inline void
315
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
316
+ vst1_u8_x2 (uint8_t * __a, uint8x8x2_t val)
317
+ {
318
+ asm volatile("st1 {%S1.8b - %T1.8b}, %0" : "=Q" (*__a) : "w" (val));
319
+ }
320
+
321
+ __extension__ extern __inline void
322
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
323
+ vst1_u16_x2 (uint16_t * __a, uint16x4x2_t val)
324
+ {
325
+ asm volatile("st1 {%S1.4h - %T1.4h}, %0" : "=Q" (*__a) : "w" (val));
326
+ }
327
+
328
+ __extension__ extern __inline void
329
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
330
+ vst1_u32_x2 (uint32_t * __a, uint32x2x2_t val)
331
+ {
332
+ asm volatile("st1 {%S1.2s - %T1.2s}, %0" : "=Q" (*__a) : "w" (val));
333
+ }
334
+
335
+ __extension__ extern __inline void
336
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
337
+ vst1_f16_x2 (float16_t * __a, float16x4x2_t val)
338
+ {
339
+ asm volatile("st1 {%S1.4h - %T1.4h}, %0" : "=Q" (*__a) : "w" (val));
340
+ }
341
+
342
+ __extension__ extern __inline void
343
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
344
+ vst1_f32_x2 (float32_t * __a, float32x2x2_t val)
345
+ {
346
+ asm volatile("st1 {%S1.2s - %T1.2s}, %0" : "=Q" (*__a) : "w" (val));
347
+ }
348
+
349
+ __extension__ extern __inline void
350
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
351
+ vst1_p64_x2 (poly64_t * __a, poly64x1x2_t val)
352
+ {
353
+ asm volatile("st1 {%S1.1d - %T1.1d}, %0" : "=Q" (*__a) : "w" (val));
354
+ }
355
+
356
+ __extension__ extern __inline void
357
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
358
+ vst1q_s8_x2 (int8_t * __a, int8x16x2_t val)
359
+ {
360
+ asm volatile("st1 {%S1.16b - %T1.16b}, %0" : "=Q" (*__a) : "w" (val));
361
+ }
362
+
363
+ __extension__ extern __inline void
364
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
365
+ vst1q_p8_x2 (poly8_t * __a, poly8x16x2_t val)
366
+ {
367
+ asm volatile("st1 {%S1.16b - %T1.16b}, %0" : "=Q" (*__a) : "w" (val));
368
+ }
369
+
370
+ __extension__ extern __inline void
371
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
372
+ vst1q_s16_x2 (int16_t * __a, int16x8x2_t val)
373
+ {
374
+ asm volatile("st1 {%S1.8h - %T1.8h}, %0" : "=Q" (*__a) : "w" (val));
375
+ }
376
+
377
+ __extension__ extern __inline void
378
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
379
+ vst1q_p16_x2 (poly16_t * __a, poly16x8x2_t val)
380
+ {
381
+ asm volatile("st1 {%S1.8h - %T1.8h}, %0" : "=Q" (*__a) : "w" (val));
382
+ }
383
+
384
+ __extension__ extern __inline void
385
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
386
+ vst1q_s32_x2 (int32_t * __a, int32x4x2_t val)
387
+ {
388
+ asm volatile("st1 {%S1.4s - %T1.4s}, %0" : "=Q" (*__a) : "w" (val));
389
+ }
390
+
391
+ __extension__ extern __inline void
392
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
393
+ vst1q_s64_x2 (int64_t * __a, int64x2x2_t val)
394
+ {
395
+ asm volatile("st1 {%S1.2d - %T1.2d}, %0" : "=Q" (*__a) : "w" (val));
396
+ }
397
+
398
+ __extension__ extern __inline void
399
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
400
+ vst1q_u8_x2 (uint8_t * __a, uint8x16x2_t val)
401
+ {
402
+ asm volatile("st1 {%S1.16b - %T1.16b}, %0" : "=Q" (*__a) : "w" (val));
403
+ }
404
+
405
+ __extension__ extern __inline void
406
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
407
+ vst1q_u16_x2 (uint16_t * __a, uint16x8x2_t val)
408
+ {
409
+ asm volatile("st1 {%S1.8h - %T1.8h}, %0" : "=Q" (*__a) : "w" (val));
410
+ }
411
+
412
+ __extension__ extern __inline void
413
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
414
+ vst1q_u32_x2 (uint32_t * __a, uint32x4x2_t val)
415
+ {
416
+ asm volatile("st1 {%S1.4s - %T1.4s}, %0" : "=Q" (*__a) : "w" (val));
417
+ }
418
+
419
+ __extension__ extern __inline void
420
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
421
+ vst1q_u64_x2 (uint64_t * __a, uint64x2x2_t val)
422
+ {
423
+ asm volatile("st1 {%S1.2d - %T1.2d}, %0" : "=Q" (*__a) : "w" (val));
424
+ }
425
+
426
+ __extension__ extern __inline void
427
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
428
+ vst1q_f16_x2 (float16_t * __a, float16x8x2_t val)
429
+ {
430
+ asm volatile("st1 {%S1.8h - %T1.8h}, %0" : "=Q" (*__a) : "w" (val));
431
+ }
432
+
433
+ __extension__ extern __inline void
434
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
435
+ vst1q_f32_x2 (float32_t * __a, float32x4x2_t val)
436
+ {
437
+ asm volatile("st1 {%S1.4s - %T1.4s}, %0" : "=Q" (*__a) : "w" (val));
438
+ }
439
+
440
+ __extension__ extern __inline void
441
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
442
+ vst1q_f64_x2 (float64_t * __a, float64x2x2_t val)
443
+ {
444
+ asm volatile("st1 {%S1.2d - %T1.2d}, %0" : "=Q" (*__a) : "w" (val));
445
+ }
446
+
447
+ __extension__ extern __inline void
448
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
449
+ vst1q_p64_x2 (poly64_t * __a, poly64x2x2_t val)
450
+ {
451
+ asm volatile("st1 {%S1.2d - %T1.2d}, %0" : "=Q" (*__a) : "w" (val));
452
+ }
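For context, here is a minimal usage sketch of the `_x2` intrinsics this workaround supplies. It is illustrative only: it assumes an aarch64 target, and scale_pair is a hypothetical helper, not part of the header above.

#include <arm_neon.h>

// Load two adjacent 4-float registers with a single vld1q_f32_x2,
// scale both halves, and write them back with vst1q_f32_x2.
static inline void scale_pair(float32_t* data, float32_t factor) {
  float32x4x2_t v = vld1q_f32_x2(data);   // reads data[0..7]
  float32x4_t f = vdupq_n_f32(factor);
  v.val[0] = vmulq_f32(v.val[0], f);
  v.val[1] = vmulq_f32(v.val[1], f);
  vst1q_f32_x2(data, v);                  // writes data[0..7]
}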
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/missing_vst1_neon.h ADDED
@@ -0,0 +1,8 @@
1
+ /* Workaround for missing vst1q_f32_x2 in gcc-8. */
2
+
3
+ __extension__ extern __inline void
4
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5
+ vst1q_f32_x2 (float32_t * __a, float32x4x2_t val)
6
+ {
7
+ asm volatile("st1 {%S1.4s - %T1.4s}, %0" : "=Q" (*__a) : "w" (val));
8
+ }
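A header like this is normally pulled in only when the toolchain actually lacks the intrinsic. The guard below is a hypothetical illustration of that pattern, not the condition PyTorch itself uses:

// Illustration only: include the workaround for pre-9 GCC builds.
#if defined(__GNUC__) && !defined(__clang__) && (__GNUC__ < 9)
#include <ATen/cpu/vec/vec256/missing_vst1_neon.h>
#endif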
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256.h ADDED
@@ -0,0 +1,289 @@
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+
6
+ #include <ATen/cpu/vec/intrinsics.h>
7
+
8
+ #include <ATen/cpu/vec/vec_base.h>
9
+ #if !(defined(__VSX__) || defined(CPU_CAPABILITY_VSX) || defined(CPU_CAPABILITY_ZVECTOR))
10
+ #include <ATen/cpu/vec/vec256/vec256_float.h>
11
+ #include <ATen/cpu/vec/vec256/vec256_float_neon.h>
12
+ #include <ATen/cpu/vec/vec256/vec256_bfloat16.h>
13
+ #include <ATen/cpu/vec/vec256/vec256_double.h>
14
+ #include <ATen/cpu/vec/vec256/vec256_int.h>
15
+ #include <ATen/cpu/vec/vec256/vec256_qint.h>
16
+ #include <ATen/cpu/vec/vec256/vec256_complex_float.h>
17
+ #include <ATen/cpu/vec/vec256/vec256_complex_double.h>
18
+ #elif defined(__VSX__) || defined(CPU_CAPABILITY_VSX)
19
+ #include <ATen/cpu/vec/vec256/vsx/vec256_common_vsx.h>
20
+ #else
21
+ #include <ATen/cpu/vec/vec256/zarch/vec256_zarch.h>
22
+ #include <ATen/cpu/vec/vec256/vec256_bfloat16.h>
23
+ #endif
24
+
25
+ #include <algorithm>
26
+ #include <cstddef>
27
+ #include <cstdint>
28
+ #include <cstring>
29
+ #include <ostream>
30
+
31
+ namespace at::vec {
32
+
33
+ // Note [CPU_CAPABILITY namespace]
34
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
35
+ // This header, and all of its subheaders, will be compiled with
36
+ // different architecture flags for each supported set of vector
37
+ // intrinsics. So we need to make sure they aren't inadvertently
38
+ // linked together. We do this by declaring objects in an `inline
39
+ // namespace` which changes the name mangling, but can still be
40
+ // accessed as `at::vec`.
41
+ inline namespace CPU_CAPABILITY {
42
+
43
+ inline std::ostream& operator<<(std::ostream& stream, const c10::qint32& val) {
44
+ stream << val.val_;
45
+ return stream;
46
+ }
47
+ inline std::ostream& operator<<(std::ostream& stream, const c10::qint8& val) {
48
+ stream << static_cast<int>(val.val_);
49
+ return stream;
50
+ }
51
+ inline std::ostream& operator<<(std::ostream& stream, const c10::quint8& val) {
52
+ stream << static_cast<unsigned int>(val.val_);
53
+ return stream;
54
+ }
55
+
56
+ template <typename T>
57
+ std::ostream& operator<<(std::ostream& stream, const Vectorized<T>& vec) {
58
+ T buf[Vectorized<T>::size()];
59
+ vec.store(buf);
60
+ stream << "vec[";
61
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
62
+ if (i != 0) {
63
+ stream << ", ";
64
+ }
65
+ stream << buf[i];
66
+ }
67
+ stream << "]";
68
+ return stream;
69
+ }
70
+
71
+
72
+ #if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
73
+
74
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CAST (AVX2) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
75
+
76
+ template<>
77
+ inline Vectorized<float> cast<float, double>(const Vectorized<double>& src) {
78
+ return _mm256_castpd_ps(src);
79
+ }
80
+
81
+ template<>
82
+ inline Vectorized<double> cast<double, float>(const Vectorized<float>& src) {
83
+ return _mm256_castps_pd(src);
84
+ }
85
+
86
+ template<>
87
+ inline Vectorized<float> cast<float, int32_t>(const Vectorized<int32_t>& src) {
88
+ return _mm256_castsi256_ps(src);
89
+ }
90
+
91
+ template<>
92
+ inline Vectorized<double> cast<double, int64_t>(const Vectorized<int64_t>& src) {
93
+ return _mm256_castsi256_pd(src);
94
+ }
95
+
96
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GATHER ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
97
+
98
+ template<int64_t scale = 1>
99
+ std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<double>>
100
+ inline gather(const double* base_addr, const Vectorized<int64_t>& vindex) {
101
+ return _mm256_i64gather_pd(base_addr, vindex, scale);
102
+ }
103
+
104
+ template<int64_t scale = 1>
105
+ std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<float>>
106
+ inline gather(const float* base_addr, const Vectorized<int32_t>& vindex) {
107
+ return _mm256_i32gather_ps(base_addr, vindex, scale);
108
+ }
109
+
110
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MASK GATHER ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
111
+
112
+ template<int64_t scale = 1>
113
+ std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<double>>
114
+ inline mask_gather(const Vectorized<double>& src, const double* base_addr,
115
+ const Vectorized<int64_t>& vindex, Vectorized<double>& mask) {
116
+ return _mm256_mask_i64gather_pd(src, base_addr, vindex, mask, scale);
117
+ }
118
+
119
+ template<int64_t scale = 1>
120
+ std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<float>>
121
+ inline mask_gather(const Vectorized<float>& src, const float* base_addr,
122
+ const Vectorized<int32_t>& vindex, Vectorized<float>& mask) {
123
+ return _mm256_mask_i32gather_ps(src, base_addr, vindex, mask, scale);
124
+ }
125
+
126
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CONVERT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
127
+
128
+ // Only works for inputs in the range: [-2^51, 2^51]
129
+ // From: https://stackoverflow.com/a/41148578
130
+ template<>
131
+ Vectorized<int64_t>
132
+ inline convert_to_int_of_same_size<double>(const Vectorized<double> &src) {
133
+ auto x = _mm256_add_pd(src, _mm256_set1_pd(0x0018000000000000));
134
+ return _mm256_sub_epi64(
135
+ _mm256_castpd_si256(x),
136
+ _mm256_castpd_si256(_mm256_set1_pd(0x0018000000000000))
137
+ );
138
+ }
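The magic constant here is 2^52 + 2^51 (= 6755399441055744): adding it forces the integer value of the input into the low mantissa bits, after which subtracting the constant's bit pattern in integer space recovers the result. A scalar sketch of the same trick, valid for |x| <= 2^51 like the vectorized version (illustrative only, not part of the header):

#include <cstdint>
#include <cstring>

static inline int64_t double_to_int64_trick(double x) {
  const double magic = 6755399441055744.0;  // 2^52 + 2^51
  double shifted = x + magic;               // integer part lands in the mantissa
  int64_t bits, magic_bits;
  std::memcpy(&bits, &shifted, sizeof(bits));
  std::memcpy(&magic_bits, &magic, sizeof(magic_bits));
  return bits - magic_bits;                 // undo the offset in integer space
}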
139
+
140
+ template<>
141
+ Vectorized<int32_t>
142
+ inline convert_to_int_of_same_size<float>(const Vectorized<float> &src) {
143
+ return _mm256_cvttps_epi32(src);
144
+ }
145
+
146
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ INTERLEAVE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
147
+
148
+ template <>
149
+ std::pair<Vectorized<double>, Vectorized<double>>
150
+ inline interleave2<double>(const Vectorized<double>& a, const Vectorized<double>& b) {
151
+ // inputs:
152
+ // a = {a0, a1, a2, a3}
153
+ // b = {b0, b1, b2, b3}
154
+
155
+ // swap lanes:
156
+ // a_swapped = {a0, a1, b0, b1}
157
+ // b_swapped = {a2, a3, b2, b3}
158
+ auto a_swapped = _mm256_permute2f128_pd(a, b, 0b0100000); // 0, 2. 4 bits apart
159
+ auto b_swapped = _mm256_permute2f128_pd(a, b, 0b0110001); // 1, 3. 4 bits apart
160
+
161
+ // group cols crossing lanes:
162
+ // return {a0, b0, a1, b1}
163
+ // {a2, b2, a3, b3}
164
+ return std::make_pair(_mm256_permute4x64_pd(a_swapped, 0b11011000), // 0, 2, 1, 3
165
+ _mm256_permute4x64_pd(b_swapped, 0b11011000)); // 0, 2, 1, 3
166
+ }
167
+
168
+ template <>
169
+ std::pair<Vectorized<float>, Vectorized<float>>
170
+ inline interleave2<float>(const Vectorized<float>& a, const Vectorized<float>& b) {
171
+ // inputs:
172
+ // a = {a0, a1, a2, a3, a4, a5, a6, a7}
173
+ // b = {b0, b1, b2, b3, b4, b5, b6, b7}
174
+
175
+ // swap lanes:
176
+ // a_swapped = {a0, a1, a2, a3, b0, b1, b2, b3}
177
+ // b_swapped = {a4, a5, a6, a7, b4, b5, b6, b7}
178
+ // TODO: can we support caching this?
179
+ auto a_swapped = _mm256_permute2f128_ps(a, b, 0b0100000); // 0, 2. 4 bits apart
180
+ auto b_swapped = _mm256_permute2f128_ps(a, b, 0b0110001); // 1, 3. 4 bits apart
181
+
182
+ // group cols crossing lanes:
183
+ // return {a0, b0, a1, b1, a2, b2, a3, b3}
184
+ // {a4, b4, a5, b5, a6, b6, a7, b7}
185
+ const __m256i group_ctrl = _mm256_setr_epi32(0, 4, 1, 5, 2, 6, 3, 7);
186
+ return std::make_pair(_mm256_permutevar8x32_ps(a_swapped, group_ctrl),
187
+ _mm256_permutevar8x32_ps(b_swapped, group_ctrl));
188
+ }
189
+
190
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ DEINTERLEAVE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
191
+
192
+ template <>
193
+ std::pair<Vectorized<double>, Vectorized<double>>
194
+ inline deinterleave2<double>(const Vectorized<double>& a, const Vectorized<double>& b) {
195
+ // inputs:
196
+ // a = {a0, b0, a1, b1}
197
+ // b = {a2, b2, a3, b3}
198
+
199
+ // group cols crossing lanes:
200
+ // a_grouped = {a0, a1, b0, b1}
201
+ // b_grouped = {a2, a3, b2, b3}
202
+ auto a_grouped = _mm256_permute4x64_pd(a, 0b11011000); // 0, 2, 1, 3
203
+ auto b_grouped = _mm256_permute4x64_pd(b, 0b11011000); // 0, 2, 1, 3
204
+
205
+ // swap lanes:
206
+ // return {a0, a1, a2, a3}
207
+ // {b0, b1, b2, b3}
208
+ return std::make_pair(_mm256_permute2f128_pd(a_grouped, b_grouped, 0b0100000), // 0, 2. 4 bits apart
209
+ _mm256_permute2f128_pd(a_grouped, b_grouped, 0b0110001)); // 1, 3. 4 bits apart
210
+ }
211
+
212
+ template <>
213
+ std::pair<Vectorized<float>, Vectorized<float>>
214
+ inline deinterleave2<float>(const Vectorized<float>& a, const Vectorized<float>& b) {
215
+ // inputs:
216
+ // a = {a0, b0, a1, b1, a2, b2, a3, b3}
217
+ // b = {a4, b4, a5, b5, a6, b6, a7, b7}
218
+
219
+ // group cols crossing lanes:
220
+ // a_grouped = {a0, a1, a2, a3, b0, b1, b2, b3}
221
+ // b_grouped = {a4, a5, a6, a7, b4, b5, b6, b7}
222
+ // TODO: can we support caching this?
223
+ const __m256i group_ctrl = _mm256_setr_epi32(0, 2, 4, 6, 1, 3, 5, 7);
224
+ auto a_grouped = _mm256_permutevar8x32_ps(a, group_ctrl);
225
+ auto b_grouped = _mm256_permutevar8x32_ps(b, group_ctrl);
226
+
227
+ // swap lanes:
228
+ // return {a0, a1, a2, a3, a4, a5, a6, a7}
229
+ // {b0, b1, b2, b3, b4, b5, b6, b7}
230
+ return std::make_pair(_mm256_permute2f128_ps(a_grouped, b_grouped, 0b0100000), // 0, 2. 4 bits apart
231
+ _mm256_permute2f128_ps(a_grouped, b_grouped, 0b0110001)); // 1, 3. 4 bits apart
232
+ }
233
+
234
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ FLIP ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
235
+
236
+ template<>
237
+ inline Vectorized<float> flip(const Vectorized<float> & v) {
238
+ const __m256i mask_float = _mm256_set_epi32(0, 1, 2, 3, 4, 5, 6, 7);
239
+ return _mm256_permutevar8x32_ps(v, mask_float);
240
+ }
241
+
242
+ template<>
243
+ inline Vectorized<double> flip(const Vectorized<double> & v) {
244
+ return _mm256_permute4x64_pd(v, 27); // 27 == _MM_SHUFFLE(0, 1, 2, 3)
245
+ }
246
+
247
+ template<>
248
+ inline Vectorized<int64_t> flip(const Vectorized<int64_t> & v) {
249
+ return _mm256_permute4x64_epi64(v, 27); // 27 == _MM_SHUFFLE(0, 1, 2, 3)
250
+ }
251
+
252
+ template<>
253
+ inline Vectorized<int32_t> flip(const Vectorized<int32_t> & v) {
254
+ const __m256i mask_int32 = _mm256_set_epi32(0, 1, 2, 3, 4, 5, 6, 7);
255
+ return _mm256_permutevar8x32_epi32(v, mask_int32);
256
+ }
257
+
258
+ template<>
259
+ inline Vectorized<int16_t> flip(const Vectorized<int16_t> & v) {
260
+ const __m256i mask = _mm256_set_epi8(
261
+ 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
262
+ 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14
263
+ );
264
+ auto reversed = _mm256_shuffle_epi8(v, mask);
265
+ return _mm256_permute2x128_si256(reversed, reversed, 1);
266
+ }
267
+
268
+ inline __m256i flip8(const __m256i & v) {
269
+ const __m256i mask_int8 = _mm256_set_epi8(
270
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
271
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
272
+ );
273
+ auto reversed = _mm256_shuffle_epi8(v, mask_int8);
274
+ return _mm256_permute2x128_si256(reversed, reversed, 1);
275
+ }
276
+
277
+ template<>
278
+ inline Vectorized<int8_t> flip(const Vectorized<int8_t> & v) {
279
+ return flip8(v);
280
+ }
281
+
282
+ template<>
283
+ inline Vectorized<uint8_t> flip(const Vectorized<uint8_t> & v) {
284
+ return flip8(v);
285
+ }
286
+
287
+ #endif // (defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
288
+
289
+ }} // namespace at::vec::CPU_CAPABILITY
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_bfloat16.h ADDED
@@ -0,0 +1,1090 @@
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+
6
+ #include <ATen/cpu/vec/intrinsics.h>
7
+ #include <ATen/cpu/vec/vec_base.h>
8
+ #include <c10/util/irange.h>
9
+
10
+ #if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
11
+ #include <sleef.h>
12
+ #endif
13
+
14
+ #pragma GCC diagnostic push
15
+ #pragma GCC diagnostic ignored "-Wignored-qualifiers"
16
+
17
+ namespace at::vec {
18
+ // See Note [CPU_CAPABILITY namespace]
19
+ inline namespace CPU_CAPABILITY {
20
+
21
+ #if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
22
+
23
+ // bfloat16 conversion
24
+ static inline void cvtbf16_fp32(const __m128i& a, __m256& o) {
25
+ o = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_cvtepu16_epi32(a), 16));
26
+ }
27
+
28
+ static inline void cvtbf16_fp32(const __m256i& a, __m256& o1, __m256& o2) {
29
+ __m128i lo = _mm256_extractf128_si256(a, 0);
30
+ __m128i hi = _mm256_extractf128_si256(a, 1);
31
+ cvtbf16_fp32(lo, o1);
32
+ cvtbf16_fp32(hi, o2);
33
+ }
34
+ static inline __m256i cvtfp32_bf16(const __m256& a, const __m256& b) {
35
+ __m256i lo = _mm256_castps_si256(a);
36
+ __m256i hi = _mm256_castps_si256(b);
37
+ __m256i nan = _mm256_set1_epi32(0xffff);
38
+ __m256i mask_lo = _mm256_castps_si256(_mm256_cmp_ps(a, a, _CMP_ORD_Q));
39
+ __m256i mask_hi = _mm256_castps_si256(_mm256_cmp_ps(b, b, _CMP_ORD_Q));
40
+ __m256i ones = _mm256_set1_epi32(0x1);
41
+ __m256i vec_bias = _mm256_set1_epi32(0x7fff);
42
+ // uint32_t lsb = (input >> 16) & 1;
43
+ auto t_lo = _mm256_and_si256(_mm256_srli_epi32(lo, 16), ones);
44
+ auto t_hi = _mm256_and_si256(_mm256_srli_epi32(hi, 16), ones);
45
+ // uint32_t rounding_bias = 0x7fff + lsb;
46
+ t_lo = _mm256_add_epi32(t_lo, vec_bias);
47
+ t_hi = _mm256_add_epi32(t_hi, vec_bias);
48
+ // input += rounding_bias;
49
+ t_lo = _mm256_add_epi32(t_lo, lo);
50
+ t_hi = _mm256_add_epi32(t_hi, hi);
51
+ // input = input >> 16;
52
+ t_lo = _mm256_srli_epi32(t_lo, 16);
53
+ t_hi = _mm256_srli_epi32(t_hi, 16);
54
+ // Check NaN before converting back to bf16
55
+ t_lo = _mm256_blendv_epi8(nan, t_lo, mask_lo);
56
+ t_hi = _mm256_blendv_epi8(nan, t_hi, mask_hi);
57
+
58
+ t_lo = _mm256_packus_epi32(t_lo, t_hi); // t_hi[4-7] t_lo[4-7] t_hi[0-3] t_lo[0-3]
59
+ return _mm256_permute4x64_epi64(t_lo, 0xd8); // 11 01 10 00
60
+ }
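A scalar reference for the rounding performed above (illustrative only, not part of the header): fp32 to bf16 with round-to-nearest-even and NaN forced to 0xffff, mirroring the lsb/0x7fff bias trick in the vector code.

#include <cmath>
#include <cstdint>
#include <cstring>

static inline uint16_t fp32_to_bf16_rne(float f) {
  if (std::isnan(f)) {
    return 0xffff;                      // matches the vectorized NaN blend
  }
  uint32_t input;
  std::memcpy(&input, &f, sizeof(input));
  uint32_t lsb = (input >> 16) & 1;     // lowest bit that survives truncation
  uint32_t rounding_bias = 0x7fff + lsb;
  input += rounding_bias;               // ties round to even
  return static_cast<uint16_t>(input >> 16);
}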
61
+
62
+ static inline __m256i merge_compare_result(const __m256& a, const __m256& b) {
63
+ __m256i lo = _mm256_castps_si256(a);
64
+ __m256i hi = _mm256_castps_si256(b);
65
+ lo = _mm256_srli_epi32(lo, 16);
66
+ hi = _mm256_srli_epi32(hi, 16);
67
+ auto out = _mm256_packus_epi32(lo, hi);
68
+ return _mm256_permute4x64_epi64(out, 0xd8);
69
+ }
70
+
71
+ // float16 conversion
72
+ static inline void cvtfp16_fp32(const __m128i& a, __m256& o) {
73
+ o = _mm256_cvtph_ps(a);
74
+ }
75
+
76
+ static inline void cvtfp16_fp32(const __m256i& a, __m256& o1, __m256& o2) {
77
+ __m128i lo = _mm256_extractf128_si256(a, 0);
78
+ __m128i hi = _mm256_extractf128_si256(a, 1);
79
+ cvtfp16_fp32(lo, o1);
80
+ cvtfp16_fp32(hi, o2);
81
+ }
82
+
83
+ static inline __m256i cvtfp32_fp16(const __m256& a, const __m256& b) {
84
+ __m128i lo = _mm256_cvtps_ph(
85
+ a, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
86
+ __m128i hi = _mm256_cvtps_ph(
87
+ b, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
88
+ return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), hi, 1);
89
+ }
90
+
91
+ // dtype conversion between float16/bfloat16 and float32
92
+ template <typename T, typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
93
+ inline void cvt_to_fp32(const __m128i& a, __m256& o);
94
+ template <> inline void cvt_to_fp32<BFloat16>(const __m128i& a, __m256& o) {
95
+ cvtbf16_fp32(a, o);
96
+ }
97
+ template <> inline void cvt_to_fp32<Half>(const __m128i& a, __m256& o) {
98
+ cvtfp16_fp32(a, o);
99
+ }
100
+
101
+ template <typename T, typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
102
+ inline void cvt_to_fp32(const __m256i& a, __m256& o1, __m256& o2);
103
+ template <> inline void cvt_to_fp32<BFloat16>(const __m256i& a, __m256& o1, __m256& o2) {
104
+ cvtbf16_fp32(a, o1, o2);
105
+ }
106
+ template <> inline void cvt_to_fp32<Half>(const __m256i& a, __m256& o1, __m256& o2) {
107
+ cvtfp16_fp32(a, o1, o2);
108
+ }
109
+
110
+ template <typename T, bool is_compare_op = false,
111
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
112
+ inline __m256i cvt_from_fp32(const __m256& a, const __m256& b);
113
+ template <> inline __m256i cvt_from_fp32<BFloat16, false>(const __m256& a, const __m256& b) {
114
+ return cvtfp32_bf16(a, b);
115
+ }
116
+ template <> inline __m256i cvt_from_fp32<BFloat16, true>(const __m256& a, const __m256& b) {
117
+ return merge_compare_result(a, b);
118
+ }
119
+ template <> inline __m256i cvt_from_fp32<Half, false>(const __m256& a, const __m256& b) {
120
+ return cvtfp32_fp16(a, b);
121
+ }
122
+ template <> inline __m256i cvt_from_fp32<Half, true>(const __m256& a, const __m256& b) {
123
+ return cvtfp32_fp16(a, b);
124
+ }
125
+
126
+ template <typename T>
127
+ class Vectorized16 {
128
+ static_assert(
129
+ is_reduced_floating_point_v<T>,
130
+ "Support only float16 and bfloat16.");
131
+ protected:
132
+ __m256i values;
133
+ public:
134
+ using value_type = uint16_t;
135
+ using size_type = int;
136
+ static constexpr size_type size() {
137
+ return 16;
138
+ }
139
+ Vectorized16() {}
140
+ Vectorized16(__m256i v) : values(v) {}
141
+ Vectorized16(T val) {
142
+ value_type uw = val.x;
143
+ values = _mm256_set1_epi16(uw);
144
+ }
145
+ Vectorized16(T val1, T val2, T val3, T val4,
146
+ T val5, T val6, T val7, T val8,
147
+ T val9, T val10, T val11, T val12,
148
+ T val13, T val14, T val15, T val16) {
149
+ values = _mm256_setr_epi16(
150
+ val1.x, val2.x, val3.x, val4.x, val5.x, val6.x, val7.x, val8.x,
151
+ val9.x, val10.x, val11.x, val12.x, val13.x, val14.x, val15.x, val16.x);
152
+ }
153
+ operator __m256i() const {
154
+ return values;
155
+ }
156
+ T& operator[](int idx) = delete;
157
+ const T& operator[](int idx) const = delete;
158
+ int zero_mask() const {
159
+ // returns an integer mask where all zero elements are translated to 1-bit and others are translated to 0-bit
160
+ __m256i cmp = _mm256_cmpeq_epi16(values, _mm256_set1_epi16(0));
161
+ return _mm256_movemask_epi8(cmp);
162
+ }
163
+ static Vectorized<T> loadu(const void* ptr, int16_t count = size()) {
164
+ if (count == size())
165
+ return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(ptr));
166
+
167
+ __at_align__ int16_t tmp_values[size()];
168
+ std::memcpy(tmp_values, ptr, count * sizeof(int16_t));
169
+ return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(tmp_values));
170
+ }
171
+ void store(void* ptr, int count = size()) const {
172
+ if (count == size()) {
173
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>(ptr), values);
174
+ } else if (count > 0) {
175
+ __at_align__ int16_t tmp_values[size()];
176
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>(tmp_values), values);
177
+ std::memcpy(ptr, tmp_values, count * sizeof(int16_t));
178
+ }
179
+ }
180
+ template <int64_t mask>
181
+ static Vectorized<T> blend(const Vectorized<T>& a, const Vectorized<T>& b) {
182
+ __at_align__ int16_t tmp_values[size()];
183
+ a.store(tmp_values);
184
+ if (mask & 0x01)
185
+ tmp_values[0] = _mm256_extract_epi16(b.values, 0);
186
+ if (mask & 0x02)
187
+ tmp_values[1] = _mm256_extract_epi16(b.values, 1);
188
+ if (mask & 0x04)
189
+ tmp_values[2] = _mm256_extract_epi16(b.values, 2);
190
+ if (mask & 0x08)
191
+ tmp_values[3] = _mm256_extract_epi16(b.values, 3);
192
+ if (mask & 0x10)
193
+ tmp_values[4] = _mm256_extract_epi16(b.values, 4);
194
+ if (mask & 0x20)
195
+ tmp_values[5] = _mm256_extract_epi16(b.values, 5);
196
+ if (mask & 0x40)
197
+ tmp_values[6] = _mm256_extract_epi16(b.values, 6);
198
+ if (mask & 0x80)
199
+ tmp_values[7] = _mm256_extract_epi16(b.values, 7);
200
+ if (mask & 0x100)
201
+ tmp_values[8] = _mm256_extract_epi16(b.values, 8);
202
+ if (mask & 0x200)
203
+ tmp_values[9] = _mm256_extract_epi16(b.values, 9);
204
+ if (mask & 0x400)
205
+ tmp_values[10] = _mm256_extract_epi16(b.values, 10);
206
+ if (mask & 0x800)
207
+ tmp_values[11] = _mm256_extract_epi16(b.values, 11);
208
+ if (mask & 0x1000)
209
+ tmp_values[12] = _mm256_extract_epi16(b.values, 12);
210
+ if (mask & 0x2000)
211
+ tmp_values[13] = _mm256_extract_epi16(b.values, 13);
212
+ if (mask & 0x4000)
213
+ tmp_values[14] = _mm256_extract_epi16(b.values, 14);
214
+ if (mask & 0x8000)
215
+ tmp_values[15] = _mm256_extract_epi16(b.values, 15);
216
+ return loadu(tmp_values);
217
+ }
218
+ static Vectorized<T> blendv(const Vectorized<T>& a,
219
+ const Vectorized<T>& b, const Vectorized<T>& mask) {
220
+ return _mm256_blendv_epi8(a.values, b.values, mask.values);
221
+ }
222
+ template<typename step_t>
223
+ static Vectorized<T> arange(T base = 0.f, step_t step = static_cast<step_t>(1)) {
224
+ return Vectorized<T>(
225
+ base, base + step, base + 2 * step, base + 3 * step,
226
+ base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step,
227
+ base + 8 * step, base + 9 * step, base + 10 * step, base + 11 * step,
228
+ base + 12 * step, base + 13 * step, base + 14 * step, base + 15 * step);
229
+ }
230
+ static Vectorized<T> set(const Vectorized<T>& a,
231
+ const Vectorized<T>& b, int64_t count = size()) {
232
+ switch (count) {
233
+ case 0:
234
+ return a;
235
+ case 1:
236
+ return blend<1>(a, b);
237
+ case 2:
238
+ return blend<3>(a, b);
239
+ case 3:
240
+ return blend<7>(a, b);
241
+ case 4:
242
+ return blend<15>(a, b);
243
+ case 5:
244
+ return blend<31>(a, b);
245
+ case 6:
246
+ return blend<63>(a, b);
247
+ case 7:
248
+ return blend<127>(a, b);
249
+ case 8:
250
+ return blend<255>(a, b);
251
+ case 9:
252
+ return blend<511>(a, b);
253
+ case 10:
254
+ return blend<1023>(a, b);
255
+ case 11:
256
+ return blend<2047>(a, b);
257
+ case 12:
258
+ return blend<4095>(a, b);
259
+ case 13:
260
+ return blend<8191>(a, b);
261
+ case 14:
262
+ return blend<16383>(a, b);
263
+ case 15:
264
+ return blend<32767>(a, b);
265
+ }
266
+ return b;
267
+ }
268
+ Vectorized<T> map(const __m256 (*const vop)(__m256)) const {
269
+ __m256 lo, hi;
270
+ cvt_to_fp32<T>(values, lo, hi);
271
+ const auto o1 = vop(lo);
272
+ const auto o2 = vop(hi);
273
+ return cvt_from_fp32<T>(o1, o2);
274
+ }
275
+ Vectorized<T> isnan() const {
276
+ __m256 lo, hi;
277
+ cvt_to_fp32<T>(values, lo, hi);
278
+ lo = _mm256_cmp_ps(lo, _mm256_set1_ps(0.0f), _CMP_UNORD_Q);
279
+ hi = _mm256_cmp_ps(hi, _mm256_set1_ps(0.0f), _CMP_UNORD_Q);
280
+ return merge_compare_result(lo, hi);
281
+ }
282
+ Vectorized<T> abs() const {
283
+ return _mm256_andnot_si256(_mm256_set1_epi16(0x8000), values);
284
+ }
285
+ Vectorized<T> angle() const {
286
+ __m256 lo, hi;
287
+ cvt_to_fp32<T>(values, lo, hi);
288
+ auto angle_lambda = [](__m256 values) {
289
+ const auto zero_vec = _mm256_set1_ps(0.f);
290
+ const auto nan_vec = _mm256_set1_ps(NAN);
291
+ const auto not_nan_mask = _mm256_cmp_ps(values, values, _CMP_EQ_OQ);
292
+ const auto nan_mask = _mm256_cmp_ps(not_nan_mask, zero_vec, _CMP_EQ_OQ);
293
+ const auto pi = _mm256_set1_ps(c10::pi<float>);
294
+
295
+ const auto neg_mask = _mm256_cmp_ps(values, zero_vec, _CMP_LT_OQ);
296
+ auto angle = _mm256_blendv_ps(zero_vec, pi, neg_mask);
297
+ angle = _mm256_blendv_ps(angle, nan_vec, nan_mask);
298
+ return angle;
299
+ };
300
+ auto o1 = angle_lambda(lo);
301
+ auto o2 = angle_lambda(hi);
302
+ return cvt_from_fp32<T>(o1, o2);
303
+ }
304
+ Vectorized<T> real() const {
305
+ return *this;
306
+ }
307
+ Vectorized<T> imag() const {
308
+ return _mm256_set1_epi16(0);
309
+ }
310
+ Vectorized<T> conj() const {
311
+ return *this;
312
+ }
313
+ Vectorized<T> acos() const {
314
+ return map(Sleef_acosf8_u10);
315
+ }
316
+ Vectorized<T> asin() const {
317
+ return map(Sleef_asinf8_u10);
318
+ }
319
+ Vectorized<T> atan() const {
320
+ return map(Sleef_atanf8_u10);
321
+ }
322
+ Vectorized<T> atanh() const {
323
+ return map(Sleef_atanhf8_u10);
324
+ }
325
+ Vectorized<T> atan2(const Vectorized<T> &b) const {
326
+ __m256 lo, hi;
327
+ __m256 b1, b2;
328
+ cvt_to_fp32<T>(values, lo, hi);
329
+ cvt_to_fp32<T>(b.values, b1, b2);
330
+ auto o1 = Sleef_atan2f8_u10(lo, b1);
331
+ auto o2 = Sleef_atan2f8_u10(hi, b2);
332
+ return cvt_from_fp32<T>(o1, o2);
333
+ }
334
+ Vectorized<T> copysign(const Vectorized<T> &sign) const {
335
+ // copy sign bit (0x8000) from sign and remaining bits from values
336
+ __m256i mask_value = _mm256_set1_epi32(~0x80008000);
337
+ __m256i mask_signbit = _mm256_set1_epi32(0x80008000);
338
+ return Vectorized<T>(
339
+ _mm256_or_si256(
340
+ _mm256_and_si256(values, mask_value),
341
+ _mm256_and_si256(sign, mask_signbit)));
342
+ }
343
+ Vectorized<T> erf() const {
344
+ return map(Sleef_erff8_u10);
345
+ }
346
+ Vectorized<T> erfc() const {
347
+ return map(Sleef_erfcf8_u15);
348
+ }
349
+ Vectorized<T> erfinv() const {
350
+ __m256 lo, hi;
351
+ cvt_to_fp32<T>(values, lo, hi);
352
+ __at_align__ float tmp1[size() / 2], tmp2[size() / 2];
353
+ _mm256_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
354
+ _mm256_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
355
+ for (int64_t i = 0; i < size() / 2; i++) {
356
+ tmp1[i] = calc_erfinv(tmp1[i]);
357
+ tmp2[i] = calc_erfinv(tmp2[i]);
358
+ }
359
+ auto o1 = _mm256_loadu_ps(tmp1);
360
+ auto o2 = _mm256_loadu_ps(tmp2);
361
+ return cvt_from_fp32<T>(o1, o2);
362
+ }
363
+ Vectorized<T> exp() const {
364
+ return map(Sleef_expf8_u10);
365
+ }
366
+ Vectorized<T> exp2() const {
367
+ return map(Sleef_exp2f8_u10);
368
+ }
369
+ Vectorized<T> expm1() const {
370
+ return map(Sleef_expm1f8_u10);
371
+ }
372
+ Vectorized<T> fmod(const Vectorized<T> & q) const {
373
+ __m256 x_lo, x_hi;
374
+ cvt_to_fp32<T>(values, x_lo, x_hi);
375
+ __m256 q_lo, q_hi;
376
+ cvt_to_fp32<T>(q.values, q_lo, q_hi);
377
+ auto o1 = Sleef_fmodf8(x_lo, q_lo);
378
+ auto o2 = Sleef_fmodf8(x_hi, q_hi);
379
+ return cvt_from_fp32<T>(o1, o2);
380
+ }
381
+ Vectorized<T> hypot(const Vectorized<T> &b) const {
382
+ __m256 lo, hi;
383
+ __m256 b1, b2;
384
+ cvt_to_fp32<T>(values, lo, hi);
385
+ cvt_to_fp32<T>(b.values, b1, b2);
386
+ auto o1 = Sleef_hypotf8_u05(lo, b1);
387
+ auto o2 = Sleef_hypotf8_u05(hi, b2);
388
+ return cvt_from_fp32<T>(o1, o2);
389
+ }
390
+ Vectorized<T> i0() const {
391
+ __m256 lo, hi;
392
+ cvt_to_fp32<T>(values, lo, hi);
393
+ __at_align__ float tmp1[size() / 2], tmp2[size() / 2];
394
+ _mm256_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
395
+ _mm256_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
396
+ for (int64_t i = 0; i < size() / 2; i++) {
397
+ tmp1[i] = calc_i0(tmp1[i]);
398
+ tmp2[i] = calc_i0(tmp2[i]);
399
+ }
400
+ auto o1 = _mm256_loadu_ps(tmp1);
401
+ auto o2 = _mm256_loadu_ps(tmp2);
402
+ return cvt_from_fp32<T>(o1, o2);
403
+ }
404
+ Vectorized<T> i0e() const {
405
+ __m256 lo, hi;
406
+ cvt_to_fp32<T>(values, lo, hi);
407
+ constexpr auto sz = size();
408
+ __at_align__ float tmp1[sz / 2], tmp2[sz / 2];
409
+ _mm256_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
410
+ _mm256_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
411
+
412
+ for (auto i = decltype(sz){0}; i < sz / 2; i++) {
413
+ tmp1[i] = calc_i0e(tmp1[i]);
414
+ tmp2[i] = calc_i0e(tmp2[i]);
415
+ }
416
+ const auto o1 = _mm256_loadu_ps(tmp1);
417
+ const auto o2 = _mm256_loadu_ps(tmp2);
418
+ return cvt_from_fp32<T>(o1, o2);
419
+ }
420
+ Vectorized<T> digamma() const {
421
+ __m256 lo, hi;
422
+ cvt_to_fp32<T>(values, lo, hi);
423
+ constexpr auto sz = size();
424
+ __at_align__ float tmp1[sz / 2], tmp2[sz / 2];
425
+ _mm256_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
426
+ _mm256_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
427
+
428
+ for (auto i = decltype(sz){0}; i < sz / 2; i++) {
429
+ tmp1[i] = calc_digamma(tmp1[i]);
430
+ tmp2[i] = calc_digamma(tmp2[i]);
431
+ }
432
+ const auto o1 = _mm256_loadu_ps(tmp1);
433
+ const auto o2 = _mm256_loadu_ps(tmp2);
434
+ return cvt_from_fp32<T>(o1, o2);
435
+ }
436
+ Vectorized<T> igamma(const Vectorized<T> &x) const {
437
+ __m256 lo, hi;
438
+ __m256 xlo, xhi;
439
+ cvt_to_fp32<T>(values, lo, hi);
440
+ cvt_to_fp32<T>(x.values, xlo, xhi);
441
+ __at_align__ float tmp1[size() / 2], tmp2[size() / 2];
442
+ _mm256_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
443
+ _mm256_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
444
+ __at_align__ float tmpx1[size() / 2], tmpx2[size() / 2];
445
+ _mm256_storeu_ps(reinterpret_cast<float*>(tmpx1), xlo);
446
+ _mm256_storeu_ps(reinterpret_cast<float*>(tmpx2), xhi);
447
+ for (int64_t i = 0; i < size() / 2; ++i) {
448
+ tmp1[i] = calc_igamma(tmp1[i], tmpx1[i]);
449
+ tmp2[i] = calc_igamma(tmp2[i], tmpx2[i]);
450
+ }
451
+ auto o1 = _mm256_loadu_ps(tmp1);
452
+ auto o2 = _mm256_loadu_ps(tmp2);
453
+ return cvt_from_fp32<T>(o1, o2);
454
+ }
455
+
456
+ Vectorized<T> igammac(const Vectorized<T> &x) const {
457
+ __m256 lo, hi;
458
+ __m256 xlo, xhi;
459
+ cvt_to_fp32<T>(values, lo, hi);
460
+ cvt_to_fp32<T>(x.values, xlo, xhi);
461
+ __at_align__ float tmp1[size() / 2], tmp2[size() / 2];
462
+ _mm256_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
463
+ _mm256_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
464
+ __at_align__ float tmpx1[size() / 2], tmpx2[size() / 2];
465
+ _mm256_storeu_ps(reinterpret_cast<float*>(tmpx1), xlo);
466
+ _mm256_storeu_ps(reinterpret_cast<float*>(tmpx2), xhi);
467
+ for (int64_t i = 0; i < size() / 2; ++i) {
468
+ tmp1[i] = calc_igammac(tmp1[i], tmpx1[i]);
469
+ tmp2[i] = calc_igammac(tmp2[i], tmpx2[i]);
470
+ }
471
+ auto o1 = _mm256_loadu_ps(tmp1);
472
+ auto o2 = _mm256_loadu_ps(tmp2);
473
+ return cvt_from_fp32<T>(o1, o2);
474
+ }
475
+ Vectorized<T> log() const {
476
+ return map(Sleef_logf8_u10);
477
+ }
478
+ Vectorized<T> log2() const {
479
+ return map(Sleef_log2f8_u10);
480
+ }
481
+ Vectorized<T> log10() const {
482
+ return map(Sleef_log10f8_u10);
483
+ }
484
+ Vectorized<T> log1p() const {
485
+ return map(Sleef_log1pf8_u10);
486
+ }
487
+ Vectorized<T> sin() const {
488
+ return map(Sleef_sinf8_u10);
489
+ }
490
+ Vectorized<T> sinh() const {
491
+ return map(Sleef_sinhf8_u10);
492
+ }
493
+ Vectorized<T> cos() const {
494
+ return map(Sleef_cosf8_u10);
495
+ }
496
+ Vectorized<T> cosh() const {
497
+ return map(Sleef_coshf8_u10);
498
+ }
499
+ Vectorized<T> ceil() const {
500
+ __m256 lo, hi;
501
+ cvt_to_fp32<T>(values, lo, hi);
502
+ auto o1 = _mm256_ceil_ps(lo);
503
+ auto o2 = _mm256_ceil_ps(hi);
504
+ return cvt_from_fp32<T>(o1, o2);
505
+ }
506
+ Vectorized<T> floor() const {
507
+ __m256 lo, hi;
508
+ cvt_to_fp32<T>(values, lo, hi);
509
+ auto o1 = _mm256_floor_ps(lo);
510
+ auto o2 = _mm256_floor_ps(hi);
511
+ return cvt_from_fp32<T>(o1, o2);
512
+ }
513
+ Vectorized<T> neg() const {
514
+ return _mm256_xor_si256(values, _mm256_set1_epi16(0x8000));
515
+ }
516
+ Vectorized<T> round() const {
517
+ __m256 lo, hi;
518
+ cvt_to_fp32<T>(values, lo, hi);
519
+ auto o1 = _mm256_round_ps(lo, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
520
+ auto o2 = _mm256_round_ps(hi, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
521
+ return cvt_from_fp32<T>(o1, o2);
522
+ }
523
+ Vectorized<T> tan() const {
524
+ return map(Sleef_tanf8_u10);
525
+ }
526
+ Vectorized<T> tanh() const {
527
+ return map(Sleef_tanhf8_u10);
528
+ }
529
+ Vectorized<T> trunc() const {
530
+ __m256 lo, hi;
531
+ cvt_to_fp32<T>(values, lo, hi);
532
+ auto o1 = _mm256_round_ps(lo, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
533
+ auto o2 = _mm256_round_ps(hi, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
534
+ return cvt_from_fp32<T>(o1, o2);
535
+ }
536
+ Vectorized<T> lgamma() const {
537
+ return map(Sleef_lgammaf8_u10);
538
+ }
539
+ Vectorized<T> sqrt() const {
540
+ __m256 lo, hi;
541
+ cvt_to_fp32<T>(values, lo, hi);
542
+ auto o1 = _mm256_sqrt_ps(lo);
543
+ auto o2 = _mm256_sqrt_ps(hi);
544
+ return cvt_from_fp32<T>(o1, o2);
545
+ }
546
+ Vectorized<T> reciprocal() const {
547
+ __m256 lo, hi;
548
+ cvt_to_fp32<T>(values, lo, hi);
549
+ auto ones = _mm256_set1_ps(1);
550
+ auto o1 = _mm256_div_ps(ones, lo);
551
+ auto o2 = _mm256_div_ps(ones, hi);
552
+ return cvt_from_fp32<T>(o1, o2);
553
+ }
554
+ Vectorized<T> rsqrt() const {
555
+ __m256 lo, hi;
556
+ cvt_to_fp32<T>(values, lo, hi);
557
+ auto ones = _mm256_set1_ps(1);
558
+ auto o1 = _mm256_div_ps(ones, _mm256_sqrt_ps(lo));
559
+ auto o2 = _mm256_div_ps(ones, _mm256_sqrt_ps(hi));
560
+ return cvt_from_fp32<T>(o1, o2);
561
+ }
562
+ Vectorized<T> pow(const Vectorized<T> &b) const {
563
+ __m256 lo, hi;
564
+ __m256 b1, b2;
565
+ cvt_to_fp32<T>(values, lo, hi);
566
+ cvt_to_fp32<T>(b.values, b1, b2);
567
+ auto o1 = Sleef_powf8_u10(lo, b1);
568
+ auto o2 = Sleef_powf8_u10(hi, b2);
569
+ return cvt_from_fp32<T>(o1, o2);
570
+ }
571
+ private:
572
+ template<typename Op>
573
+ Vectorized<T> inline binary_compare(const Vectorized<T>& b, Op op) const {
574
+ __m256 a_lo, a_hi;
575
+ __m256 b_lo, b_hi;
576
+ cvt_to_fp32<T>(values, a_lo, a_hi);
577
+ cvt_to_fp32<T>(b.values, b_lo, b_hi);
578
+ auto o1 = op(a_lo, b_lo);
579
+ auto o2 = op(a_hi, b_hi);
580
+ return cvt_from_fp32<T, /*is_compare_op*/true>(o1, o2);
581
+ }
582
+
583
+ public:
584
+ Vectorized<T> inline operator>(const Vectorized<T>& other) const {
585
+ return binary_compare(other, [](__m256 x, __m256 y) { return _mm256_cmp_ps(x, y, _CMP_GT_OQ); });
586
+ }
587
+ Vectorized<T> inline operator<(const Vectorized<T>& other) const {
588
+ return binary_compare(other, [](__m256 x, __m256 y) { return _mm256_cmp_ps(x, y, _CMP_LT_OQ); });
589
+ }
590
+ Vectorized<T> inline operator>=(const Vectorized<T>& other) const {
591
+ return binary_compare(other, [](__m256 x, __m256 y) { return _mm256_cmp_ps(x, y, _CMP_GE_OQ); });
592
+ }
593
+ Vectorized<T> inline operator<=(const Vectorized<T>& other) const {
594
+ return binary_compare(other, [](__m256 x, __m256 y) { return _mm256_cmp_ps(x, y, _CMP_LE_OQ); });
595
+ }
596
+ Vectorized<T> inline operator==(const Vectorized<T>& other) const {
597
+ return binary_compare(other, [](__m256 x, __m256 y) { return _mm256_cmp_ps(x, y, _CMP_EQ_OQ); });
598
+ }
599
+ Vectorized<T> inline operator!=(const Vectorized<T>& other) const {
600
+ return binary_compare(other, [](__m256 x, __m256 y) { return _mm256_cmp_ps(x, y, _CMP_NEQ_UQ); });
601
+ }
602
+ };
603
+
604
+ template<typename T, typename Op>
605
+ static inline Vectorized<T> binary_op_as_fp32(const Vectorized<T>& a, const Vectorized<T>& b, Op op) {
606
+ __m256 a_lo, a_hi;
607
+ __m256 b_lo, b_hi;
608
+ cvt_to_fp32<T>(__m256i(a), a_lo, a_hi);
609
+ cvt_to_fp32<T>(__m256i(b), b_lo, b_hi);
610
+ auto o1 = op(a_lo, b_lo);
611
+ auto o2 = op(a_hi, b_hi);
612
+ return cvt_from_fp32<T>(o1, o2);
613
+ }
614
+
615
+ template <>
616
+ class Vectorized<BFloat16>: public Vectorized16<BFloat16> {
617
+ public:
618
+ using Vectorized16::Vectorized16;
619
+
620
+ Vectorized<BFloat16> frac() const;
621
+
622
+ Vectorized<BFloat16> eq(const Vectorized<BFloat16>& other) const;
623
+ Vectorized<BFloat16> ne(const Vectorized<BFloat16>& other) const;
624
+ Vectorized<BFloat16> gt(const Vectorized<BFloat16>& other) const;
625
+ Vectorized<BFloat16> ge(const Vectorized<BFloat16>& other) const;
626
+ Vectorized<BFloat16> lt(const Vectorized<BFloat16>& other) const;
627
+ Vectorized<BFloat16> le(const Vectorized<BFloat16>& other) const;
628
+ };
629
+
630
+ Vectorized<BFloat16> inline operator+(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
631
+ return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_add_ps(x, y); });
632
+ }
633
+ Vectorized<BFloat16> inline operator-(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
634
+ return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_sub_ps(x, y); });
635
+ }
636
+ Vectorized<BFloat16> inline operator*(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
637
+ return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_mul_ps(x, y); });
638
+ }
639
+ Vectorized<BFloat16> inline operator/(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
640
+ return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_div_ps(x, y); });
641
+ }
642
+ Vectorized<BFloat16> inline operator&(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
643
+ return _mm256_and_si256(a, b);
644
+ }
645
+ Vectorized<BFloat16> inline operator|(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
646
+ return _mm256_or_si256(a, b);
647
+ }
648
+ Vectorized<BFloat16> inline operator^(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
649
+ return _mm256_xor_si256(a, b);
650
+ }
651
+
652
+ inline Vectorized<BFloat16> Vectorized<BFloat16>::eq(const Vectorized<BFloat16>& other) const {
653
+ return (*this == other) & Vectorized<BFloat16>(1.0f);
654
+ }
655
+ inline Vectorized<BFloat16> Vectorized<BFloat16>::ne(const Vectorized<BFloat16>& other) const {
656
+ return (*this != other) & Vectorized<BFloat16>(1.0f);
657
+ }
658
+ inline Vectorized<BFloat16> Vectorized<BFloat16>::gt(const Vectorized<BFloat16>& other) const {
659
+ return (*this > other) & Vectorized<BFloat16>(1.0f);
660
+ }
661
+ inline Vectorized<BFloat16> Vectorized<BFloat16>::ge(const Vectorized<BFloat16>& other) const {
662
+ return (*this >= other) & Vectorized<BFloat16>(1.0f);
663
+ }
664
+ inline Vectorized<BFloat16> Vectorized<BFloat16>::lt(const Vectorized<BFloat16>& other) const {
665
+ return (*this < other) & Vectorized<BFloat16>(1.0f);
666
+ }
667
+ inline Vectorized<BFloat16> Vectorized<BFloat16>::le(const Vectorized<BFloat16>& other) const {
668
+ return (*this <= other) & Vectorized<BFloat16>(1.0f);
669
+ }
670
+
671
+ // frac. Implement this here so we can use subtraction
672
+ inline Vectorized<BFloat16> Vectorized<BFloat16>::frac() const {
673
+ return *this - this->trunc();
674
+ }
675
+
676
+ // Implements the IEEE 754 201X `maximum` operation, which propagates NaN if
677
+ // either input is a NaN.
678
+ template <>
679
+ Vectorized<BFloat16> inline maximum(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
680
+ __m256 a_lo, a_hi;
681
+ __m256 b_lo, b_hi;
682
+ cvtbf16_fp32(__m256i(a), a_lo, a_hi);
683
+ cvtbf16_fp32(__m256i(b), b_lo, b_hi);
684
+ auto max_lo = _mm256_max_ps(a_lo, b_lo);
685
+ auto max_hi = _mm256_max_ps(a_hi, b_hi);
686
+ auto nan_lo = _mm256_cmp_ps(a_lo, b_lo, _CMP_UNORD_Q);
687
+ auto nan_hi = _mm256_cmp_ps(a_hi, b_hi, _CMP_UNORD_Q);
688
+ // Exploit the fact that all-ones is a NaN.
689
+ auto o1 = _mm256_or_ps(max_lo, nan_lo);
690
+ auto o2 = _mm256_or_ps(max_hi, nan_hi);
691
+ return cvtfp32_bf16(o1, o2);
692
+ }
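A scalar reference for this NaN-propagating maximum (illustrative only; unlike std::max, a NaN in either operand yields NaN):

#include <cmath>
#include <limits>

static inline float ieee_maximum_ref(float a, float b) {
  if (std::isnan(a) || std::isnan(b)) {
    return std::numeric_limits<float>::quiet_NaN();
  }
  return a > b ? a : b;
}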
693
+
694
+ // Implements the IEEE 754 201X `minimum` operation, which propagates NaN if
695
+ // either input is a NaN.
696
+ template <>
697
+ Vectorized<BFloat16> inline minimum(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
698
+ __m256 a_lo, a_hi;
699
+ __m256 b_lo, b_hi;
700
+ cvtbf16_fp32(__m256i(a), a_lo, a_hi);
701
+ cvtbf16_fp32(__m256i(b), b_lo, b_hi);
702
+ auto min_lo = _mm256_min_ps(a_lo, b_lo);
703
+ auto min_hi = _mm256_min_ps(a_hi, b_hi);
704
+ auto nan_lo = _mm256_cmp_ps(a_lo, b_lo, _CMP_UNORD_Q);
705
+ auto nan_hi = _mm256_cmp_ps(a_hi, b_hi, _CMP_UNORD_Q);
706
+ // Exploit the fact that all-ones is a NaN.
707
+ auto o1 = _mm256_or_ps(min_lo, nan_lo);
708
+ auto o2 = _mm256_or_ps(min_hi, nan_hi);
709
+ return cvtfp32_bf16(o1, o2);
710
+ }
711
+
712
+ template <>
713
+ Vectorized<BFloat16> inline clamp(const Vectorized<BFloat16>& a,
714
+ const Vectorized<BFloat16>& min, const Vectorized<BFloat16>& max) {
715
+ __m256 a_lo, a_hi;
716
+ __m256 min_lo, min_hi;
717
+ __m256 max_lo, max_hi;
718
+ cvtbf16_fp32(__m256i(a), a_lo, a_hi);
719
+ cvtbf16_fp32(__m256i(min), min_lo, min_hi);
720
+ cvtbf16_fp32(__m256i(max), max_lo, max_hi);
721
+ auto o1 = _mm256_min_ps(max_lo, _mm256_max_ps(min_lo, a_lo));
722
+ auto o2 = _mm256_min_ps(max_hi, _mm256_max_ps(min_hi, a_hi));
723
+ return cvtfp32_bf16(o1, o2);
724
+ }
725
+
726
+ template <>
727
+ Vectorized<BFloat16> inline clamp_max(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& max) {
728
+ __m256 a_lo, a_hi;
729
+ __m256 max_lo, max_hi;
730
+ cvtbf16_fp32(__m256i(a), a_lo, a_hi);
731
+ cvtbf16_fp32(__m256i(max), max_lo, max_hi);
732
+ auto o1 = _mm256_min_ps(max_lo, a_lo);
733
+ auto o2 = _mm256_min_ps(max_hi, a_hi);
734
+ return cvtfp32_bf16(o1, o2);
735
+ }
736
+
737
+ template <>
738
+ Vectorized<BFloat16> inline clamp_min(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& min) {
739
+ __m256 a_lo, a_hi;
740
+ __m256 min_lo, min_hi;
741
+ cvtbf16_fp32(__m256i(a), a_lo, a_hi);
742
+ cvtbf16_fp32(__m256i(min), min_lo, min_hi);
743
+ auto o1 = _mm256_max_ps(min_lo, a_lo);
744
+ auto o2 = _mm256_max_ps(min_hi, a_hi);
745
+ return cvtfp32_bf16(o1, o2);
746
+ }
747
+
748
+ template <>
749
+ inline void convert(const BFloat16* src, BFloat16* dst, int64_t n) {
750
+ int64_t i;
751
+ #pragma unroll
752
+ for (i = 0; i <= (n - Vectorized<BFloat16>::size()); i += Vectorized<BFloat16>::size()) {
753
+ auto vsrc = _mm256_loadu_si256(reinterpret_cast<__m256i*>((void*)(src + i)));
754
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>((void*)(dst + i)), vsrc);
755
+ }
756
+ #pragma unroll
757
+ for (; i < n; i++) {
758
+ dst[i] = src[i];
759
+ }
760
+ }
761
+
762
+ template <>
763
+ inline void convert(const float* src, BFloat16* dst, int64_t n) {
764
+ int64_t i;
765
+ for (i = 0; i + Vectorized<BFloat16>::size() <= n; i += Vectorized<BFloat16>::size()) {
766
+ __m256 a = _mm256_loadu_ps(&src[i]);
767
+ __m256 b = _mm256_loadu_ps(&src[i + 8]);
768
+
769
+ __m256i bf = cvtfp32_bf16(a, b);
770
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>(&dst[i]), bf);
771
+ }
772
+ for (; i < n; i++) {
773
+ dst[i] = c10::convert<BFloat16>(src[i]);
774
+ }
775
+ }
776
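For reference, a hedged scalar sketch of one common fp32-to-bf16 conversion (round-to-nearest-even), illustrating roughly what the cvtfp32_bf16 packing above produces per lane; this is editorial, not the header's code, and real implementations also special-case NaN payloads.

#include <cstdint>
#include <cstring>

// Add a rounding bias derived from bit 16, then keep the high 16 bits.
inline std::uint16_t fp32_to_bf16_scalar(float f) {
  std::uint32_t bits;
  std::memcpy(&bits, &f, sizeof(bits));
  const std::uint32_t rounding_bias = 0x7FFFu + ((bits >> 16) & 1u);
  return static_cast<std::uint16_t>((bits + rounding_bias) >> 16);
}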
+
777
+ template <>
778
+ inline void convert(const double* src, BFloat16* dst, int64_t n) {
779
+ auto load_float = [](const double *src) -> __m256 {
780
+ // Load one float vector from an array of doubles
781
+ __m128 a = _mm256_cvtpd_ps(_mm256_loadu_pd(src));
782
+ __m128 b = _mm256_cvtpd_ps(_mm256_loadu_pd(src + 4));
783
+ return _mm256_insertf128_ps(_mm256_castps128_ps256(a), b, 1);
784
+ };
785
+
786
+ int64_t i;
787
+ for (i = 0; i + Vectorized<BFloat16>::size() <= n; i += Vectorized<BFloat16>::size()) {
788
+ __m256 a = load_float(&src[i]);
789
+ __m256 b = load_float(&src[i + 8]);
790
+
791
+ __m256i bf = cvtfp32_bf16(a, b);
792
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>(&dst[i]), bf);
793
+ }
794
+ for (; i < n; i++) {
795
+ dst[i] = c10::convert<BFloat16>(src[i]);
796
+ }
797
+ }
798
+
799
+ template <>
800
+ Vectorized<BFloat16> inline fmadd(const Vectorized<BFloat16>& a,
801
+ const Vectorized<BFloat16>& b, const Vectorized<BFloat16>& c) {
802
+ __m256 a_lo, a_hi;
803
+ __m256 b_lo, b_hi;
804
+ __m256 c_lo, c_hi;
805
+ cvtbf16_fp32(__m256i(a), a_lo, a_hi);
806
+ cvtbf16_fp32(__m256i(b), b_lo, b_hi);
807
+ cvtbf16_fp32(__m256i(c), c_lo, c_hi);
808
+ auto o1 = _mm256_fmadd_ps(a_lo, b_lo, c_lo);
809
+ auto o2 = _mm256_fmadd_ps(a_hi, b_hi, c_hi);
810
+ return cvtfp32_bf16(o1, o2);
811
+ }
812
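A hypothetical usage sketch of the fmadd specialization above: each call widens to fp32, fuses, and narrows back, so every step rounds through bf16. The kernel name and tail-free loop are illustrative only.

// Elementwise a * b + c over bf16 buffers, one 16-lane vector at a time.
inline void bf16_fma_kernel(const c10::BFloat16* a, const c10::BFloat16* b,
                            const c10::BFloat16* c, c10::BFloat16* out, int64_t n) {
  using Vec = Vectorized<c10::BFloat16>;
  for (int64_t i = 0; i + Vec::size() <= n; i += Vec::size()) {
    auto va = Vec::loadu(a + i);
    auto vb = Vec::loadu(b + i);
    auto vc = Vec::loadu(c + i);
    fmadd(va, vb, vc).store(out + i);
  }
}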
+
813
+ template <>
814
+ class Vectorized<Half>: public Vectorized16<Half> {
815
+ public:
816
+ using Vectorized16::Vectorized16;
817
+
818
+ Vectorized<Half> frac() const;
819
+
820
+ Vectorized<Half> eq(const Vectorized<Half>& other) const;
821
+ Vectorized<Half> ne(const Vectorized<Half>& other) const;
822
+ Vectorized<Half> gt(const Vectorized<Half>& other) const;
823
+ Vectorized<Half> ge(const Vectorized<Half>& other) const;
824
+ Vectorized<Half> lt(const Vectorized<Half>& other) const;
825
+ Vectorized<Half> le(const Vectorized<Half>& other) const;
826
+ };
827
+
828
+ Vectorized<Half> inline operator+(const Vectorized<Half>& a, const Vectorized<Half>& b) {
829
+ return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_add_ps(x, y); });
830
+ }
831
+ Vectorized<Half> inline operator-(const Vectorized<Half>& a, const Vectorized<Half>& b) {
832
+ return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_sub_ps(x, y); });
833
+ }
834
+ Vectorized<Half> inline operator*(const Vectorized<Half>& a, const Vectorized<Half>& b) {
835
+ return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_mul_ps(x, y); });
836
+ }
837
+ Vectorized<Half> inline operator/(const Vectorized<Half>& a, const Vectorized<Half>& b) {
838
+ return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_div_ps(x, y); });
839
+ }
840
+ Vectorized<Half> inline operator&(const Vectorized<Half>& a, const Vectorized<Half>& b) {
841
+ return _mm256_and_si256(a, b);
842
+ }
843
+ Vectorized<Half> inline operator|(const Vectorized<Half>& a, const Vectorized<Half>& b) {
844
+ return _mm256_or_si256(a, b);
845
+ }
846
+ Vectorized<Half> inline operator^(const Vectorized<Half>& a, const Vectorized<Half>& b) {
847
+ return _mm256_xor_si256(a, b);
848
+ }
849
+
850
+ inline Vectorized<Half> Vectorized<Half>::eq(const Vectorized<Half>& other) const {
851
+ return (*this == other) & Vectorized<Half>(1.0f);
852
+ }
853
+ inline Vectorized<Half> Vectorized<Half>::ne(const Vectorized<Half>& other) const {
854
+ return (*this != other) & Vectorized<Half>(1.0f);
855
+ }
856
+ inline Vectorized<Half> Vectorized<Half>::gt(const Vectorized<Half>& other) const {
857
+ return (*this > other) & Vectorized<Half>(1.0f);
858
+ }
859
+ inline Vectorized<Half> Vectorized<Half>::ge(const Vectorized<Half>& other) const {
860
+ return (*this >= other) & Vectorized<Half>(1.0f);
861
+ }
862
+ inline Vectorized<Half> Vectorized<Half>::lt(const Vectorized<Half>& other) const {
863
+ return (*this < other) & Vectorized<Half>(1.0f);
864
+ }
865
+ inline Vectorized<Half> Vectorized<Half>::le(const Vectorized<Half>& other) const {
866
+ return (*this <= other) & Vectorized<Half>(1.0f);
867
+ }
868
+
869
+ // frac. Implement this here so we can use subtraction
870
+ inline Vectorized<Half> Vectorized<Half>::frac() const {
871
+ return *this - this->trunc();
872
+ }
873
+
874
+ // Implements the IEEE 754 201X `maximum` operation, which propagates NaN if
875
+ // either input is a NaN.
876
+ template <>
877
+ Vectorized<Half> inline maximum(const Vectorized<Half>& a, const Vectorized<Half>& b) {
878
+ __m256 a_lo, a_hi;
879
+ __m256 b_lo, b_hi;
880
+ cvtfp16_fp32(__m256i(a), a_lo, a_hi);
881
+ cvtfp16_fp32(__m256i(b), b_lo, b_hi);
882
+ auto max_lo = _mm256_max_ps(a_lo, b_lo);
883
+ auto max_hi = _mm256_max_ps(a_hi, b_hi);
884
+ auto nan_lo = _mm256_cmp_ps(a_lo, b_lo, _CMP_UNORD_Q);
885
+ auto nan_hi = _mm256_cmp_ps(a_hi, b_hi, _CMP_UNORD_Q);
886
+ // Exploit the fact that all-ones is a NaN.
887
+ auto o1 = _mm256_or_ps(max_lo, nan_lo);
888
+ auto o2 = _mm256_or_ps(max_hi, nan_hi);
889
+ return cvtfp32_fp16(o1, o2);
890
+ }
891
+
892
+ // Implements the IEEE 754 201X `minimum` operation, which propagates NaN if
893
+ // either input is a NaN.
894
+ template <>
895
+ Vectorized<Half> inline minimum(const Vectorized<Half>& a, const Vectorized<Half>& b) {
896
+ __m256 a_lo, a_hi;
897
+ __m256 b_lo, b_hi;
898
+ cvtfp16_fp32(__m256i(a), a_lo, a_hi);
899
+ cvtfp16_fp32(__m256i(b), b_lo, b_hi);
900
+ auto min_lo = _mm256_min_ps(a_lo, b_lo);
901
+ auto min_hi = _mm256_min_ps(a_hi, b_hi);
902
+ auto nan_lo = _mm256_cmp_ps(a_lo, b_lo, _CMP_UNORD_Q);
903
+ auto nan_hi = _mm256_cmp_ps(a_hi, b_hi, _CMP_UNORD_Q);
904
+ // Exploit the fact that all-ones is a NaN.
905
+ auto o1 = _mm256_or_ps(min_lo, nan_lo);
906
+ auto o2 = _mm256_or_ps(min_hi, nan_hi);
907
+ return cvtfp32_fp16(o1, o2);
908
+ }
909
+
910
+ template <>
911
+ Vectorized<Half> inline clamp(const Vectorized<Half>& a,
912
+ const Vectorized<Half>& min, const Vectorized<Half>& max) {
913
+ __m256 a_lo, a_hi;
914
+ __m256 min_lo, min_hi;
915
+ __m256 max_lo, max_hi;
916
+ cvtfp16_fp32(__m256i(a), a_lo, a_hi);
917
+ cvtfp16_fp32(__m256i(min), min_lo, min_hi);
918
+ cvtfp16_fp32(__m256i(max), max_lo, max_hi);
919
+ auto o1 = _mm256_min_ps(max_lo, _mm256_max_ps(min_lo, a_lo));
920
+ auto o2 = _mm256_min_ps(max_hi, _mm256_max_ps(min_hi, a_hi));
921
+ return cvtfp32_fp16(o1, o2);
922
+ }
923
+
924
+ template <>
925
+ Vectorized<Half> inline clamp_max(const Vectorized<Half>& a, const Vectorized<Half>& max) {
926
+ __m256 a_lo, a_hi;
927
+ __m256 max_lo, max_hi;
928
+ cvtfp16_fp32(__m256i(a), a_lo, a_hi);
929
+ cvtfp16_fp32(__m256i(max), max_lo, max_hi);
930
+ auto o1 = _mm256_min_ps(max_lo, a_lo);
931
+ auto o2 = _mm256_min_ps(max_hi, a_hi);
932
+ return cvtfp32_fp16(o1, o2);
933
+ }
934
+
935
+ template <>
936
+ Vectorized<Half> inline clamp_min(const Vectorized<Half>& a, const Vectorized<Half>& min) {
937
+ __m256 a_lo, a_hi;
938
+ __m256 min_lo, min_hi;
939
+ cvtfp16_fp32(__m256i(a), a_lo, a_hi);
940
+ cvtfp16_fp32(__m256i(min), min_lo, min_hi);
941
+ auto o1 = _mm256_max_ps(min_lo, a_lo);
942
+ auto o2 = _mm256_max_ps(min_hi, a_hi);
943
+ return cvtfp32_fp16(o1, o2);
944
+ }
945
+
946
+ template <>
947
+ inline void convert(const Half* src, Half* dst, int64_t n) {
948
+ int64_t i;
949
+ #pragma unroll
950
+ for (i = 0; i <= (n - Vectorized<Half>::size()); i += Vectorized<Half>::size()) {
951
+ auto vsrc = _mm256_loadu_si256(reinterpret_cast<__m256i*>((void*)(src + i)));
952
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>((void*)(dst + i)), vsrc);
953
+ }
954
+ #pragma unroll
955
+ for (; i < n; i++) {
956
+ dst[i] = src[i];
957
+ }
958
+ }
959
+
960
+ template <>
961
+ inline void convert(const float* src, Half* dst, int64_t n) {
962
+ int64_t i;
963
+ for (i = 0; i + Vectorized<Half>::size() <= n; i += Vectorized<Half>::size()) {
964
+ __m256 a = _mm256_loadu_ps(&src[i]);
965
+ __m256 b = _mm256_loadu_ps(&src[i + 8]);
966
+
967
+ __m256i c = cvtfp32_fp16(a, b);
968
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>(&dst[i]), c);
969
+ }
970
+ for (; i < n; i++) {
971
+ dst[i] = c10::convert<Half>(src[i]);
972
+ }
973
+ }
974
+
975
+ template <>
976
+ inline void convert(const double* src, Half* dst, int64_t n) {
977
+ auto load_float = [](const double *src) -> __m256 {
978
+ // Load one float vector from an array of doubles
979
+ __m128 a = _mm256_cvtpd_ps(_mm256_loadu_pd(src));
980
+ __m128 b = _mm256_cvtpd_ps(_mm256_loadu_pd(src + 4));
981
+ return _mm256_insertf128_ps(_mm256_castps128_ps256(a), b, 1);
982
+ };
983
+
984
+ int64_t i;
985
+ for (i = 0; i + Vectorized<Half>::size() <= n; i += Vectorized<Half>::size()) {
986
+ __m256 a = load_float(&src[i]);
987
+ __m256 b = load_float(&src[i + 8]);
988
+
989
+ __m256i c = cvtfp32_fp16(a, b);
990
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>(&dst[i]), c);
991
+ }
992
+ for (; i < n; i++) {
993
+ dst[i] = c10::convert<Half>(src[i]);
994
+ }
995
+ }
996
+
997
+ template <>
998
+ Vectorized<Half> inline fmadd(const Vectorized<Half>& a,
999
+ const Vectorized<Half>& b, const Vectorized<Half>& c) {
1000
+ __m256 a_lo, a_hi;
1001
+ __m256 b_lo, b_hi;
1002
+ __m256 c_lo, c_hi;
1003
+ cvtfp16_fp32(__m256i(a), a_lo, a_hi);
1004
+ cvtfp16_fp32(__m256i(b), b_lo, b_hi);
1005
+ cvtfp16_fp32(__m256i(c), c_lo, c_hi);
1006
+ auto o1 = _mm256_fmadd_ps(a_lo, b_lo, c_lo);
1007
+ auto o2 = _mm256_fmadd_ps(a_hi, b_hi, c_hi);
1008
+ return cvtfp32_fp16(o1, o2);
1009
+ }
1010
+
1011
+ #define CONVERT_VECTORIZED_INIT(type, name) \
1012
+ inline std::tuple<Vectorized<float>, Vectorized<float>> convert_##name##_float(const Vectorized<type>& a) { \
1013
+ __m256 o1, o2; \
1014
+ cvt_to_fp32<type>(__m256i(a), o1, o2); \
1015
+ return std::make_tuple(o1, o2); \
1016
+ } \
1017
+ inline Vectorized<type> convert_float_##name(const Vectorized<float>& a, const Vectorized<float>& b) { \
1018
+ return cvt_from_fp32<type>(__m256(a), __m256(b)); \
1019
+ }
1020
+ CONVERT_VECTORIZED_INIT(BFloat16, bfloat16);
1021
+ CONVERT_VECTORIZED_INIT(Half, half);
1022
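The macro instantiations above provide convert_bfloat16_float / convert_float_bfloat16 (and the half variants), which widen one 16-lane bf16 vector into two 8-lane fp32 vectors and back. A hypothetical round trip, assuming the at::vec namespace and <tuple> are available:

inline Vectorized<c10::BFloat16> scale_bf16_by_two(const Vectorized<c10::BFloat16>& x) {
  Vectorized<float> lo, hi;
  std::tie(lo, hi) = convert_bfloat16_float(x);
  const Vectorized<float> two(2.0f);
  // operate in fp32, then narrow back to bf16
  return convert_float_bfloat16(lo * two, hi * two);
}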
+
1023
+ #else // defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
1024
+
1025
+ #define CONVERT_NON_VECTORIZED_INIT(type, name) \
1026
+ inline std::tuple<Vectorized<float>, Vectorized<float>> convert_##name##_float(const Vectorized<type>& a) { \
1027
+ constexpr int64_t K = Vectorized<type>::size(); \
1028
+ __at_align__ float arr[K]; \
1029
+ __at_align__ type arr2[K]; \
1030
+ a.store(arr2); \
1031
+ convert(arr2, arr, K); \
1032
+ return std::make_tuple( \
1033
+ Vectorized<float>::loadu(arr), \
1034
+ Vectorized<float>::loadu(arr + Vectorized<float>::size())); \
1035
+ } \
1036
+ inline Vectorized<type> convert_float_##name(const Vectorized<float>& a, const Vectorized<float>& b) { \
1037
+ constexpr int64_t K = Vectorized<type>::size(); \
1038
+ __at_align__ float arr[K]; \
1039
+ __at_align__ type arr2[K]; \
1040
+ a.store(arr); \
1041
+ b.store(arr + Vectorized<float>::size()); \
1042
+ convert(arr, arr2, K); \
1043
+ return Vectorized<type>::loadu(arr2); \
1044
+ }
1045
+ CONVERT_NON_VECTORIZED_INIT(BFloat16, bfloat16);
1046
+ CONVERT_NON_VECTORIZED_INIT(Half, half);
1047
+
1048
+ #endif // defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
1049
+
1050
+ #if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
1051
+ #define LOAD_FP32_VECTORIZED_INIT(type, name) \
1052
+ inline void load_fp32_from_##name(const type *data, Vectorized<float>& out) { \
1053
+ auto values = _mm_loadu_si128(reinterpret_cast<const __m128i*>(data)); \
1054
+ __m256 out_values; \
1055
+ cvt_to_fp32<type>(values, out_values); \
1056
+ out = out_values; \
1057
+ } \
1058
+ \
1059
+ inline void load_fp32_from_##name(const type *data, Vectorized<float>& out1, Vectorized<float>& out2) { \
1060
+ auto vec = Vectorized<type>::loadu(data); \
1061
+ __m256 out1_values, out2_values; \
1062
+ cvt_to_fp32<type>(vec, out1_values, out2_values); \
1063
+ out1 = out1_values; \
1064
+ out2 = out2_values; \
1065
+ }
1066
+ LOAD_FP32_VECTORIZED_INIT(BFloat16, bf16);
1067
+ LOAD_FP32_VECTORIZED_INIT(Half, fp16);
1068
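A hypothetical helper showing how the loaders defined just above are typically called; the two-output overload consumes Vectorized<c10::BFloat16>::size() elements starting at `row`.

inline void load_bf16_row_as_fp32(const c10::BFloat16* row,
                                  Vectorized<float>& v0, Vectorized<float>& v1) {
  // widen 16 bf16 values straight into two fp32 vectors
  load_fp32_from_bf16(row, v0, v1);
}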
+
1069
+ #else // defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
1070
+ #define LOAD_FP32_NON_VECTORIZED_INIT(type, name) \
1071
+ inline void load_fp32_from_##name(const type *data, Vectorized<float>& out) { \
1072
+ __at_align__ float values[Vectorized<float>::size()]; \
1073
+ for (const auto k : c10::irange(Vectorized<float>::size())) { \
1074
+ values[k] = data[k]; \
1075
+ } \
1076
+ out = Vectorized<float>::loadu(values); \
1077
+ } \
1078
+ \
1079
+ inline void load_fp32_from_##name(const type *data, Vectorized<float>& out1, Vectorized<float>& out2) { \
1080
+ load_fp32_from_##name(data, out1); \
1081
+ data += Vectorized<float>::size(); \
1082
+ load_fp32_from_##name(data, out2); \
1083
+ }
1084
+ LOAD_FP32_NON_VECTORIZED_INIT(BFloat16, bf16);
1085
+ LOAD_FP32_NON_VECTORIZED_INIT(Half, fp16);
1086
+
1087
+ #endif
1088
+ }} // namespace at::vec::CPU_CAPABILITY
1089
+
1090
+ #pragma GCC diagnostic pop
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_complex_double.h ADDED
@@ -0,0 +1,431 @@
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+
6
+ #include <c10/util/complex.h>
7
+ #include <c10/util/irange.h>
8
+ #include <ATen/cpu/vec/intrinsics.h>
9
+ #include <ATen/cpu/vec/vec_base.h>
10
+
11
+ #if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
12
+ #include <sleef.h>
13
+ #endif
14
+
15
+ namespace at::vec {
16
+ // See Note [CPU_CAPABILITY namespace]
17
+ inline namespace CPU_CAPABILITY {
18
+
19
+ #if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
20
+
21
+ template <> class Vectorized<c10::complex<double>> {
22
+ private:
23
+ __m256d values;
24
+ public:
25
+ using value_type = c10::complex<double>;
26
+ using size_type = int;
27
+ static constexpr size_type size() {
28
+ return 2;
29
+ }
30
+ Vectorized() {}
31
+ Vectorized(__m256d v) : values(v) {}
32
+ Vectorized(c10::complex<double> val) {
33
+ double real_value = val.real();
34
+ double imag_value = val.imag();
35
+ values = _mm256_setr_pd(real_value, imag_value,
36
+ real_value, imag_value);
37
+ }
38
+ Vectorized(c10::complex<double> val1, c10::complex<double> val2) {
39
+ values = _mm256_setr_pd(val1.real(), val1.imag(),
40
+ val2.real(), val2.imag());
41
+ }
42
+ operator __m256d() const {
43
+ return values;
44
+ }
45
+ template <int64_t mask>
46
+ static Vectorized<c10::complex<double>> blend(const Vectorized<c10::complex<double>>& a, const Vectorized<c10::complex<double>>& b) {
47
+ // convert c10::complex<V> index mask to V index mask: xy -> xxyy
48
+ static_assert (mask > -1 && mask < 4, "Unexpected mask value");
49
+ switch (mask) {
50
+ case 0:
51
+ return a;
52
+ case 1:
53
+ return _mm256_blend_pd(a.values, b.values, 0x03);
54
+ case 2:
55
+ return _mm256_blend_pd(a.values, b.values, 0x0c);
56
+ case 3: break;
57
+ }
58
+ return b;
59
+ }
60
+ static Vectorized<c10::complex<double>> blendv(const Vectorized<c10::complex<double>>& a, const Vectorized<c10::complex<double>>& b,
61
+ const Vectorized<c10::complex<double>>& mask) {
62
+ // convert c10::complex<V> index mask to V index mask: xy -> xxyy
63
+ auto mask_ = _mm256_unpacklo_pd(mask.values, mask.values);
64
+ return _mm256_blendv_pd(a.values, b.values, mask_);
65
+
66
+ }
67
+ template<typename step_t>
68
+ static Vectorized<c10::complex<double>> arange(c10::complex<double> base = 0., step_t step = static_cast<step_t>(1)) {
69
+ return Vectorized<c10::complex<double>>(base,
70
+ base + step);
71
+ }
72
+ static Vectorized<c10::complex<double>> set(const Vectorized<c10::complex<double>>& a, const Vectorized<c10::complex<double>>& b,
73
+ int64_t count = size()) {
74
+ switch (count) {
75
+ case 0:
76
+ return a;
77
+ case 1:
78
+ return blend<1>(a, b);
79
+ }
80
+ return b;
81
+ }
82
+ static Vectorized<c10::complex<double>> loadu(const void* ptr, int64_t count = size()) {
83
+ if (count == size())
84
+ return _mm256_loadu_pd(reinterpret_cast<const double*>(ptr));
85
+
86
+ __at_align__ double tmp_values[2*size()];
87
+ // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
88
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
89
+ // instructions while a loop would be compiled to one instruction.
90
+ for (const auto i : c10::irange(2*size())) {
91
+ tmp_values[i] = 0.0;
92
+ }
93
+ std::memcpy(
94
+ tmp_values,
95
+ reinterpret_cast<const double*>(ptr),
96
+ count * sizeof(c10::complex<double>));
97
+ return _mm256_load_pd(tmp_values);
98
+ }
99
+ void store(void* ptr, int count = size()) const {
100
+ if (count == size()) {
101
+ _mm256_storeu_pd(reinterpret_cast<double*>(ptr), values);
102
+ } else if (count > 0) {
103
+ double tmp_values[2*size()];
104
+ _mm256_storeu_pd(reinterpret_cast<double*>(tmp_values), values);
105
+ std::memcpy(ptr, tmp_values, count * sizeof(c10::complex<double>));
106
+ }
107
+ }
108
+ const c10::complex<double>& operator[](int idx) const = delete;
109
+ c10::complex<double>& operator[](int idx) = delete;
110
+ Vectorized<c10::complex<double>> map(c10::complex<double> (*const f)(const c10::complex<double> &)) const {
111
+ __at_align__ c10::complex<double> tmp[size()];
112
+ store(tmp);
113
+ for (const auto i : c10::irange(size())) {
114
+ tmp[i] = f(tmp[i]);
115
+ }
116
+ return loadu(tmp);
117
+ }
118
+ __m256d abs_2_() const {
119
+ auto val_2 = _mm256_mul_pd(values, values); // a*a b*b
120
+ return _mm256_hadd_pd(val_2, val_2); // a*a+b*b a*a+b*b
121
+ }
122
+ __m256d abs_() const {
123
+ auto real = _mm256_movedup_pd(values); // real real
124
+ // movehdup_pd does not exist...
125
+ auto imag = _mm256_permute_pd(values, 0xf); // imag imag
126
+ return Sleef_hypotd4_u05(real, imag); // abs abs
127
+ }
128
+ Vectorized<c10::complex<double>> abs() const {
129
+ const __m256d real_mask = _mm256_castsi256_pd(_mm256_setr_epi64x(0xFFFFFFFFFFFFFFFF, 0x0000000000000000,
130
+ 0xFFFFFFFFFFFFFFFF, 0x0000000000000000));
131
+ return _mm256_and_pd(abs_(), real_mask); // abs 0
132
+ }
133
+ __m256d angle_() const {
134
+ // angle = atan2(b, a)
135
+ auto b_a = _mm256_permute_pd(values, 0x05); // b a
136
+ return Sleef_atan2d4_u10(values, b_a); // 90-angle angle
137
+ }
138
+ Vectorized<c10::complex<double>> angle() const {
139
+ const __m256d real_mask = _mm256_castsi256_pd(_mm256_setr_epi64x(0xFFFFFFFFFFFFFFFF, 0x0000000000000000,
140
+ 0xFFFFFFFFFFFFFFFF, 0x0000000000000000));
141
+ auto angle = _mm256_permute_pd(angle_(), 0x05); // angle 90-angle
142
+ return _mm256_and_pd(angle, real_mask); // angle 0
143
+ }
144
+ Vectorized<c10::complex<double>> sgn() const {
145
+ auto abs = abs_();
146
+ auto zero = _mm256_setzero_pd();
147
+ auto mask = _mm256_cmp_pd(abs, zero, _CMP_EQ_OQ);
148
+ auto div = values / abs;
149
+ return _mm256_blendv_pd(div, zero, mask);
150
+ }
151
+ __m256d real_() const {
152
+ const __m256d real_mask = _mm256_castsi256_pd(_mm256_setr_epi64x(0xFFFFFFFFFFFFFFFF, 0x0000000000000000,
153
+ 0xFFFFFFFFFFFFFFFF, 0x0000000000000000));
154
+ return _mm256_and_pd(values, real_mask);
155
+ }
156
+ Vectorized<c10::complex<double>> real() const {
157
+ return real_();
158
+ }
159
+ __m256d imag_() const {
160
+ const __m256d imag_mask = _mm256_castsi256_pd(_mm256_setr_epi64x(0x0000000000000000, 0xFFFFFFFFFFFFFFFF,
161
+ 0x0000000000000000, 0xFFFFFFFFFFFFFFFF));
162
+ return _mm256_and_pd(values, imag_mask);
163
+ }
164
+ Vectorized<c10::complex<double>> imag() const {
165
+ return _mm256_permute_pd(imag_(), 0x05); //b a
166
+ }
167
+ __m256d conj_() const {
168
+ const __m256d sign_mask = _mm256_setr_pd(0.0, -0.0, 0.0, -0.0);
169
+ return _mm256_xor_pd(values, sign_mask); // a -b
170
+ }
171
+ Vectorized<c10::complex<double>> conj() const {
172
+ return conj_();
173
+ }
174
+ Vectorized<c10::complex<double>> log() const {
175
+ // Most trigonometric ops use the log() op to improve complex number performance.
176
+ return map(std::log);
177
+ }
178
+ Vectorized<c10::complex<double>> log2() const {
179
+ const __m256d log2_ = _mm256_set1_pd(std::log(2));
180
+ return _mm256_div_pd(log(), log2_);
181
+ }
182
+ Vectorized<c10::complex<double>> log10() const {
183
+ const __m256d log10_ = _mm256_set1_pd(std::log(10));
184
+ return _mm256_div_pd(log(), log10_);
185
+ }
186
+ Vectorized<c10::complex<double>> log1p() const {
187
+ return map(std::log1p);
188
+ }
189
+ Vectorized<c10::complex<double>> asin() const {
190
+ // asin(x)
191
+ // = -i*ln(iz + sqrt(1 -z^2))
192
+ // = -i*ln((ai - b) + sqrt(1 - (a + bi)*(a + bi)))
193
+ // = -i*ln((-b + ai) + sqrt(1 - (a**2 - b**2) - 2*abi))
194
+ const __m256d one = _mm256_set1_pd(1);
195
+
196
+ auto conj = conj_();
197
+ auto b_a = _mm256_permute_pd(conj, 0x05); //-b a
198
+ auto ab = _mm256_mul_pd(conj, b_a); //-ab -ab
199
+ auto im = _mm256_add_pd(ab, ab); //-2ab -2ab
200
+
201
+ auto val_2 = _mm256_mul_pd(values, values); // a*a b*b
202
+ auto re = _mm256_hsub_pd(val_2, _mm256_permute_pd(val_2, 0x05)); // a*a-b*b b*b-a*a
203
+ re = _mm256_sub_pd(one, re);
204
+
205
+ auto root = Vectorized(_mm256_blend_pd(re, im, 0x0A)).sqrt(); //sqrt(re + i*im)
206
+ auto ln = Vectorized(_mm256_add_pd(b_a, root)).log(); //ln(iz + sqrt())
207
+ return Vectorized(_mm256_permute_pd(ln.values, 0x05)).conj(); //-i*ln()
208
+ }
209
+ Vectorized<c10::complex<double>> acos() const {
210
+ // acos(x) = pi/2 - asin(x)
211
+ constexpr auto pi_2d = c10::pi<double> / 2;
212
+ const __m256d pi_2 = _mm256_setr_pd(pi_2d, 0.0, pi_2d, 0.0);
213
+ return _mm256_sub_pd(pi_2, asin());
214
+ }
215
+ Vectorized<c10::complex<double>> atan() const;
216
+ Vectorized<c10::complex<double>> atanh() const {
217
+ return map(std::atanh);
218
+ }
219
+ Vectorized<c10::complex<double>> exp() const {
220
+ //exp(a + bi)
221
+ // = exp(a)*(cos(b) + sin(b)i)
222
+ auto exp = Sleef_expd4_u10(values); //exp(a) exp(b)
223
+ exp = _mm256_blend_pd(exp, _mm256_permute_pd(exp, 0x05), 0x0A); //exp(a) exp(a)
224
+
225
+ auto sin_cos = Sleef_sincosd4_u10(values); //[sin(a), cos(a)] [sin(b), cos(b)]
226
+ auto cos_sin = _mm256_blend_pd(_mm256_permute_pd(sin_cos.y, 0x05),
227
+ sin_cos.x, 0x0A); //cos(b) sin(b)
228
+ return _mm256_mul_pd(exp, cos_sin);
229
+ }
230
+ Vectorized<c10::complex<double>> exp2() const {
231
+ // Use identity 2**x = exp(log(2) * x)
232
+ const __m256d ln_2 = _mm256_set1_pd(c10::ln_2<double>);
233
+ Vectorized<c10::complex<double>> scaled_values = _mm256_mul_pd(values, ln_2);
234
+ return scaled_values.exp();
235
+ }
236
+ Vectorized<c10::complex<double>> expm1() const {
237
+ return map(std::expm1);
238
+ }
239
+ Vectorized<c10::complex<double>> sin() const {
240
+ return map(std::sin);
241
+ }
242
+ Vectorized<c10::complex<double>> sinh() const {
243
+ return map(std::sinh);
244
+ }
245
+ Vectorized<c10::complex<double>> cos() const {
246
+ return map(std::cos);
247
+ }
248
+ Vectorized<c10::complex<double>> cosh() const {
249
+ return map(std::cosh);
250
+ }
251
+ Vectorized<c10::complex<double>> ceil() const {
252
+ return _mm256_ceil_pd(values);
253
+ }
254
+ Vectorized<c10::complex<double>> floor() const {
255
+ return _mm256_floor_pd(values);
256
+ }
257
+ Vectorized<c10::complex<double>> neg() const {
258
+ auto zero = _mm256_setzero_pd();
259
+ return _mm256_sub_pd(zero, values);
260
+ }
261
+ Vectorized<c10::complex<double>> round() const {
262
+ return _mm256_round_pd(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
263
+ }
264
+ Vectorized<c10::complex<double>> tan() const {
265
+ return map(std::tan);
266
+ }
267
+ Vectorized<c10::complex<double>> tanh() const {
268
+ return map(std::tanh);
269
+ }
270
+ Vectorized<c10::complex<double>> trunc() const {
271
+ return _mm256_round_pd(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
272
+ }
273
+ Vectorized<c10::complex<double>> sqrt() const {
274
+ return map(std::sqrt);
275
+ }
276
+ Vectorized<c10::complex<double>> reciprocal() const;
277
+ Vectorized<c10::complex<double>> rsqrt() const {
278
+ return sqrt().reciprocal();
279
+ }
280
+ Vectorized<c10::complex<double>> pow(const Vectorized<c10::complex<double>> &exp) const {
281
+ __at_align__ c10::complex<double> x_tmp[size()];
282
+ __at_align__ c10::complex<double> y_tmp[size()];
283
+ store(x_tmp);
284
+ exp.store(y_tmp);
285
+ for (const auto i : c10::irange(size())) {
286
+ x_tmp[i] = std::pow(x_tmp[i], y_tmp[i]);
287
+ }
288
+ return loadu(x_tmp);
289
+ }
290
+ // Comparison using the _CMP_**_OQ predicate.
291
+ // `O`: get false if an operand is NaN
292
+ // `Q`: do not raise if an operand is NaN
293
+ Vectorized<c10::complex<double>> operator==(const Vectorized<c10::complex<double>>& other) const {
294
+ return _mm256_cmp_pd(values, other.values, _CMP_EQ_OQ);
295
+ }
296
+ Vectorized<c10::complex<double>> operator!=(const Vectorized<c10::complex<double>>& other) const {
297
+ return _mm256_cmp_pd(values, other.values, _CMP_NEQ_UQ);
298
+ }
299
+ Vectorized<c10::complex<double>> operator<(const Vectorized<c10::complex<double>>&) const {
300
+ TORCH_CHECK(false, "not supported for complex numbers");
301
+ }
302
+ Vectorized<c10::complex<double>> operator<=(const Vectorized<c10::complex<double>>&) const {
303
+ TORCH_CHECK(false, "not supported for complex numbers");
304
+ }
305
+ Vectorized<c10::complex<double>> operator>(const Vectorized<c10::complex<double>>&) const {
306
+ TORCH_CHECK(false, "not supported for complex numbers");
307
+ }
308
+ Vectorized<c10::complex<double>> operator>=(const Vectorized<c10::complex<double>>&) const {
309
+ TORCH_CHECK(false, "not supported for complex numbers");
310
+ }
311
+
312
+ Vectorized<c10::complex<double>> eq(const Vectorized<c10::complex<double>>& other) const;
313
+ Vectorized<c10::complex<double>> ne(const Vectorized<c10::complex<double>>& other) const;
314
+ };
315
+
316
+ template <> Vectorized<c10::complex<double>> inline operator+(const Vectorized<c10::complex<double>> &a, const Vectorized<c10::complex<double>> &b) {
317
+ return _mm256_add_pd(a, b);
318
+ }
319
+
320
+ template <> Vectorized<c10::complex<double>> inline operator-(const Vectorized<c10::complex<double>> &a, const Vectorized<c10::complex<double>> &b) {
321
+ return _mm256_sub_pd(a, b);
322
+ }
323
+
324
+ template <> Vectorized<c10::complex<double>> inline operator*(const Vectorized<c10::complex<double>> &a, const Vectorized<c10::complex<double>> &b) {
325
+ //(a + bi) * (c + di) = (ac - bd) + (ad + bc)i
326
+ const __m256d sign_mask = _mm256_setr_pd(0.0, -0.0, 0.0, -0.0);
327
+ auto ac_bd = _mm256_mul_pd(a, b); //ac bd
328
+
329
+ auto d_c = _mm256_permute_pd(b, 0x05); //d c
330
+ d_c = _mm256_xor_pd(sign_mask, d_c); //d -c
331
+ auto ad_bc = _mm256_mul_pd(a, d_c); //ad -bc
332
+
333
+ auto ret = _mm256_hsub_pd(ac_bd, ad_bc); //ac - bd ad + bc
334
+ return ret;
335
+ }
336
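An illustrative scalar counterpart of the interleaved multiply above, one complex lane at a time; the helper name is editorial, not part of this header.

// (a + bi) * (c + di) = (ac - bd) + (ad + bc)i
inline c10::complex<double> complex_mul_scalar(const c10::complex<double>& x,
                                               const c10::complex<double>& y) {
  const double a = x.real(), b = x.imag();
  const double c = y.real(), d = y.imag();
  return c10::complex<double>(a * c - b * d, a * d + b * c);
}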
+
337
+ template <> Vectorized<c10::complex<double>> inline operator/(const Vectorized<c10::complex<double>> &a, const Vectorized<c10::complex<double>> &b) {
338
+ //re + im*i = (a + bi) / (c + di)
339
+ auto mask = _mm256_set1_pd(-0.f);
340
+ auto fabs_cd = _mm256_andnot_pd(mask, b); // |c| |d|
341
+ auto fabs_dc = _mm256_permute_pd(fabs_cd, 0x05); // |d| |c|
342
+ auto scale = _mm256_div_pd(_mm256_set1_pd(1.0f), _mm256_max_pd(fabs_cd, fabs_dc)); // 1/sc 1/sc
343
+ auto a2 = _mm256_mul_pd(a, scale); // a/sc b/sc
344
+ auto b2 = _mm256_mul_pd(b, scale); // c/sc d/sc
345
+ auto acbd2 = _mm256_mul_pd(a2, b2);
346
+
347
+ const __m256d sign_mask = _mm256_setr_pd(-0.0, 0.0, -0.0, 0.0);
348
+ auto dc2 = _mm256_permute_pd(b2, 0x05); // d/sc c/sc
349
+ dc2 = _mm256_xor_pd(sign_mask, dc2); // -d/|c,d| c/sc
350
+ auto adbc2 = _mm256_mul_pd(a2, dc2); //-ad/sc^2 bc/sc^2
351
+ auto res2 = _mm256_hadd_pd(acbd2, adbc2); //(ac+bd)/sc^2 (bc-ad)/sc^2
352
+
353
+ // get the denominator
354
+ auto denom2 = Vectorized<c10::complex<double>>(b2).abs_2_(); // (c^2+d^2)/sc^2 (c^2+d^2)/sc^2
355
+ res2 = _mm256_div_pd(res2, denom2);
356
+ return res2;
357
+ }
358
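A scalar model of the scaled division above, assuming <algorithm> and <cmath>: dividing both operands by sc = max(|c|, |d|) first keeps c^2 + d^2 in range before applying the usual quotient formula. The sc == 0 (division by a zero complex) case is deliberately not handled in this sketch.

inline c10::complex<double> complex_div_scalar(const c10::complex<double>& x,
                                               const c10::complex<double>& y) {
  const double sc = std::max(std::abs(y.real()), std::abs(y.imag()));
  const double a = x.real() / sc, b = x.imag() / sc;  // a/sc, b/sc
  const double c = y.real() / sc, d = y.imag() / sc;  // c/sc, d/sc
  const double denom = c * c + d * d;                 // (c^2 + d^2) / sc^2
  return c10::complex<double>((a * c + b * d) / denom, (b * c - a * d) / denom);
}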
+
359
+ // reciprocal. Implement this here so we can use multiplication.
360
+ inline Vectorized<c10::complex<double>> Vectorized<c10::complex<double>>::reciprocal() const{
361
+ //re + im*i = (a + bi) / (c + di)
362
+ //re = (ac + bd)/abs_2() = c/abs_2()
363
+ //im = (bc - ad)/abs_2() = d/abs_2()
364
+ const __m256d sign_mask = _mm256_setr_pd(0.0, -0.0, 0.0, -0.0);
365
+ auto c_d = _mm256_xor_pd(sign_mask, values); //c -d
366
+ return _mm256_div_pd(c_d, abs_2_());
367
+ }
368
+
369
+ inline Vectorized<c10::complex<double>> Vectorized<c10::complex<double>>::atan() const {
370
+ // atan(x) = i/2 * ln((i + z)/(i - z))
371
+ const __m256d i = _mm256_setr_pd(0.0, 1.0, 0.0, 1.0);
372
+ const Vectorized i_half = _mm256_setr_pd(0.0, 0.5, 0.0, 0.5);
373
+
374
+ auto sum = Vectorized(_mm256_add_pd(i, values)); // a 1+b
375
+ auto sub = Vectorized(_mm256_sub_pd(i, values)); // -a 1-b
376
+ auto ln = (sum/sub).log(); // ln((i + z)/(i - z))
377
+ return i_half*ln; // i/2*ln()
378
+ }
379
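For reference, the identity used by atan() above written with std::complex (assumes <complex>); this is an editorial sketch, not part of the header.

// atan(z) = i/2 * ln((i + z) / (i - z))
inline std::complex<double> atan_via_log(const std::complex<double>& z) {
  const std::complex<double> i(0.0, 1.0);
  return (i / 2.0) * std::log((i + z) / (i - z));
}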
+
380
+ template <>
381
+ Vectorized<c10::complex<double>> inline maximum(const Vectorized<c10::complex<double>>& a, const Vectorized<c10::complex<double>>& b) {
382
+ auto abs_a = a.abs_2_();
383
+ auto abs_b = b.abs_2_();
384
+ auto mask = _mm256_cmp_pd(abs_a, abs_b, _CMP_LT_OQ);
385
+ auto max = _mm256_blendv_pd(a, b, mask);
386
+ // Exploit the fact that all-ones is a NaN.
387
+ auto isnan = _mm256_cmp_pd(abs_a, abs_b, _CMP_UNORD_Q);
388
+ return _mm256_or_pd(max, isnan);
389
+ }
390
+
391
+ template <>
392
+ Vectorized<c10::complex<double>> inline minimum(const Vectorized<c10::complex<double>>& a, const Vectorized<c10::complex<double>>& b) {
393
+ auto abs_a = a.abs_2_();
394
+ auto abs_b = b.abs_2_();
395
+ auto mask = _mm256_cmp_pd(abs_a, abs_b, _CMP_GT_OQ);
396
+ auto min = _mm256_blendv_pd(a, b, mask);
397
+ // Exploit the fact that all-ones is a NaN.
398
+ auto isnan = _mm256_cmp_pd(abs_a, abs_b, _CMP_UNORD_Q);
399
+ return _mm256_or_pd(min, isnan);
400
+ }
401
+
402
+ template <>
403
+ Vectorized<c10::complex<double>> inline operator&(const Vectorized<c10::complex<double>>& a, const Vectorized<c10::complex<double>>& b) {
404
+ return _mm256_and_pd(a, b);
405
+ }
406
+
407
+ template <>
408
+ Vectorized<c10::complex<double>> inline operator|(const Vectorized<c10::complex<double>>& a, const Vectorized<c10::complex<double>>& b) {
409
+ return _mm256_or_pd(a, b);
410
+ }
411
+
412
+ template <>
413
+ Vectorized<c10::complex<double>> inline operator^(const Vectorized<c10::complex<double>>& a, const Vectorized<c10::complex<double>>& b) {
414
+ return _mm256_xor_pd(a, b);
415
+ }
416
+
417
+ inline Vectorized<c10::complex<double>> Vectorized<c10::complex<double>>::eq(const Vectorized<c10::complex<double>>& other) const {
418
+ auto eq = (*this == other); // compares real and imag individually
419
+ // If both real numbers and imag numbers are equal, then the complex numbers are equal
420
+ return (eq.real() & eq.imag()) & Vectorized<c10::complex<double>>(_mm256_set1_pd(1.0));
421
+ }
422
+
423
+ inline Vectorized<c10::complex<double>> Vectorized<c10::complex<double>>::ne(const Vectorized<c10::complex<double>>& other) const {
424
+ auto ne = (*this != other); // compares real and imag individually
425
+ // If either real numbers or imag numbers are not equal, then the complex numbers are not equal
426
+ return (ne.real() | ne.imag()) & Vectorized<c10::complex<double>>(_mm256_set1_pd(1.0));
427
+ }
428
+
429
+ #endif
430
+
431
+ }} // namespace at::vec::CPU_CAPABILITY
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_complex_float.h ADDED
@@ -0,0 +1,468 @@
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+
6
+ #include <c10/util/complex.h>
7
+ #include <c10/util/irange.h>
8
+ #include <ATen/cpu/vec/intrinsics.h>
9
+ #include <ATen/cpu/vec/vec_base.h>
10
+ #if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
11
+ #include <sleef.h>
12
+ #endif
13
+
14
+ namespace at::vec {
15
+ // See Note [CPU_CAPABILITY namespace]
16
+ inline namespace CPU_CAPABILITY {
17
+
18
+ #if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
19
+
20
+ template <> class Vectorized<c10::complex<float>> {
21
+ private:
22
+ __m256 values;
23
+ public:
24
+ using value_type = c10::complex<float>;
25
+ using size_type = int;
26
+ static constexpr size_type size() {
27
+ return 4;
28
+ }
29
+ Vectorized() {}
30
+ Vectorized(__m256 v) : values(v) {}
31
+ Vectorized(c10::complex<float> val) {
32
+ float real_value = val.real();
33
+ float imag_value = val.imag();
34
+ values = _mm256_setr_ps(real_value, imag_value,
35
+ real_value, imag_value,
36
+ real_value, imag_value,
37
+ real_value, imag_value
38
+ );
39
+ }
40
+ Vectorized(c10::complex<float> val1, c10::complex<float> val2, c10::complex<float> val3, c10::complex<float> val4) {
41
+ values = _mm256_setr_ps(val1.real(), val1.imag(),
42
+ val2.real(), val2.imag(),
43
+ val3.real(), val3.imag(),
44
+ val4.real(), val4.imag()
45
+ );
46
+ }
47
+ operator __m256() const {
48
+ return values;
49
+ }
50
+ template <int64_t mask>
51
+ static Vectorized<c10::complex<float>> blend(const Vectorized<c10::complex<float>>& a, const Vectorized<c10::complex<float>>& b) {
52
+ // convert c10::complex<V> index mask to V index mask: xy -> xxyy
53
+ static_assert(mask > -1 && mask < 16, "Unexpected mask range");
54
+ switch (mask) {
55
+ case 0:
56
+ return a;
57
+ case 1:
58
+ return _mm256_blend_ps(a.values, b.values, 0x03); //b0000 0001 = b0000 0011
59
+ case 2:
60
+ return _mm256_blend_ps(a.values, b.values, 0x0C); //b0000 0010 = b0000 1100
61
+ case 3:
62
+ return _mm256_blend_ps(a.values, b.values, 0x0F); //b0000 0011 = b0000 1111
63
+ case 4:
64
+ return _mm256_blend_ps(a.values, b.values, 0x30); //b0000 0100 = b0011 0000
65
+ case 5:
66
+ return _mm256_blend_ps(a.values, b.values, 0x33); //b0000 0101 = b0011 0011
67
+ case 6:
68
+ return _mm256_blend_ps(a.values, b.values, 0x3C); //b0000 0110 = b0011 1100
69
+ case 7:
70
+ return _mm256_blend_ps(a.values, b.values, 0x3F); //b0000 0111 = b0011 1111
71
+ case 8:
72
+ return _mm256_blend_ps(a.values, b.values, 0xC0); //b0000 1000 = b1100 0000
73
+ case 9:
74
+ return _mm256_blend_ps(a.values, b.values, 0xC3); //b0000 1001 = b1100 0011
75
+ case 10:
76
+ return _mm256_blend_ps(a.values, b.values, 0xCC); //b0000 1010 = b1100 1100
77
+ case 11:
78
+ return _mm256_blend_ps(a.values, b.values, 0xCF); //b0000 1011 = b1100 1111
79
+ case 12:
80
+ return _mm256_blend_ps(a.values, b.values, 0xF0); //b0000 1100 = b1111 0000
81
+ case 13:
82
+ return _mm256_blend_ps(a.values, b.values, 0xF3); //b0000 1101 = b1111 0011
83
+ case 14:
84
+ return _mm256_blend_ps(a.values, b.values, 0xFC); //b0000 1110 = b1111 1100
85
+ default: break;
86
+ }
87
+ return b;
88
+ }
89
+ static Vectorized<c10::complex<float>> blendv(const Vectorized<c10::complex<float>>& a, const Vectorized<c10::complex<float>>& b,
90
+ const Vectorized<c10::complex<float>>& mask) {
91
+ // convert c10::complex<V> index mask to V index mask: xy -> xxyy
92
+ auto mask_ = _mm256_unpacklo_ps(mask.values, mask.values);
93
+ return _mm256_blendv_ps(a.values, b.values, mask_);
94
+
95
+ }
96
+ template<typename step_t>
97
+ static Vectorized<c10::complex<float>> arange(c10::complex<float> base = 0., step_t step = static_cast<step_t>(1)) {
98
+ return Vectorized<c10::complex<float>>(base,
99
+ base + step,
100
+ base + c10::complex<float>(2)*step,
101
+ base + c10::complex<float>(3)*step);
102
+ }
103
+ static Vectorized<c10::complex<float>> set(const Vectorized<c10::complex<float>>& a, const Vectorized<c10::complex<float>>& b,
104
+ int64_t count = size()) {
105
+ switch (count) {
106
+ case 0:
107
+ return a;
108
+ case 1:
109
+ return blend<1>(a, b);
110
+ case 2:
111
+ return blend<3>(a, b);
112
+ case 3:
113
+ return blend<7>(a, b);
114
+ }
115
+ return b;
116
+ }
117
+ static Vectorized<c10::complex<float>> loadu(const void* ptr, int64_t count = size()) {
118
+ if (count == size())
119
+ return _mm256_loadu_ps(reinterpret_cast<const float*>(ptr));
120
+
121
+ __at_align__ float tmp_values[2*size()];
122
+ // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502
123
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
124
+ // instructions while a loop would be compiled to one instruction.
125
+ for (const auto i : c10::irange(2*size())) {
126
+ tmp_values[i] = 0.0;
127
+ }
128
+ std::memcpy(
129
+ tmp_values,
130
+ reinterpret_cast<const float*>(ptr),
131
+ count * sizeof(c10::complex<float>));
132
+ return _mm256_load_ps(tmp_values);
133
+ }
134
+ void store(void* ptr, int count = size()) const {
135
+ if (count == size()) {
136
+ _mm256_storeu_ps(reinterpret_cast<float*>(ptr), values);
137
+ } else if (count > 0) {
138
+ float tmp_values[2*size()];
139
+ _mm256_storeu_ps(reinterpret_cast<float*>(tmp_values), values);
140
+ std::memcpy(ptr, tmp_values, count * sizeof(c10::complex<float>));
141
+ }
142
+ }
143
+ const c10::complex<float>& operator[](int idx) const = delete;
144
+ c10::complex<float>& operator[](int idx) = delete;
145
+ Vectorized<c10::complex<float>> map(c10::complex<float> (*const f)(const c10::complex<float> &)) const {
146
+ __at_align__ c10::complex<float> tmp[size()];
147
+ store(tmp);
148
+ for (const auto i : c10::irange(size())) {
149
+ tmp[i] = f(tmp[i]);
150
+ }
151
+ return loadu(tmp);
152
+ }
153
+ __m256 abs_2_() const {
154
+ auto val_2 = _mm256_mul_ps(values, values); // a*a b*b
155
+ auto ret = _mm256_hadd_ps(val_2, val_2); // a*a+b*b a*a+b*b
156
+ return _mm256_permute_ps(ret, 0xD8);
157
+ }
158
+ __m256 abs_() const {
159
+ auto real = _mm256_moveldup_ps(values); // real real
160
+ auto imag = _mm256_movehdup_ps(values); // imag imag
161
+ return Sleef_hypotf8_u05(real, imag); // abs abs
162
+ }
163
+ Vectorized<c10::complex<float>> abs() const {
164
+ const __m256 real_mask = _mm256_castsi256_ps(_mm256_setr_epi32(0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000,
165
+ 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000));
166
+ return _mm256_and_ps(abs_(), real_mask); // abs 0
167
+ }
168
+ __m256 angle_() const {
169
+ //angle = atan2(b/a)
170
+ auto b_a = _mm256_permute_ps(values, 0xB1); // b a
171
+ return Sleef_atan2f8_u10(values, b_a); // 90-angle angle
172
+ }
173
+ Vectorized<c10::complex<float>> angle() const {
174
+ const __m256 real_mask = _mm256_castsi256_ps(_mm256_setr_epi32(0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000,
175
+ 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000));
176
+ auto angle = _mm256_permute_ps(angle_(), 0xB1); // angle 90-angle
177
+ return _mm256_and_ps(angle, real_mask); // angle 0
178
+ }
179
+ Vectorized<c10::complex<float>> sgn() const {
180
+ auto abs = abs_();
181
+ auto zero = _mm256_setzero_ps();
182
+ auto mask = _mm256_cmp_ps(abs, zero, _CMP_EQ_OQ);
183
+ auto div = values / abs;
184
+ return _mm256_blendv_ps(div, zero, mask);
185
+ }
186
+ __m256 real_() const {
187
+ const __m256 real_mask = _mm256_castsi256_ps(_mm256_setr_epi32(0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000,
188
+ 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000));
189
+ return _mm256_and_ps(values, real_mask);
190
+ }
191
+ Vectorized<c10::complex<float>> real() const {
192
+ return real_();
193
+ }
194
+ __m256 imag_() const {
195
+ const __m256 imag_mask = _mm256_castsi256_ps(_mm256_setr_epi32(0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF,
196
+ 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF));
197
+ return _mm256_and_ps(values, imag_mask);
198
+ }
199
+ Vectorized<c10::complex<float>> imag() const {
200
+ return _mm256_permute_ps(imag_(), 0xB1); //b a
201
+ }
202
+ __m256 conj_() const {
203
+ const __m256 sign_mask = _mm256_setr_ps(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0);
204
+ return _mm256_xor_ps(values, sign_mask); // a -b
205
+ }
206
+ Vectorized<c10::complex<float>> conj() const {
207
+ return conj_();
208
+ }
209
+ Vectorized<c10::complex<float>> log() const {
210
+ // Most trigonomic ops use the log() op to improve complex number performance.
211
+ return map(std::log);
212
+ }
213
+ Vectorized<c10::complex<float>> log2() const {
214
+ const __m256 log2_ = _mm256_set1_ps(std::log(2));
215
+ return _mm256_div_ps(log(), log2_);
216
+ }
217
+ Vectorized<c10::complex<float>> log10() const {
218
+ const __m256 log10_ = _mm256_set1_ps(std::log(10));
219
+ return _mm256_div_ps(log(), log10_);
220
+ }
221
+ Vectorized<c10::complex<float>> log1p() const {
222
+ return map(std::log1p);
223
+ }
224
+ Vectorized<c10::complex<float>> asin() const {
225
+ // asin(x)
226
+ // = -i*ln(iz + sqrt(1 -z^2))
227
+ // = -i*ln((ai - b) + sqrt(1 - (a + bi)*(a + bi)))
228
+ // = -i*ln((-b + ai) + sqrt(1 - (a**2 - b**2) - 2*abi))
229
+ const __m256 one = _mm256_set1_ps(1);
230
+
231
+ auto conj = conj_();
232
+ auto b_a = _mm256_permute_ps(conj, 0xB1); //-b a
233
+ auto ab = _mm256_mul_ps(conj, b_a); //-ab -ab
234
+ auto im = _mm256_add_ps(ab, ab); //-2ab -2ab
235
+
236
+ auto val_2 = _mm256_mul_ps(values, values); // a*a b*b
237
+ auto re = _mm256_hsub_ps(val_2, _mm256_permute_ps(val_2, 0xB1)); // a*a-b*b b*b-a*a
238
+ re = _mm256_permute_ps(re, 0xD8);
239
+ re = _mm256_sub_ps(one, re);
240
+
241
+ auto root = Vectorized(_mm256_blend_ps(re, im, 0xAA)).sqrt(); //sqrt(re + i*im)
242
+ auto ln = Vectorized(_mm256_add_ps(b_a, root)).log(); //ln(iz + sqrt())
243
+ return Vectorized(_mm256_permute_ps(ln.values, 0xB1)).conj(); //-i*ln()
244
+ }
245
+ Vectorized<c10::complex<float>> acos() const {
246
+ return map(std::acos);
247
+ }
248
+ Vectorized<c10::complex<float>> atan() const;
249
+ Vectorized<c10::complex<float>> atanh() const {
250
+ return map(std::atanh);
251
+ }
252
+ Vectorized<c10::complex<float>> exp() const {
253
+ //exp(a + bi)
254
+ // = exp(a)*(cos(b) + sin(b)i)
255
+ auto exp = Sleef_expf8_u10(values); //exp(a) exp(b)
256
+ exp = _mm256_blend_ps(exp, _mm256_permute_ps(exp, 0xB1), 0xAA); //exp(a) exp(a)
257
+
258
+ auto sin_cos = Sleef_sincosf8_u10(values); //[sin(a), cos(a)] [sin(b), cos(b)]
259
+ auto cos_sin = _mm256_blend_ps(_mm256_permute_ps(sin_cos.y, 0xB1),
260
+ sin_cos.x, 0xAA); //cos(b) sin(b)
261
+ return _mm256_mul_ps(exp, cos_sin);
262
+ }
263
+ Vectorized<c10::complex<float>> exp2() const {
264
+ // Use identity 2**x = exp(log(2) * x)
265
+ const __m256 ln_2 = _mm256_set1_ps(c10::ln_2<float>);
266
+ Vectorized<c10::complex<float>> scaled_values = _mm256_mul_ps(values, ln_2);
267
+ return scaled_values.exp();
268
+ }
269
+ Vectorized<c10::complex<float>> expm1() const {
270
+ return map(std::expm1);
271
+ }
272
+ Vectorized<c10::complex<float>> sin() const {
273
+ return map(std::sin);
274
+ }
275
+ Vectorized<c10::complex<float>> sinh() const {
276
+ return map(std::sinh);
277
+ }
278
+ Vectorized<c10::complex<float>> cos() const {
279
+ return map(std::cos);
280
+ }
281
+ Vectorized<c10::complex<float>> cosh() const {
282
+ return map(std::cosh);
283
+ }
284
+ Vectorized<c10::complex<float>> ceil() const {
285
+ return _mm256_ceil_ps(values);
286
+ }
287
+ Vectorized<c10::complex<float>> floor() const {
288
+ return _mm256_floor_ps(values);
289
+ }
290
+ Vectorized<c10::complex<float>> neg() const {
291
+ auto zero = _mm256_setzero_ps();
292
+ return _mm256_sub_ps(zero, values);
293
+ }
294
+ Vectorized<c10::complex<float>> round() const {
295
+ return _mm256_round_ps(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
296
+ }
297
+ Vectorized<c10::complex<float>> tan() const {
298
+ return map(std::tan);
299
+ }
300
+ Vectorized<c10::complex<float>> tanh() const {
301
+ return map(std::tanh);
302
+ }
303
+ Vectorized<c10::complex<float>> trunc() const {
304
+ return _mm256_round_ps(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
305
+ }
306
+ Vectorized<c10::complex<float>> sqrt() const {
307
+ return map(std::sqrt);
308
+ }
309
+ Vectorized<c10::complex<float>> reciprocal() const;
310
+ Vectorized<c10::complex<float>> rsqrt() const {
311
+ return sqrt().reciprocal();
312
+ }
313
+ Vectorized<c10::complex<float>> pow(const Vectorized<c10::complex<float>> &exp) const {
314
+ __at_align__ c10::complex<float> x_tmp[size()];
315
+ __at_align__ c10::complex<float> y_tmp[size()];
316
+ store(x_tmp);
317
+ exp.store(y_tmp);
318
+ for (const auto i : c10::irange(size())) {
319
+ x_tmp[i] = std::pow(x_tmp[i], y_tmp[i]);
320
+ }
321
+ return loadu(x_tmp);
322
+ }
323
+ // Comparison using the _CMP_**_OQ predicate.
324
+ // `O`: get false if an operand is NaN
325
+ // `Q`: do not raise if an operand is NaN
326
+ Vectorized<c10::complex<float>> operator==(const Vectorized<c10::complex<float>>& other) const {
327
+ return _mm256_cmp_ps(values, other.values, _CMP_EQ_OQ);
328
+ }
329
+ Vectorized<c10::complex<float>> operator!=(const Vectorized<c10::complex<float>>& other) const {
330
+ return _mm256_cmp_ps(values, other.values, _CMP_NEQ_UQ);
331
+ }
332
+ Vectorized<c10::complex<float>> operator<(const Vectorized<c10::complex<float>>& /*other*/) const {
333
+ TORCH_CHECK(false, "not supported for complex numbers");
334
+ }
335
+ Vectorized<c10::complex<float>> operator<=(const Vectorized<c10::complex<float>>& /*other*/) const {
336
+ TORCH_CHECK(false, "not supported for complex numbers");
337
+ }
338
+ Vectorized<c10::complex<float>> operator>(const Vectorized<c10::complex<float>>& /*other*/) const {
339
+ TORCH_CHECK(false, "not supported for complex numbers");
340
+ }
341
+ Vectorized<c10::complex<float>> operator>=(const Vectorized<c10::complex<float>>& /*other*/) const {
342
+ TORCH_CHECK(false, "not supported for complex numbers");
343
+ }
344
+
345
+ Vectorized<c10::complex<float>> eq(const Vectorized<c10::complex<float>>& other) const;
346
+ Vectorized<c10::complex<float>> ne(const Vectorized<c10::complex<float>>& other) const;
347
+ };
348
+
349
+ template <> Vectorized<c10::complex<float>> inline operator+(const Vectorized<c10::complex<float>> &a, const Vectorized<c10::complex<float>> &b) {
350
+ return _mm256_add_ps(a, b);
351
+ }
352
+
353
+ template <> Vectorized<c10::complex<float>> inline operator-(const Vectorized<c10::complex<float>> &a, const Vectorized<c10::complex<float>> &b) {
354
+ return _mm256_sub_ps(a, b);
355
+ }
356
+
357
+ template <> Vectorized<c10::complex<float>> inline operator*(const Vectorized<c10::complex<float>> &a, const Vectorized<c10::complex<float>> &b) {
358
+ //(a + bi) * (c + di) = (ac - bd) + (ad + bc)i
359
+ const __m256 sign_mask = _mm256_setr_ps(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0);
360
+ auto ac_bd = _mm256_mul_ps(a, b); //ac bd
361
+
362
+ auto d_c = _mm256_permute_ps(b, 0xB1); //d c
363
+ d_c = _mm256_xor_ps(sign_mask, d_c); //d -c
364
+ auto ad_bc = _mm256_mul_ps(a, d_c); //ad -bc
365
+
366
+ auto ret = _mm256_hsub_ps(ac_bd, ad_bc); //ac - bd ad + bc
367
+ ret = _mm256_permute_ps(ret, 0xD8);
368
+ return ret;
369
+ }
370
+
371
+ template <> Vectorized<c10::complex<float>> inline operator/(const Vectorized<c10::complex<float>> &a, const Vectorized<c10::complex<float>> &b) {
372
+ //re + im*i = (a + bi) / (c + di)
373
+ auto mask = _mm256_set1_ps(-0.f);
374
+ auto fabs_cd = _mm256_andnot_ps(mask, b); // |c| |d|
375
+ auto fabs_dc = _mm256_permute_ps(fabs_cd, 0xB1); // |d| |c|
376
+ auto scale = _mm256_rcp_ps(_mm256_max_ps(fabs_cd, fabs_dc)); // 1/sc 1/sc
377
+ auto a2 = _mm256_mul_ps(a, scale); // a/sc b/sc
378
+ auto b2 = _mm256_mul_ps(b, scale); // c/sc d/sc
379
+ auto acbd2 = _mm256_mul_ps(a2, b2);
380
+
381
+ const __m256 sign_mask = _mm256_setr_ps(-0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0);
382
+ auto dc2 = _mm256_permute_ps(b2, 0xB1); // d/sc c/sc
383
+ dc2 = _mm256_xor_ps(sign_mask, dc2); // -d/|c,d| c/sc
384
+ auto adbc2 = _mm256_mul_ps(a2, dc2); //-ad/sc^2 bc/sc^2
385
+ auto res2 = _mm256_hadd_ps(acbd2, adbc2); //(ac+bd)/sc^2 (bc-ad)/sc^2
386
+ res2 = _mm256_permute_ps(res2, 0xD8);
387
+
388
+ // get the denominator
389
+ auto denom2 = Vectorized<c10::complex<float>>(b2).abs_2_(); // (c^2+d^2)/sc^2 (c^2+d^2)/sc^2
390
+ res2 = _mm256_div_ps(res2, denom2);
391
+ return res2;
392
+ }
393
+
394
+ // reciprocal. Implement this here so we can use multiplication.
395
+ inline Vectorized<c10::complex<float>> Vectorized<c10::complex<float>>::reciprocal() const {
396
+ //re + im*i = (a + bi) / (c + di)
397
+ //re = (ac + bd)/abs_2() = c/abs_2()
398
+ //im = (bc - ad)/abs_2() = d/abs_2()
399
+ const __m256 sign_mask = _mm256_setr_ps(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0);
400
+ auto c_d = _mm256_xor_ps(sign_mask, values); //c -d
401
+ return _mm256_div_ps(c_d, abs_2_());
402
+ }
403
+
404
+ inline Vectorized<c10::complex<float>> Vectorized<c10::complex<float>>::atan() const {
405
+ // atan(x) = i/2 * ln((i + z)/(i - z))
406
+ const __m256 i = _mm256_setr_ps(0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0);
407
+ const Vectorized i_half = _mm256_setr_ps(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5);
408
+
409
+ auto sum = Vectorized(_mm256_add_ps(i, values)); // a 1+b
410
+ auto sub = Vectorized(_mm256_sub_ps(i, values)); // -a 1-b
411
+ auto ln = (sum/sub).log(); // ln((i + z)/(i - z))
412
+ return i_half*ln; // i/2*ln()
413
+ }
414
+
415
+ template <>
416
+ Vectorized<c10::complex<float>> inline maximum(const Vectorized<c10::complex<float>>& a, const Vectorized<c10::complex<float>>& b) {
417
+ auto abs_a = a.abs_2_();
418
+ auto abs_b = b.abs_2_();
419
+ auto mask = _mm256_cmp_ps(abs_a, abs_b, _CMP_LT_OQ);
420
+ auto max = _mm256_blendv_ps(a, b, mask);
421
+ // Exploit the fact that all-ones is a NaN.
422
+ auto isnan = _mm256_cmp_ps(abs_a, abs_b, _CMP_UNORD_Q);
423
+ return _mm256_or_ps(max, isnan);
424
+ }
425
+
426
+ template <>
427
+ Vectorized<c10::complex<float>> inline minimum(const Vectorized<c10::complex<float>>& a, const Vectorized<c10::complex<float>>& b) {
428
+ auto abs_a = a.abs_2_();
429
+ auto abs_b = b.abs_2_();
430
+ auto mask = _mm256_cmp_ps(abs_a, abs_b, _CMP_GT_OQ);
431
+ auto min = _mm256_blendv_ps(a, b, mask);
432
+ // Exploit the fact that all-ones is a NaN.
433
+ auto isnan = _mm256_cmp_ps(abs_a, abs_b, _CMP_UNORD_Q);
434
+ return _mm256_or_ps(min, isnan);
435
+ }
436
+
437
+ template <>
438
+ Vectorized<c10::complex<float>> inline operator&(const Vectorized<c10::complex<float>>& a, const Vectorized<c10::complex<float>>& b) {
439
+ return _mm256_and_ps(a, b);
440
+ }
441
+
442
+ template <>
443
+ Vectorized<c10::complex<float>> inline operator|(const Vectorized<c10::complex<float>>& a, const Vectorized<c10::complex<float>>& b) {
444
+ return _mm256_or_ps(a, b);
445
+ }
446
+
447
+ template <>
448
+ Vectorized<c10::complex<float>> inline operator^(const Vectorized<c10::complex<float>>& a, const Vectorized<c10::complex<float>>& b) {
449
+ return _mm256_xor_ps(a, b);
450
+ }
451
+
452
+ inline Vectorized<c10::complex<float>> Vectorized<c10::complex<float>>::eq(
453
+ const Vectorized<c10::complex<float>>& other) const {
454
+ auto eq = (*this == other); // compares real and imag individually
455
+ // If both real numbers and imag numbers are equal, then the complex numbers are equal
456
+ return (eq.real() & eq.imag()) & Vectorized<c10::complex<float>>(_mm256_set1_ps(1.0f));
457
+ }
458
+
459
+ inline Vectorized<c10::complex<float>> Vectorized<c10::complex<float>>::ne(
460
+ const Vectorized<c10::complex<float>>& other) const {
461
+ auto ne = (*this != other); // compares real and imag individually
462
+ // If either real numbers or imag numbers are not equal, then the complex numbers are not equal
463
+ return (ne.real() | ne.imag()) & Vectorized<c10::complex<float>>(_mm256_set1_ps(1.0f));
464
+ }
465
+
466
+ #endif
467
+
468
+ }} // namespace at::vec::CPU_CAPABILITY
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_double.h ADDED
@@ -0,0 +1,432 @@
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+
6
+ #include <ATen/cpu/vec/intrinsics.h>
7
+ #include <ATen/cpu/vec/vec_base.h>
8
+ #include <c10/util/irange.h>
9
+ #if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
10
+ #include <sleef.h>
11
+ #endif
12
+
13
+ namespace at::vec {
14
+ // See Note [CPU_CAPABILITY namespace]
15
+ inline namespace CPU_CAPABILITY {
16
+
17
+
18
+ #if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
19
+
20
+ template <> class Vectorized<double> {
21
+ private:
22
+ __m256d values;
23
+ public:
24
+ using value_type = double;
25
+ using size_type = int;
26
+ static constexpr size_type size() {
27
+ return 4;
28
+ }
29
+ Vectorized() {}
30
+ Vectorized(__m256d v) : values(v) {}
31
+ Vectorized(double val) {
32
+ values = _mm256_set1_pd(val);
33
+ }
34
+ Vectorized(double val1, double val2, double val3, double val4) {
35
+ values = _mm256_setr_pd(val1, val2, val3, val4);
36
+ }
37
+ operator __m256d() const {
38
+ return values;
39
+ }
40
+ template <int64_t mask>
41
+ static Vectorized<double> blend(const Vectorized<double>& a, const Vectorized<double>& b) {
42
+ return _mm256_blend_pd(a.values, b.values, mask);
43
+ }
44
+ static Vectorized<double> blendv(const Vectorized<double>& a, const Vectorized<double>& b,
45
+ const Vectorized<double>& mask) {
46
+ return _mm256_blendv_pd(a.values, b.values, mask.values);
47
+ }
48
+ template<typename step_t>
49
+ static Vectorized<double> arange(double base = 0., step_t step = static_cast<step_t>(1)) {
50
+ return Vectorized<double>(base, base + step, base + 2 * step, base + 3 * step);
51
+ }
52
+ static Vectorized<double> set(const Vectorized<double>& a, const Vectorized<double>& b,
53
+ int64_t count = size()) {
54
+ switch (count) {
55
+ case 0:
56
+ return a;
57
+ case 1:
58
+ return blend<1>(a, b);
59
+ case 2:
60
+ return blend<3>(a, b);
61
+ case 3:
62
+ return blend<7>(a, b);
63
+ }
64
+ return b;
65
+ }
66
+ static Vectorized<double> loadu(const void* ptr, int64_t count = size()) {
67
+ if (count == size())
68
+ return _mm256_loadu_pd(reinterpret_cast<const double*>(ptr));
69
+
70
+
71
+ __at_align__ double tmp_values[size()];
72
+ // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
73
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
74
+ // instructions while a loop would be compiled to one instruction.
75
+ for (const auto i : c10::irange(size())) {
76
+ tmp_values[i] = 0.0;
77
+ }
78
+ std::memcpy(
79
+ tmp_values,
80
+ reinterpret_cast<const double*>(ptr),
81
+ count * sizeof(double));
82
+ return _mm256_load_pd(tmp_values);
83
+ }
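// Illustrative sketch of a partial load (an assumption-laden example, not part of
// the header): the tail lanes are zero-filled, so later lane-wise ops never read
// uninitialized memory. Assumes an AVX2 build and that Vectorized<double> is
// reached through ATen/cpu/vec/vec.h.
#include <ATen/cpu/vec/vec.h>
#include <cstdio>

int main() {
  double buf[2] = {1.0, 2.0};
  auto v = at::vec::Vectorized<double>::loadu(buf, /*count=*/2);
  double out[4];
  v.store(out);
  std::printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);  // 1 2 0 0
  return 0;
}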
84
+ void store(void* ptr, int count = size()) const {
85
+ if (count == size()) {
86
+ _mm256_storeu_pd(reinterpret_cast<double*>(ptr), values);
87
+ } else if (count > 0) {
88
+ double tmp_values[size()];
89
+ _mm256_storeu_pd(reinterpret_cast<double*>(tmp_values), values);
90
+ std::memcpy(ptr, tmp_values, count * sizeof(double));
91
+ }
92
+ }
93
+ const double& operator[](int idx) const = delete;
94
+ double& operator[](int idx) = delete;
95
+ int zero_mask() const {
96
+ // returns an integer mask where each zero element maps to a 1 bit and every other element maps to a 0 bit
97
+ __m256d cmp = _mm256_cmp_pd(values, _mm256_set1_pd(0.0), _CMP_EQ_OQ);
98
+ return _mm256_movemask_pd(cmp);
99
+ }
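// A minimal sketch of zero_mask() semantics (assumes an AVX2 build; the
// constructor and call mirror the class above):
//   at::vec::Vectorized<double> v(0.0, 3.0, 0.0, 7.0);
//   int m = v.zero_mask();  // 0b0101 == 5: lanes 0 and 2 hold zeros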
100
+ Vectorized<double> isnan() const {
101
+ return _mm256_cmp_pd(values, _mm256_set1_pd(0.0), _CMP_UNORD_Q);
102
+ }
103
+ Vectorized<double> map(double (*const f)(double)) const {
104
+ __at_align__ double tmp[size()];
105
+ store(tmp);
106
+ for (const auto i : c10::irange(size())) {
107
+ tmp[i] = f(tmp[i]);
108
+ }
109
+ return loadu(tmp);
110
+ }
111
+ Vectorized<double> abs() const {
112
+ auto mask = _mm256_set1_pd(-0.f);
113
+ return _mm256_andnot_pd(mask, values);
114
+ }
115
+ Vectorized<double> angle() const {
116
+ const auto zero_vec = _mm256_set1_pd(0.f);
117
+ const auto nan_vec = _mm256_set1_pd(NAN);
118
+ const auto not_nan_mask = _mm256_cmp_pd(values, values, _CMP_EQ_OQ);
119
+ const auto nan_mask = _mm256_cmp_pd(not_nan_mask, zero_vec, _CMP_EQ_OQ);
120
+ const auto pi = _mm256_set1_pd(c10::pi<double>);
121
+
122
+ const auto neg_mask = _mm256_cmp_pd(values, zero_vec, _CMP_LT_OQ);
123
+ auto angle = _mm256_blendv_pd(zero_vec, pi, neg_mask);
124
+ angle = _mm256_blendv_pd(angle, nan_vec, nan_mask);
125
+ return angle;
126
+ }
127
+ Vectorized<double> real() const {
128
+ return *this;
129
+ }
130
+ Vectorized<double> imag() const {
131
+ return _mm256_set1_pd(0);
132
+ }
133
+ Vectorized<double> conj() const {
134
+ return *this;
135
+ }
136
+ Vectorized<double> acos() const {
137
+ return Vectorized<double>(Sleef_acosd4_u10(values));
138
+ }
139
+ Vectorized<double> asin() const {
140
+ return Vectorized<double>(Sleef_asind4_u10(values));
141
+ }
142
+ Vectorized<double> atan() const {
143
+ return Vectorized<double>(Sleef_atand4_u10(values));
144
+ }
145
+ Vectorized<double> atanh() const {
146
+ return Vectorized<double>(Sleef_atanhd4_u10(values));
147
+ }
148
+ Vectorized<double> atan2(const Vectorized<double> &b) const {
149
+ return Vectorized<double>(Sleef_atan2d4_u10(values, b));
150
+ }
151
+ Vectorized<double> copysign(const Vectorized<double> &sign) const {
152
+ return Vectorized<double>(Sleef_copysignd4(values, sign));
153
+ }
154
+ Vectorized<double> erf() const {
155
+ return Vectorized<double>(Sleef_erfd4_u10(values));
156
+ }
157
+ Vectorized<double> erfc() const {
158
+ return Vectorized<double>(Sleef_erfcd4_u15(values));
159
+ }
160
+ Vectorized<double> erfinv() const {
161
+ return map(calc_erfinv);
162
+ }
163
+ Vectorized<double> exp() const {
164
+ return Vectorized<double>(Sleef_expd4_u10(values));
165
+ }
166
+ Vectorized<double> exp2() const {
167
+ return Vectorized<double>(Sleef_exp2d4_u10(values));
168
+ }
169
+ Vectorized<double> expm1() const {
170
+ return Vectorized<double>(Sleef_expm1d4_u10(values));
171
+ }
172
+ Vectorized<double> fmod(const Vectorized<double>& q) const {
173
+ return Vectorized<double>(Sleef_fmodd4(values, q));
174
+ }
175
+ Vectorized<double> hypot(const Vectorized<double> &b) const {
176
+ return Vectorized<double>(Sleef_hypotd4_u05(values, b));
177
+ }
178
+ Vectorized<double> i0() const {
179
+ return map(calc_i0);
180
+ }
181
+ Vectorized<double> i0e() const {
182
+ return map(calc_i0e);
183
+ }
184
+ Vectorized<double> digamma() const {
185
+ return map(calc_digamma);
186
+ }
187
+ Vectorized<double> igamma(const Vectorized<double> &x) const {
188
+ __at_align__ double tmp[size()];
189
+ __at_align__ double tmp_x[size()];
190
+ store(tmp);
191
+ x.store(tmp_x);
192
+ for (const auto i : c10::irange(size())) {
193
+ tmp[i] = calc_igamma(tmp[i], tmp_x[i]);
194
+ }
195
+ return loadu(tmp);
196
+ }
197
+ Vectorized<double> igammac(const Vectorized<double> &x) const {
198
+ __at_align__ double tmp[size()];
199
+ __at_align__ double tmp_x[size()];
200
+ store(tmp);
201
+ x.store(tmp_x);
202
+ for (const auto i : c10::irange(size())) {
203
+ tmp[i] = calc_igammac(tmp[i], tmp_x[i]);
204
+ }
205
+ return loadu(tmp);
206
+ }
207
+ Vectorized<double> log() const {
208
+ return Vectorized<double>(Sleef_logd4_u10(values));
209
+ }
210
+ Vectorized<double> log2() const {
211
+ return Vectorized<double>(Sleef_log2d4_u10(values));
212
+ }
213
+ Vectorized<double> log10() const {
214
+ return Vectorized<double>(Sleef_log10d4_u10(values));
215
+ }
216
+ Vectorized<double> log1p() const {
217
+ return Vectorized<double>(Sleef_log1pd4_u10(values));
218
+ }
219
+ Vectorized<double> sin() const {
220
+ return Vectorized<double>(Sleef_sind4_u10(values));
221
+ }
222
+ Vectorized<double> sinh() const {
223
+ return Vectorized<double>(Sleef_sinhd4_u10(values));
224
+ }
225
+ Vectorized<double> cos() const {
226
+ return Vectorized<double>(Sleef_cosd4_u10(values));
227
+ }
228
+ Vectorized<double> cosh() const {
229
+ return Vectorized<double>(Sleef_coshd4_u10(values));
230
+ }
231
+ Vectorized<double> ceil() const {
232
+ return _mm256_ceil_pd(values);
233
+ }
234
+ Vectorized<double> floor() const {
235
+ return _mm256_floor_pd(values);
236
+ }
237
+ Vectorized<double> frac() const;
238
+ Vectorized<double> neg() const {
239
+ return _mm256_xor_pd(_mm256_set1_pd(-0.), values);
240
+ }
241
+ Vectorized<double> nextafter(const Vectorized<double> &b) const {
242
+ return Vectorized<double>(Sleef_nextafterd4(values, b));
243
+ }
244
+ Vectorized<double> round() const {
245
+ return _mm256_round_pd(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
246
+ }
247
+ Vectorized<double> tan() const {
248
+ return Vectorized<double>(Sleef_tand4_u10(values));
249
+ }
250
+ Vectorized<double> tanh() const {
251
+ return Vectorized<double>(Sleef_tanhd4_u10(values));
252
+ }
253
+ Vectorized<double> trunc() const {
254
+ return _mm256_round_pd(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
255
+ }
256
+ Vectorized<double> lgamma() const {
257
+ return Vectorized<double>(Sleef_lgammad4_u10(values));
258
+ }
259
+ Vectorized<double> sqrt() const {
260
+ return _mm256_sqrt_pd(values);
261
+ }
262
+ Vectorized<double> reciprocal() const {
263
+ return _mm256_div_pd(_mm256_set1_pd(1), values);
264
+ }
265
+ Vectorized<double> rsqrt() const {
266
+ return _mm256_div_pd(_mm256_set1_pd(1), _mm256_sqrt_pd(values));
267
+ }
268
+ Vectorized<double> pow(const Vectorized<double> &b) const {
269
+ return Vectorized<double>(Sleef_powd4_u10(values, b));
270
+ }
271
+ // Comparison using the _CMP_**_OQ predicate.
272
+ // `O`: get false if an operand is NaN
273
+ // `Q`: do not raise if an operand is NaN
274
+ Vectorized<double> operator==(const Vectorized<double>& other) const {
275
+ return _mm256_cmp_pd(values, other.values, _CMP_EQ_OQ);
276
+ }
277
+
278
+ Vectorized<double> operator!=(const Vectorized<double>& other) const {
279
+ return _mm256_cmp_pd(values, other.values, _CMP_NEQ_UQ);
280
+ }
281
+
282
+ Vectorized<double> operator<(const Vectorized<double>& other) const {
283
+ return _mm256_cmp_pd(values, other.values, _CMP_LT_OQ);
284
+ }
285
+
286
+ Vectorized<double> operator<=(const Vectorized<double>& other) const {
287
+ return _mm256_cmp_pd(values, other.values, _CMP_LE_OQ);
288
+ }
289
+
290
+ Vectorized<double> operator>(const Vectorized<double>& other) const {
291
+ return _mm256_cmp_pd(values, other.values, _CMP_GT_OQ);
292
+ }
293
+
294
+ Vectorized<double> operator>=(const Vectorized<double>& other) const {
295
+ return _mm256_cmp_pd(values, other.values, _CMP_GE_OQ);
296
+ }
297
+
298
+ Vectorized<double> eq(const Vectorized<double>& other) const;
299
+ Vectorized<double> ne(const Vectorized<double>& other) const;
300
+ Vectorized<double> lt(const Vectorized<double>& other) const;
301
+ Vectorized<double> le(const Vectorized<double>& other) const;
302
+ Vectorized<double> gt(const Vectorized<double>& other) const;
303
+ Vectorized<double> ge(const Vectorized<double>& other) const;
304
+ };
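// A small standalone sketch of the ordered/quiet predicate semantics used by the
// comparison operators above (assumes an AVX2 toolchain; raw intrinsics are used
// here so the example does not depend on ATen):
#include <immintrin.h>
#include <cmath>
#include <cstdio>

int main() {
  __m256d a = _mm256_setr_pd(1.0, NAN, 3.0, 4.0);
  __m256d b = _mm256_setr_pd(1.0, 2.0, NAN, 4.0);
  // `O` (ordered): a NaN in either lane makes the comparison false.
  // `Q` (quiet): no floating-point exception is raised for NaN inputs.
  __m256d eq = _mm256_cmp_pd(a, b, _CMP_EQ_OQ);
  std::printf("eq mask = %d\n", _mm256_movemask_pd(eq));  // 9 == 0b1001: lanes 0 and 3 match
  return 0;
}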
305
+
306
+ template <>
307
+ Vectorized<double> inline operator+(const Vectorized<double>& a, const Vectorized<double>& b) {
308
+ return _mm256_add_pd(a, b);
309
+ }
310
+
311
+ template <>
312
+ Vectorized<double> inline operator-(const Vectorized<double>& a, const Vectorized<double>& b) {
313
+ return _mm256_sub_pd(a, b);
314
+ }
315
+
316
+ template <>
317
+ Vectorized<double> inline operator*(const Vectorized<double>& a, const Vectorized<double>& b) {
318
+ return _mm256_mul_pd(a, b);
319
+ }
320
+
321
+ template <>
322
+ Vectorized<double> inline operator/(const Vectorized<double>& a, const Vectorized<double>& b) {
323
+ return _mm256_div_pd(a, b);
324
+ }
325
+
326
+ // frac. Implement this here so we can use subtraction.
327
+ inline Vectorized<double> Vectorized<double>::frac() const {
328
+ return *this - this->trunc();
329
+ }
330
+
331
+ // Implements the IEEE 754 201X `maximum` operation, which propagates NaN if
332
+ // either input is a NaN.
333
+ template <>
334
+ Vectorized<double> inline maximum(const Vectorized<double>& a, const Vectorized<double>& b) {
335
+ Vectorized<double> max = _mm256_max_pd(a, b);
336
+ Vectorized<double> isnan = _mm256_cmp_pd(a, b, _CMP_UNORD_Q);
337
+ // Exploit the fact that all-ones is a NaN.
338
+ return _mm256_or_pd(max, isnan);
339
+ }
340
+
341
+ // Implements the IEEE 754 201X `minimum` operation, which propagates NaN if
342
+ // either input is a NaN.
343
+ template <>
344
+ Vectorized<double> inline minimum(const Vectorized<double>& a, const Vectorized<double>& b) {
345
+ Vectorized<double> min = _mm256_min_pd(a, b);
346
+ Vectorized<double> isnan = _mm256_cmp_pd(a, b, _CMP_UNORD_Q);
347
+ // Exploit the fact that all-ones is a NaN.
348
+ return _mm256_or_pd(min, isnan);
349
+ }
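// Sketch of the "all-ones is a NaN" trick used by maximum()/minimum() above
// (assumes AVX2): the unordered compare yields an all-ones lane wherever either
// input is NaN, and OR-ing an all-ones bit pattern into a double produces NaN.
#include <immintrin.h>
#include <cmath>
#include <cstdio>

int main() {
  __m256d a = _mm256_setr_pd(1.0, NAN, 3.0, -2.0);
  __m256d b = _mm256_setr_pd(2.0, 0.0, NAN, -5.0);
  __m256d mx = _mm256_max_pd(a, b);                // by itself, an unordered lane takes the second operand
  __m256d un = _mm256_cmp_pd(a, b, _CMP_UNORD_Q);  // all-ones where either lane is NaN
  double out[4];
  _mm256_storeu_pd(out, _mm256_or_pd(mx, un));
  std::printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);  // 2 nan nan -2
  return 0;
}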
350
+
351
+ template <>
352
+ Vectorized<double> inline clamp(const Vectorized<double>& a, const Vectorized<double>& min, const Vectorized<double>& max) {
353
+ return _mm256_min_pd(max, _mm256_max_pd(min, a));
354
+ }
355
+
356
+ template <>
357
+ Vectorized<double> inline clamp_min(const Vectorized<double>& a, const Vectorized<double>& min) {
358
+ return _mm256_max_pd(min, a);
359
+ }
360
+
361
+ template <>
362
+ Vectorized<double> inline clamp_max(const Vectorized<double>& a, const Vectorized<double>& max) {
363
+ return _mm256_min_pd(max, a);
364
+ }
365
+
366
+ template <>
367
+ Vectorized<double> inline operator&(const Vectorized<double>& a, const Vectorized<double>& b) {
368
+ return _mm256_and_pd(a, b);
369
+ }
370
+
371
+ template <>
372
+ Vectorized<double> inline operator|(const Vectorized<double>& a, const Vectorized<double>& b) {
373
+ return _mm256_or_pd(a, b);
374
+ }
375
+
376
+ template <>
377
+ Vectorized<double> inline operator^(const Vectorized<double>& a, const Vectorized<double>& b) {
378
+ return _mm256_xor_pd(a, b);
379
+ }
380
+
381
+ inline Vectorized<double> Vectorized<double>::eq(const Vectorized<double>& other) const {
382
+ return (*this == other) & Vectorized<double>(1.0);
383
+ }
384
+
385
+ inline Vectorized<double> Vectorized<double>::ne(const Vectorized<double>& other) const {
386
+ return (*this != other) & Vectorized<double>(1.0);
387
+ }
388
+
389
+ inline Vectorized<double> Vectorized<double>::gt(const Vectorized<double>& other) const {
390
+ return (*this > other) & Vectorized<double>(1.0);
391
+ }
392
+
393
+ inline Vectorized<double> Vectorized<double>::ge(const Vectorized<double>& other) const {
394
+ return (*this >= other) & Vectorized<double>(1.0);
395
+ }
396
+
397
+ inline Vectorized<double> Vectorized<double>::lt(const Vectorized<double>& other) const {
398
+ return (*this < other) & Vectorized<double>(1.0);
399
+ }
400
+
401
+ inline Vectorized<double> Vectorized<double>::le(const Vectorized<double>& other) const {
402
+ return (*this <= other) & Vectorized<double>(1.0);
403
+ }
404
+
405
+ template <>
406
+ inline void convert(const double* src, double* dst, int64_t n) {
407
+ int64_t i;
408
+ #pragma unroll
409
+ for (i = 0; i <= (n - Vectorized<double>::size()); i += Vectorized<double>::size()) {
410
+ _mm256_storeu_pd(dst + i, _mm256_loadu_pd(src + i));
411
+ }
412
+ #pragma unroll
413
+ for (; i < n; i++) {
414
+ dst[i] = src[i];
415
+ }
416
+ }
417
+
418
+ #ifdef CPU_CAPABILITY_AVX2
419
+ template <>
420
+ Vectorized<double> inline fmadd(const Vectorized<double>& a, const Vectorized<double>& b, const Vectorized<double>& c) {
421
+ return _mm256_fmadd_pd(a, b, c);
422
+ }
423
+
424
+ template <>
425
+ Vectorized<double> inline fmsub(const Vectorized<double>& a, const Vectorized<double>& b, const Vectorized<double>& c) {
426
+ return _mm256_fmsub_pd(a, b, c);
427
+ }
428
+ #endif
429
+
430
+ #endif
431
+
432
+ }} // namespace at::vec::CPU_CAPABILITY
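// End-to-end usage sketch for the vector type defined in this header. Treat the
// include path and dispatch details as assumptions: in practice the type is
// reached through ATen/cpu/vec/vec.h and the CPU_CAPABILITY machinery.
#include <ATen/cpu/vec/vec.h>
#include <cstdio>

int main() {
  using Vec = at::vec::Vectorized<double>;
  double in[4] = {1.0, 4.0, 9.0, 16.0}, out[4];
  Vec v = Vec::loadu(in);
  (v.sqrt() + Vec(0.5)).store(out);
  std::printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);  // 1.5 2.5 3.5 4.5
  return 0;
}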
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_float.h ADDED
@@ -0,0 +1,565 @@
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+
6
+ #include <ATen/cpu/vec/intrinsics.h>
7
+ #include <ATen/cpu/vec/vec_base.h>
8
+ #include <c10/util/irange.h>
9
+ #if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
10
+ #include <sleef.h>
11
+ #endif
12
+
13
+ namespace at::vec {
14
+ // See Note [CPU_CAPABILITY namespace]
15
+ inline namespace CPU_CAPABILITY {
16
+
17
+ #if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
18
+
19
+ template <> class Vectorized<float> {
20
+ private:
21
+ __m256 values;
22
+ public:
23
+ using value_type = float;
24
+ using size_type = int;
25
+ static constexpr size_type size() {
26
+ return 8;
27
+ }
28
+ Vectorized() {}
29
+ Vectorized(__m256 v) : values(v) {}
30
+ Vectorized(float val) {
31
+ values = _mm256_set1_ps(val);
32
+ }
33
+ Vectorized(float val1, float val2, float val3, float val4,
34
+ float val5, float val6, float val7, float val8) {
35
+ values = _mm256_setr_ps(val1, val2, val3, val4, val5, val6, val7, val8);
36
+ }
37
+ operator __m256() const {
38
+ return values;
39
+ }
40
+ template <int64_t mask>
41
+ static Vectorized<float> blend(const Vectorized<float>& a, const Vectorized<float>& b) {
42
+ return _mm256_blend_ps(a.values, b.values, mask);
43
+ }
44
+ static Vectorized<float> blendv(const Vectorized<float>& a, const Vectorized<float>& b,
45
+ const Vectorized<float>& mask) {
46
+ return _mm256_blendv_ps(a.values, b.values, mask.values);
47
+ }
48
+ template<typename step_t>
49
+ static Vectorized<float> arange(float base = 0.f, step_t step = static_cast<step_t>(1)) {
50
+ return Vectorized<float>(
51
+ base, base + step, base + 2 * step, base + 3 * step,
52
+ base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step);
53
+ }
54
+ static Vectorized<float> set(const Vectorized<float>& a, const Vectorized<float>& b,
55
+ int64_t count = size()) {
56
+ switch (count) {
57
+ case 0:
58
+ return a;
59
+ case 1:
60
+ return blend<1>(a, b);
61
+ case 2:
62
+ return blend<3>(a, b);
63
+ case 3:
64
+ return blend<7>(a, b);
65
+ case 4:
66
+ return blend<15>(a, b);
67
+ case 5:
68
+ return blend<31>(a, b);
69
+ case 6:
70
+ return blend<63>(a, b);
71
+ case 7:
72
+ return blend<127>(a, b);
73
+ }
74
+ return b;
75
+ }
76
+ static Vectorized<float> loadu(const void* ptr, int64_t count = size()) {
77
+ if (count == size())
78
+ return _mm256_loadu_ps(reinterpret_cast<const float*>(ptr));
79
+ __at_align__ float tmp_values[size()];
80
+ // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
81
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
82
+ // instructions, while a loop is compiled to one instruction.
83
+ for (const auto i : c10::irange(size())) {
84
+ tmp_values[i] = 0.0;
85
+ }
86
+ std::memcpy(
87
+ tmp_values, reinterpret_cast<const float*>(ptr), count * sizeof(float));
88
+ return _mm256_loadu_ps(tmp_values);
89
+ }
90
+ void store(void* ptr, int64_t count = size()) const {
91
+ if (count == size()) {
92
+ _mm256_storeu_ps(reinterpret_cast<float*>(ptr), values);
93
+ } else if (count > 0) {
94
+ float tmp_values[size()];
95
+ _mm256_storeu_ps(reinterpret_cast<float*>(tmp_values), values);
96
+ std::memcpy(ptr, tmp_values, count * sizeof(float));
97
+ }
98
+ }
99
+ const float& operator[](int idx) const = delete;
100
+ float& operator[](int idx) = delete;
101
+ int zero_mask() const {
102
+ // returns an integer mask where each zero element maps to a 1 bit and every other element maps to a 0 bit
103
+ __m256 cmp = _mm256_cmp_ps(values, _mm256_set1_ps(0.0f), _CMP_EQ_OQ);
104
+ return _mm256_movemask_ps(cmp);
105
+ }
106
+ Vectorized<float> isnan() const {
107
+ return _mm256_cmp_ps(values, _mm256_set1_ps(0.0f), _CMP_UNORD_Q);
108
+ }
109
+ Vectorized<float> map(float (*const f)(float)) const {
110
+ __at_align__ float tmp[size()];
111
+ store(tmp);
112
+ for (const auto i : c10::irange(size())) {
113
+ tmp[i] = f(tmp[i]);
114
+ }
115
+ return loadu(tmp);
116
+ }
117
+ Vectorized<float> abs() const {
118
+ auto mask = _mm256_set1_ps(-0.f);
119
+ return _mm256_andnot_ps(mask, values);
120
+ }
121
+ Vectorized<float> angle() const {
122
+ const auto zero_vec = _mm256_set1_ps(0.f);
123
+ const auto nan_vec = _mm256_set1_ps(NAN);
124
+ const auto not_nan_mask = _mm256_cmp_ps(values, values, _CMP_EQ_OQ);
125
+ const auto nan_mask = _mm256_cmp_ps(not_nan_mask, zero_vec, _CMP_EQ_OQ);
126
+ const auto pi = _mm256_set1_ps(c10::pi<float>);
127
+
128
+ const auto neg_mask = _mm256_cmp_ps(values, zero_vec, _CMP_LT_OQ);
129
+ auto angle = _mm256_blendv_ps(zero_vec, pi, neg_mask);
130
+ angle = _mm256_blendv_ps(angle, nan_vec, nan_mask);
131
+ return angle;
132
+ }
133
+ Vectorized<float> real() const {
134
+ return *this;
135
+ }
136
+ Vectorized<float> imag() const {
137
+ return _mm256_set1_ps(0);
138
+ }
139
+ Vectorized<float> conj() const {
140
+ return *this;
141
+ }
142
+ Vectorized<float> acos() const {
143
+ return Vectorized<float>(Sleef_acosf8_u10(values));
144
+ }
145
+ Vectorized<float> asin() const {
146
+ return Vectorized<float>(Sleef_asinf8_u10(values));
147
+ }
148
+ Vectorized<float> atan() const {
149
+ return Vectorized<float>(Sleef_atanf8_u10(values));
150
+ }
151
+ Vectorized<float> atanh() const {
152
+ return Vectorized<float>(Sleef_atanhf8_u10(values));
153
+ }
154
+ Vectorized<float> atan2(const Vectorized<float> &b) const {
155
+ return Vectorized<float>(Sleef_atan2f8_u10(values, b));
156
+ }
157
+ Vectorized<float> copysign(const Vectorized<float> &sign) const {
158
+ return Vectorized<float>(Sleef_copysignf8(values, sign));
159
+ }
160
+ Vectorized<float> erf() const {
161
+ // constants
162
+ const auto neg_zero_vec = _mm256_set1_ps(-0.f);
163
+ const auto one_vec = _mm256_set1_ps(1.0f);
164
+ const auto p = _mm256_set1_ps(0.3275911f);
165
+ const auto p1 = _mm256_set1_ps(0.254829592f);
166
+ const auto p2 = _mm256_set1_ps(-0.284496736f);
167
+ const auto p3 = _mm256_set1_ps(1.421413741f);
168
+ const auto p4 = _mm256_set1_ps(-1.453152027f);
169
+ const auto p5 = _mm256_set1_ps(1.061405429f);
170
+ // sign(x)
171
+ auto sign_mask = _mm256_and_ps(neg_zero_vec, values);
172
+ auto abs_vec = _mm256_xor_ps(sign_mask, values);
173
+ // t = 1 / (p * abs(x) + 1)
174
+ auto tmp0 = _mm256_fmadd_ps(p, abs_vec, one_vec);
175
+ auto t = _mm256_div_ps(one_vec, tmp0);
176
+ // r = p5 * t ^ 4 + p4 * t ^ 3 + p3 * t ^ 2 + p2 * t + p1
177
+ auto tmp1 = _mm256_fmadd_ps(p5, t, p4);
178
+ auto tmp2 = _mm256_fmadd_ps(tmp1, t, p3);
179
+ auto tmp3 = _mm256_fmadd_ps(tmp2, t, p2);
180
+ auto r = _mm256_fmadd_ps(tmp3, t, p1);
181
+ // - exp(- x * x)
182
+ auto pow_2 = _mm256_mul_ps(values, values);
183
+ auto neg_pow_2 = _mm256_xor_ps(neg_zero_vec, pow_2);
184
+ // auto tmp4 = exp(neg_pow_2);
185
+ auto tmp4 = Vectorized<float>(Sleef_expf8_u10(neg_pow_2));
186
+ auto tmp5 = _mm256_xor_ps(neg_zero_vec, tmp4);
187
+ // erf(x) = sign(x) * (1 - r * t * exp(- x * x))
188
+ auto tmp6 = _mm256_mul_ps(tmp5, t);
189
+ auto tmp7 = _mm256_fmadd_ps(tmp6, r, one_vec);
190
+ return _mm256_xor_ps(sign_mask, tmp7);
191
+ }
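// Scalar reference of the same rational approximation, included as a sketch for
// readability (the constants match the vector code above; this helper is
// illustrative and not part of the header):
#include <cmath>

inline float erf_approx_scalar(float x) {
  const float p  = 0.3275911f;
  const float p1 = 0.254829592f, p2 = -0.284496736f, p3 = 1.421413741f,
              p4 = -1.453152027f, p5 = 1.061405429f;
  const float sign = std::copysign(1.0f, x);
  const float ax = std::fabs(x);
  const float t = 1.0f / (p * ax + 1.0f);
  const float r = (((p5 * t + p4) * t + p3) * t + p2) * t + p1;
  // erf(x) = sign(x) * (1 - r * t * exp(-x * x))
  return sign * (1.0f - r * t * std::exp(-x * x));
}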
192
+ Vectorized<float> erfc() const {
193
+ return Vectorized<float>(Sleef_erfcf8_u15(values));
194
+ }
195
+ Vectorized<float> erfinv() const {
196
+ return map(calc_erfinv);
197
+ }
198
+ Vectorized<float> exp() const {
199
+ return Vectorized<float>(Sleef_expf8_u10(values));
200
+ }
201
+ Vectorized<float> exp2() const {
202
+ return Vectorized<float>(Sleef_exp2f8_u10(values));
203
+ }
204
+ Vectorized<float> expm1() const {
205
+ return Vectorized<float>(Sleef_expm1f8_u10(values));
206
+ }
207
+ Vectorized<float> fmod(const Vectorized<float>& q) const {
208
+ return Vectorized<float>(Sleef_fmodf8(values, q));
209
+ }
210
+ Vectorized<float> log() const {
211
+ return Vectorized<float>(Sleef_logf8_u10(values));
212
+ }
213
+ Vectorized<float> log2() const {
214
+ return Vectorized<float>(Sleef_log2f8_u10(values));
215
+ }
216
+ Vectorized<float> log10() const {
217
+ return Vectorized<float>(Sleef_log10f8_u10(values));
218
+ }
219
+ Vectorized<float> log1p() const {
220
+ return Vectorized<float>(Sleef_log1pf8_u10(values));
221
+ }
222
+ Vectorized<float> frac() const;
223
+ Vectorized<float> sin() const {
224
+ return Vectorized<float>(Sleef_sinf8_u35(values));
225
+ }
226
+ Vectorized<float> sinh() const {
227
+ return Vectorized<float>(Sleef_sinhf8_u10(values));
228
+ }
229
+ Vectorized<float> cos() const {
230
+ return Vectorized<float>(Sleef_cosf8_u35(values));
231
+ }
232
+ Vectorized<float> cosh() const {
233
+ return Vectorized<float>(Sleef_coshf8_u10(values));
234
+ }
235
+ Vectorized<float> ceil() const {
236
+ return _mm256_ceil_ps(values);
237
+ }
238
+ Vectorized<float> floor() const {
239
+ return _mm256_floor_ps(values);
240
+ }
241
+ Vectorized<float> hypot(const Vectorized<float> &b) const {
242
+ return Vectorized<float>(Sleef_hypotf8_u05(values, b));
243
+ }
244
+ Vectorized<float> i0() const {
245
+ return map(calc_i0);
246
+ }
247
+ Vectorized<float> i0e() const {
248
+ return map(calc_i0e);
249
+ }
250
+ Vectorized<float> digamma() const {
251
+ return map(calc_digamma);
252
+ }
253
+ Vectorized<float> igamma(const Vectorized<float> &x) const {
254
+ __at_align__ float tmp[size()];
255
+ __at_align__ float tmp_x[size()];
256
+ store(tmp);
257
+ x.store(tmp_x);
258
+ for (const auto i : c10::irange(size())) {
259
+ tmp[i] = calc_igamma(tmp[i], tmp_x[i]);
260
+ }
261
+ return loadu(tmp);
262
+ }
263
+ Vectorized<float> igammac(const Vectorized<float> &x) const {
264
+ __at_align__ float tmp[size()];
265
+ __at_align__ float tmp_x[size()];
266
+ store(tmp);
267
+ x.store(tmp_x);
268
+ for (const auto i : c10::irange(size())) {
269
+ tmp[i] = calc_igammac(tmp[i], tmp_x[i]);
270
+ }
271
+ return loadu(tmp);
272
+ }
273
+ Vectorized<float> neg() const {
274
+ return _mm256_xor_ps(_mm256_set1_ps(-0.f), values);
275
+ }
276
+ Vectorized<float> nextafter(const Vectorized<float> &b) const {
277
+ return Vectorized<float>(Sleef_nextafterf8(values, b));
278
+ }
279
+ Vectorized<float> round() const {
280
+ return _mm256_round_ps(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
281
+ }
282
+ Vectorized<float> tan() const {
283
+ return Vectorized<float>(Sleef_tanf8_u10(values));
284
+ }
285
+ Vectorized<float> tanh() const {
286
+ return Vectorized<float>(Sleef_tanhf8_u10(values));
287
+ }
288
+ Vectorized<float> trunc() const {
289
+ return _mm256_round_ps(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
290
+ }
291
+ Vectorized<float> lgamma() const {
292
+ return Vectorized<float>(Sleef_lgammaf8_u10(values));
293
+ }
294
+ Vectorized<float> sqrt() const {
295
+ return _mm256_sqrt_ps(values);
296
+ }
297
+ Vectorized<float> reciprocal() const {
298
+ return _mm256_div_ps(_mm256_set1_ps(1), values);
299
+ }
300
+ Vectorized<float> rsqrt() const {
301
+ return _mm256_div_ps(_mm256_set1_ps(1), _mm256_sqrt_ps(values));
302
+ }
303
+ Vectorized<float> pow(const Vectorized<float> &b) const {
304
+ return Vectorized<float>(Sleef_powf8_u10(values, b));
305
+ }
306
+ // Comparison using the _CMP_**_OQ predicate.
307
+ // `O`: get false if an operand is NaN
308
+ // `Q`: do not raise if an operand is NaN
309
+ Vectorized<float> operator==(const Vectorized<float>& other) const {
310
+ return _mm256_cmp_ps(values, other.values, _CMP_EQ_OQ);
311
+ }
312
+
313
+ Vectorized<float> operator!=(const Vectorized<float>& other) const {
314
+ return _mm256_cmp_ps(values, other.values, _CMP_NEQ_UQ);
315
+ }
316
+
317
+ Vectorized<float> operator<(const Vectorized<float>& other) const {
318
+ return _mm256_cmp_ps(values, other.values, _CMP_LT_OQ);
319
+ }
320
+
321
+ Vectorized<float> operator<=(const Vectorized<float>& other) const {
322
+ return _mm256_cmp_ps(values, other.values, _CMP_LE_OQ);
323
+ }
324
+
325
+ Vectorized<float> operator>(const Vectorized<float>& other) const {
326
+ return _mm256_cmp_ps(values, other.values, _CMP_GT_OQ);
327
+ }
328
+
329
+ Vectorized<float> operator>=(const Vectorized<float>& other) const {
330
+ return _mm256_cmp_ps(values, other.values, _CMP_GE_OQ);
331
+ }
332
+
333
+ Vectorized<float> eq(const Vectorized<float>& other) const;
334
+ Vectorized<float> ne(const Vectorized<float>& other) const;
335
+ Vectorized<float> gt(const Vectorized<float>& other) const;
336
+ Vectorized<float> ge(const Vectorized<float>& other) const;
337
+ Vectorized<float> lt(const Vectorized<float>& other) const;
338
+ Vectorized<float> le(const Vectorized<float>& other) const;
339
+ };
340
+
341
+ template <>
342
+ Vectorized<float> inline operator+(const Vectorized<float>& a, const Vectorized<float>& b) {
343
+ return _mm256_add_ps(a, b);
344
+ }
345
+
346
+ template <>
347
+ Vectorized<float> inline operator-(const Vectorized<float>& a, const Vectorized<float>& b) {
348
+ return _mm256_sub_ps(a, b);
349
+ }
350
+
351
+ template <>
352
+ Vectorized<float> inline operator*(const Vectorized<float>& a, const Vectorized<float>& b) {
353
+ return _mm256_mul_ps(a, b);
354
+ }
355
+
356
+ template <>
357
+ Vectorized<float> inline operator/(const Vectorized<float>& a, const Vectorized<float>& b) {
358
+ return _mm256_div_ps(a, b);
359
+ }
360
+
361
+ // frac. Implement this here so we can use subtraction.
362
+ inline Vectorized<float> Vectorized<float>::frac() const {
363
+ return *this - this->trunc();
364
+ }
365
+
366
+ // Implements the IEEE 754 201X `maximum` operation, which propagates NaN if
367
+ // either input is a NaN.
368
+ template <>
369
+ Vectorized<float> inline maximum(const Vectorized<float>& a, const Vectorized<float>& b) {
370
+ Vectorized<float> max = _mm256_max_ps(a, b);
371
+ Vectorized<float> isnan = _mm256_cmp_ps(a, b, _CMP_UNORD_Q);
372
+ // Exploit the fact that all-ones is a NaN.
373
+ return _mm256_or_ps(max, isnan);
374
+ }
375
+
376
+ // Implements the IEEE 754 201X `minimum` operation, which propagates NaN if
377
+ // either input is a NaN.
378
+ template <>
379
+ Vectorized<float> inline minimum(const Vectorized<float>& a, const Vectorized<float>& b) {
380
+ Vectorized<float> min = _mm256_min_ps(a, b);
381
+ Vectorized<float> isnan = _mm256_cmp_ps(a, b, _CMP_UNORD_Q);
382
+ // Exploit the fact that all-ones is a NaN.
383
+ return _mm256_or_ps(min, isnan);
384
+ }
385
+
386
+ template <>
387
+ Vectorized<float> inline clamp(const Vectorized<float>& a, const Vectorized<float>& min, const Vectorized<float>& max) {
388
+ return _mm256_min_ps(max, _mm256_max_ps(min, a));
389
+ }
390
+
391
+ template <>
392
+ Vectorized<float> inline clamp_max(const Vectorized<float>& a, const Vectorized<float>& max) {
393
+ return _mm256_min_ps(max, a);
394
+ }
395
+
396
+ template <>
397
+ Vectorized<float> inline clamp_min(const Vectorized<float>& a, const Vectorized<float>& min) {
398
+ return _mm256_max_ps(min, a);
399
+ }
400
+
401
+ template <>
402
+ Vectorized<float> inline operator&(const Vectorized<float>& a, const Vectorized<float>& b) {
403
+ return _mm256_and_ps(a, b);
404
+ }
405
+
406
+ template <>
407
+ Vectorized<float> inline operator|(const Vectorized<float>& a, const Vectorized<float>& b) {
408
+ return _mm256_or_ps(a, b);
409
+ }
410
+
411
+ template <>
412
+ Vectorized<float> inline operator^(const Vectorized<float>& a, const Vectorized<float>& b) {
413
+ return _mm256_xor_ps(a, b);
414
+ }
415
+
416
+ inline Vectorized<float> Vectorized<float>::eq(const Vectorized<float>& other) const {
417
+ return (*this == other) & Vectorized<float>(1.0f);
418
+ }
419
+
420
+ inline Vectorized<float> Vectorized<float>::ne(const Vectorized<float>& other) const {
421
+ return (*this != other) & Vectorized<float>(1.0f);
422
+ }
423
+
424
+ inline Vectorized<float> Vectorized<float>::gt(const Vectorized<float>& other) const {
425
+ return (*this > other) & Vectorized<float>(1.0f);
426
+ }
427
+
428
+ inline Vectorized<float> Vectorized<float>::ge(const Vectorized<float>& other) const {
429
+ return (*this >= other) & Vectorized<float>(1.0f);
430
+ }
431
+
432
+ inline Vectorized<float> Vectorized<float>::lt(const Vectorized<float>& other) const {
433
+ return (*this < other) & Vectorized<float>(1.0f);
434
+ }
435
+
436
+ inline Vectorized<float> Vectorized<float>::le(const Vectorized<float>& other) const {
437
+ return (*this <= other) & Vectorized<float>(1.0f);
438
+ }
439
+
440
+ template <>
441
+ inline void convert(const float* src, float* dst, int64_t n) {
442
+ int64_t i;
443
+ #pragma unroll
444
+ for (i = 0; i <= (n - Vectorized<float>::size()); i += Vectorized<float>::size()) {
445
+ _mm256_storeu_ps(dst + i, _mm256_loadu_ps(src + i));
446
+ }
447
+ #pragma unroll
448
+ for (; i < n; i++) {
449
+ dst[i] = src[i];
450
+ }
451
+ }
452
+
453
+
454
+ template <>
455
+ Vectorized<float> inline fmadd(const Vectorized<float>& a, const Vectorized<float>& b, const Vectorized<float>& c) {
456
+ return _mm256_fmadd_ps(a, b, c);
457
+ }
458
+
459
+ template <>
460
+ Vectorized<float> inline fmsub(const Vectorized<float>& a, const Vectorized<float>& b, const Vectorized<float>& c) {
461
+ return _mm256_fmsub_ps(a, b, c);
462
+ }
463
+
464
+ // Used by Inductor CPP codegen
465
+ template<>
466
+ inline void transpose_mxn<float, 8, 8>(
467
+ const float* src,
468
+ int64_t ld_src,
469
+ float* dst,
470
+ int64_t ld_dst) {
471
+ // load from src to registers
472
+ // a: a0 a1 a2 a3 a4 a5 a6 a7
473
+ // b: b0 b1 b2 b3 b4 b5 b6 b7
474
+ // c: c0 c1 c2 c3 c4 c5 c6 c7
475
+ // d: d0 d1 d2 d3 d4 d5 d6 d7
476
+ // e: e0 e1 e2 e3 e4 e5 e6 e7
477
+ // f: f0 f1 f2 f3 f4 f5 f6 f7
478
+ // g: g0 g1 g2 g3 g4 g5 g6 g7
479
+ // h: h0 h1 h2 h3 h4 h5 h6 h7
480
+ __m256 a = _mm256_loadu_ps(&src[0 * ld_src]);
481
+ __m256 b = _mm256_loadu_ps(&src[1 * ld_src]);
482
+ __m256 c = _mm256_loadu_ps(&src[2 * ld_src]);
483
+ __m256 d = _mm256_loadu_ps(&src[3 * ld_src]);
484
+ __m256 e = _mm256_loadu_ps(&src[4 * ld_src]);
485
+ __m256 f = _mm256_loadu_ps(&src[5 * ld_src]);
486
+ __m256 g = _mm256_loadu_ps(&src[6 * ld_src]);
487
+ __m256 h = _mm256_loadu_ps(&src[7 * ld_src]);
488
+
489
+ __m256 ta, tb, tc, td, te, tf, tg, th;
490
+ // unpacking and interleaving 32-bit elements
491
+ // a0 b0 a1 b1 a4 b4 a5 b5
492
+ // a2 b2 a3 b3 a6 b6 a7 b7
493
+ // c0 d0 c1 d1 ...
494
+ // c2 d2 c3 d3 ...
495
+ // e0 f0 e1 f1 ...
496
+ // e2 f2 e3 f3 ...
497
+ // g0 h0 g1 h1 ...
498
+ // g2 h2 g3 h3 ...
499
+ ta = _mm256_unpacklo_ps(a, b);
500
+ tb = _mm256_unpackhi_ps(a, b);
501
+ tc = _mm256_unpacklo_ps(c, d);
502
+ td = _mm256_unpackhi_ps(c, d);
503
+ te = _mm256_unpacklo_ps(e, f);
504
+ tf = _mm256_unpackhi_ps(e, f);
505
+ tg = _mm256_unpacklo_ps(g, h);
506
+ th = _mm256_unpackhi_ps(g, h);
507
+
508
+ // unpacking and interleaving 64-bit elements
509
+ // a0 b0 c0 d0 a4 b4 c4 d4
510
+ // a1 b1 c1 d1 ...
511
+ // a2 b2 c2 d2 ...
512
+ // a3 b3 c3 d3 ...
513
+ // e0 f0 g0 h0 e4 f4 g4 h4
514
+ // e1 f1 g1 h1 ...
515
+ // e2 f2 g2 h2 ...
516
+ // e3 f3 g3 h3 ...
517
+ a = _mm256_castpd_ps(
518
+ _mm256_unpacklo_pd(_mm256_castps_pd(ta), _mm256_castps_pd(tc)));
519
+ b = _mm256_castpd_ps(
520
+ _mm256_unpackhi_pd(_mm256_castps_pd(ta), _mm256_castps_pd(tc)));
521
+ c = _mm256_castpd_ps(
522
+ _mm256_unpacklo_pd(_mm256_castps_pd(tb), _mm256_castps_pd(td)));
523
+ d = _mm256_castpd_ps(
524
+ _mm256_unpackhi_pd(_mm256_castps_pd(tb), _mm256_castps_pd(td)));
525
+ e = _mm256_castpd_ps(
526
+ _mm256_unpacklo_pd(_mm256_castps_pd(te), _mm256_castps_pd(tg)));
527
+ f = _mm256_castpd_ps(
528
+ _mm256_unpackhi_pd(_mm256_castps_pd(te), _mm256_castps_pd(tg)));
529
+ g = _mm256_castpd_ps(
530
+ _mm256_unpacklo_pd(_mm256_castps_pd(tf), _mm256_castps_pd(th)));
531
+ h = _mm256_castpd_ps(
532
+ _mm256_unpackhi_pd(_mm256_castps_pd(tf), _mm256_castps_pd(th)));
533
+
534
+ // shuffle 128-bits (composed of 4 32-bit elements)
535
+ // a0 b0 c0 d0 e0 f0 g0 h0
536
+ // a1 b1 c1 d1 ...
537
+ // a2 b2 c2 d2 ...
538
+ // a3 b3 c3 d3 ...
539
+ // a4 b4 c4 d4 ...
540
+ // a5 b5 c5 d5 ...
541
+ // a6 b6 c6 d6 ...
542
+ // a7 b7 c7 d7 ...
543
+ ta = _mm256_permute2f128_ps(a, e, 0x20);
544
+ tb = _mm256_permute2f128_ps(b, f, 0x20);
545
+ tc = _mm256_permute2f128_ps(c, g, 0x20);
546
+ td = _mm256_permute2f128_ps(d, h, 0x20);
547
+ te = _mm256_permute2f128_ps(a, e, 0x31);
548
+ tf = _mm256_permute2f128_ps(b, f, 0x31);
549
+ tg = _mm256_permute2f128_ps(c, g, 0x31);
550
+ th = _mm256_permute2f128_ps(d, h, 0x31);
551
+
552
+ // store from registers to dst
553
+ _mm256_storeu_ps(&dst[0 * ld_dst], ta);
554
+ _mm256_storeu_ps(&dst[1 * ld_dst], tb);
555
+ _mm256_storeu_ps(&dst[2 * ld_dst], tc);
556
+ _mm256_storeu_ps(&dst[3 * ld_dst], td);
557
+ _mm256_storeu_ps(&dst[4 * ld_dst], te);
558
+ _mm256_storeu_ps(&dst[5 * ld_dst], tf);
559
+ _mm256_storeu_ps(&dst[6 * ld_dst], tg);
560
+ _mm256_storeu_ps(&dst[7 * ld_dst], th);
561
+ }
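// Usage sketch for the 8x8 transpose above (assumes an AVX2 build; the include
// below is the assumed entry point that pulls in this specialization):
#include <ATen/cpu/vec/vec.h>
#include <cstdio>

int main() {
  float src[8 * 8], dst[8 * 8];
  for (int i = 0; i < 64; ++i) src[i] = static_cast<float>(i);
  at::vec::transpose_mxn<float, 8, 8>(src, /*ld_src=*/8, dst, /*ld_dst=*/8);
  // After the call, dst[r * 8 + c] == src[c * 8 + r].
  std::printf("%g %g\n", dst[1], src[8]);  // both print 8
  return 0;
}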
562
+
563
+ #endif
564
+
565
+ }} // namespace at::vec::CPU_CAPABILITY
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_float_neon.h ADDED
@@ -0,0 +1,879 @@
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+
6
+ #include <ATen/cpu/vec/intrinsics.h>
7
+ #include <ATen/cpu/vec/vec_base.h>
8
+ #include <c10/util/irange.h>
9
+
10
+ #if defined(__aarch64__) && defined(AT_BUILD_ARM_VEC256_WITH_SLEEF)
11
+ #include <sleef.h>
12
+ #endif
13
+
14
+ // Sleef offers vectorized versions of some transcendentals
15
+ // such as sin, cos, tan, etc.
16
+ // However, for now we opt for the STL, since we are not yet
17
+ // building with Sleef for mobile.
18
+
19
+ namespace at::vec {
20
+ // See Note [CPU_CAPABILITY namespace]
21
+ inline namespace CPU_CAPABILITY {
22
+
23
+ // Right now this contains only the aarch64 implementation.
24
+ // aarch32 is not currently supported, for the following two reasons:
25
+ // 1. Due to differences in the ISA between aarch32 and aarch64, intrinsics
26
+ //    that work for aarch64 don't work for aarch32.
27
+ // 2. Android NDK r21 has problems compiling aarch32;
28
+ //    Clang segfaults.
29
+ // https://github.com/android/ndk/issues/1248
30
+ // https://bugs.llvm.org/show_bug.cgi?id=45824
31
+ // Most likely we will add aarch32 support with inline asm.
32
+ #if defined(__aarch64__)
33
+
34
+ #ifdef __BIG_ENDIAN__
35
+ #error "Big endian is not supported."
36
+ #endif
37
+
38
+ #if defined(AT_BUILD_ARM_VEC256_WITH_SLEEF)
39
+ #define USE_SLEEF(sleef_code, non_sleef_code) sleef_code
40
+ #else
41
+ #define USE_SLEEF(sleef_code, non_sleef_code) non_sleef_code
42
+ #endif
43
+
44
+ template<int index, bool mask_val>
45
+ struct BlendRegs {
46
+ static float32x4_t impl(
47
+ const float32x4_t& a, const float32x4_t& b, float32x4_t& res);
48
+ };
49
+
50
+ template<int index>
51
+ struct BlendRegs<index, true>{
52
+ static float32x4_t impl(
53
+ const float32x4_t& a, const float32x4_t& b, float32x4_t& res) {
54
+ return vsetq_lane_f32(vgetq_lane_f32(b, index), res, index);
55
+ }
56
+ };
57
+
58
+ template<int index>
59
+ struct BlendRegs<index, false>{
60
+ static float32x4_t impl(
61
+ const float32x4_t& a, const float32x4_t& b, float32x4_t& res) {
62
+ return vsetq_lane_f32(vgetq_lane_f32(a, index), res, index);
63
+ }
64
+ };
65
+
66
+ template <> class Vectorized<float> {
67
+ private:
68
+ float32x4x2_t values;
69
+ public:
70
+ using value_type = float;
71
+ using size_type = int;
72
+ static constexpr size_type size() {
73
+ return 8;
74
+ }
75
+ Vectorized() {}
76
+ Vectorized(float32x4x2_t v) : values(v) {}
77
+ Vectorized(float val) : values{vdupq_n_f32(val), vdupq_n_f32(val) } {}
78
+ Vectorized(float val0, float val1, float val2, float val3,
79
+ float val4, float val5, float val6, float val7) :
80
+ values{val0, val1, val2, val3, val4, val5, val6, val7} {}
81
+ Vectorized(float32x4_t val0, float32x4_t val1) : values{val0, val1} {}
82
+ operator float32x4x2_t() const {
83
+ return values;
84
+ }
85
+ template <int64_t mask>
86
+ static Vectorized<float> blend(const Vectorized<float>& a, const Vectorized<float>& b) {
87
+ Vectorized<float> vec;
88
+ // 0.
89
+ vec.values.val[0] =
90
+ BlendRegs<0, (mask & 0x01)!=0>::impl(
91
+ a.values.val[0], b.values.val[0], vec.values.val[0]);
92
+ vec.values.val[0] =
93
+ BlendRegs<1, (mask & 0x02)!=0>::impl(
94
+ a.values.val[0], b.values.val[0], vec.values.val[0]);
95
+ vec.values.val[0] =
96
+ BlendRegs<2, (mask & 0x04)!=0>::impl(
97
+ a.values.val[0], b.values.val[0], vec.values.val[0]);
98
+ vec.values.val[0] =
99
+ BlendRegs<3, (mask & 0x08)!=0>::impl(
100
+ a.values.val[0], b.values.val[0], vec.values.val[0]);
101
+ // 1.
102
+ vec.values.val[1] =
103
+ BlendRegs<0, (mask & 0x10)!=0>::impl(
104
+ a.values.val[1], b.values.val[1], vec.values.val[1]);
105
+ vec.values.val[1] =
106
+ BlendRegs<1, (mask & 0x20)!=0>::impl(
107
+ a.values.val[1], b.values.val[1], vec.values.val[1]);
108
+ vec.values.val[1] =
109
+ BlendRegs<2, (mask & 0x40)!=0>::impl(
110
+ a.values.val[1], b.values.val[1], vec.values.val[1]);
111
+ vec.values.val[1] =
112
+ BlendRegs<3, (mask & 0x80)!=0>::impl(
113
+ a.values.val[1], b.values.val[1], vec.values.val[1]);
114
+ return vec;
115
+ }
116
+ static Vectorized<float> blendv(const Vectorized<float>& a, const Vectorized<float>& b,
117
+ const Vectorized<float>& mask) {
118
+ // TODO
119
+ // NB: This requires that each lane of the mask, i.e., each 32-bit value,
120
+ // be either all zeros or all ones.
121
+ // We could perhaps add an assert for that,
122
+ // but it would affect performance.
123
+ Vectorized<float> vec(mask.values);
124
+ vec.values.val[0] = vbslq_f32(
125
+ vreinterpretq_u32_f32(vec.values.val[0]),
126
+ b.values.val[0],
127
+ a.values.val[0]);
128
+ vec.values.val[1] = vbslq_f32(
129
+ vreinterpretq_u32_f32(vec.values.val[1]),
130
+ b.values.val[1],
131
+ a.values.val[1]);
132
+ return vec;
133
+ }
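// Sketch of a well-formed blendv mask (aarch64 assumed): a comparison already
// produces the per-lane all-zeros / all-ones pattern that blendv expects.
#include <arm_neon.h>

inline float32x4_t example_blend_mask(float32x4_t a, float32x4_t b) {
  // vcltq_f32 yields 0xFFFFFFFF in lanes where a < b and 0 elsewhere.
  return vreinterpretq_f32_u32(vcltq_f32(a, b));
}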
134
+ template<typename step_t>
135
+ static Vectorized<float> arange(float base = 0.f, step_t step = static_cast<step_t>(1)) {
136
+ const Vectorized<float> base_vec(base);
137
+ const Vectorized<float> step_vec(step);
138
+ const Vectorized<float> step_sizes(0, 1, 2, 3, 4, 5, 6, 7);
139
+ return fmadd(step_sizes, step_vec, base_vec);
140
+ }
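// Usage sketch (aarch64 build assumed): arange() is a single fmadd of the lane
// indices {0..7} with the step, e.g.
//   auto v = at::vec::Vectorized<float>::arange(10.f, 0.5f);
//   // lanes: 10.0 10.5 11.0 11.5 12.0 12.5 13.0 13.5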
141
+ static Vectorized<float> set(const Vectorized<float>& a, const Vectorized<float>& b,
142
+ int64_t count = size()) {
143
+ switch (count) {
144
+ case 0:
145
+ return a;
146
+ case 1:
147
+ {
148
+ Vectorized<float> vec;
149
+ static uint32x4_t mask_low = {0xFFFFFFFF, 0x0, 0x0, 0x0};
150
+ vec.values.val[0] = vreinterpretq_f32_u32(mask_low);
151
+ vec.values.val[1] = a.values.val[1];
152
+ vec.values.val[0] = vbslq_f32(
153
+ vreinterpretq_u32_f32(vec.values.val[0]),
154
+ b.values.val[0],
155
+ a.values.val[0]);
156
+ return vec;
157
+ }
158
+ case 2:
159
+ {
160
+ Vectorized<float> vec;
161
+ static uint32x4_t mask_low = {0xFFFFFFFF, 0xFFFFFFFF, 0x0, 0x0};
162
+ vec.values.val[0] = vreinterpretq_f32_u32(mask_low);
163
+ vec.values.val[1] = a.values.val[1];
164
+ vec.values.val[0] = vbslq_f32(
165
+ vreinterpretq_u32_f32(vec.values.val[0]),
166
+ b.values.val[0],
167
+ a.values.val[0]);
168
+ return vec;
169
+ }
170
+ case 3:
171
+ {
172
+ Vectorized<float> vec;
173
+ static uint32x4_t mask_low = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x0};
174
+ vec.values.val[0] = vreinterpretq_f32_u32(mask_low);
175
+ vec.values.val[1] = a.values.val[1];
176
+ vec.values.val[0] = vbslq_f32(
177
+ vreinterpretq_u32_f32(vec.values.val[0]),
178
+ b.values.val[0],
179
+ a.values.val[0]);
180
+ return vec;
181
+ }
182
+ case 4:
183
+ return Vectorized<float>(b.values.val[0], a.values.val[1]);
184
+ case 5:
185
+ {
186
+ Vectorized<float> vec;
187
+ static uint32x4_t mask_high = {0xFFFFFFFF, 0x0, 0x0, 0x0};
188
+ vec.values.val[0] = b.values.val[0];
189
+ vec.values.val[1] = vreinterpretq_f32_u32(mask_high);
190
+ vec.values.val[1] = vbslq_f32(
191
+ vreinterpretq_u32_f32(vec.values.val[1]),
192
+ b.values.val[1],
193
+ a.values.val[1]);
194
+ return vec;
195
+ }
196
+ case 6:
197
+ {
198
+ Vectorized<float> vec;
199
+ static uint32x4_t mask_high = {0xFFFFFFFF, 0xFFFFFFFF, 0x0, 0x0};
200
+ vec.values.val[0] = b.values.val[0];
201
+ vec.values.val[1] = vreinterpretq_f32_u32(mask_high);
202
+ vec.values.val[1] = vbslq_f32(
203
+ vreinterpretq_u32_f32(vec.values.val[1]),
204
+ b.values.val[1],
205
+ a.values.val[1]);
206
+ return vec;
207
+ }
208
+ case 7:
209
+ {
210
+ Vectorized<float> vec;
211
+ static uint32x4_t mask_high = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x0};
212
+ vec.values.val[0] = b.values.val[0];
213
+ vec.values.val[1] = vreinterpretq_f32_u32(mask_high);
214
+ vec.values.val[1] = vbslq_f32(
215
+ vreinterpretq_u32_f32(vec.values.val[1]),
216
+ b.values.val[1],
217
+ a.values.val[1]);
218
+ return vec;
219
+ }
220
+ }
221
+ return b;
222
+ }
223
+ static Vectorized<float> loadu(const void* ptr, int64_t count = size()) {
224
+ if (count == size()) {
225
+ return vld1q_f32_x2(reinterpret_cast<const float*>(ptr));
226
+ }
227
+ else if (count == (size() >> 1)) {
228
+ Vectorized<float> res;
229
+ res.values.val[0] = vld1q_f32(reinterpret_cast<const float*>(ptr));
230
+ res.values.val[1] = vdupq_n_f32(0.f);
231
+ return res;
232
+ }
233
+ else {
234
+ __at_align__ float tmp_values[size()];
235
+ for (const auto i : c10::irange(size())) {
236
+ tmp_values[i] = 0.0;
237
+ }
238
+ std::memcpy(
239
+ tmp_values,
240
+ reinterpret_cast<const float*>(ptr),
241
+ count * sizeof(float));
242
+ return vld1q_f32_x2(reinterpret_cast<const float*>(tmp_values));
243
+ }
244
+ }
245
+ void store(void* ptr, int64_t count = size()) const {
246
+ if (count == size()) {
247
+ vst1q_f32_x2(reinterpret_cast<float*>(ptr), values);
248
+ }
249
+ else if (count == (size() >> 1)) {
250
+ vst1q_f32(reinterpret_cast<float*>(ptr), values.val[0]);
251
+ }
252
+ else {
253
+ float tmp_values[size()];
254
+ vst1q_f32_x2(reinterpret_cast<float*>(tmp_values), values);
255
+ std::memcpy(ptr, tmp_values, count * sizeof(float));
256
+ }
257
+ }
258
+ inline const float32x4_t& get_low() const {
259
+ return values.val[0];
260
+ }
261
+ inline float32x4_t& get_low() {
262
+ return values.val[0];
263
+ }
264
+ inline const float32x4_t& get_high() const {
265
+ return values.val[1];
266
+ }
267
+ inline float32x4_t& get_high() {
268
+ return values.val[1];
269
+ }
270
+ // Very slow implementation of indexing.
271
+ // Only required because vec256_qint refers to this.
272
+ // Once we specialize that implementation for ARM
273
+ // this should be removed. TODO (kimishpatel)
274
+ float operator[](int idx) const {
275
+ __at_align__ float tmp[size()];
276
+ store(tmp);
277
+ return tmp[idx];
278
+ }
279
+ float operator[](int idx) {
280
+ __at_align__ float tmp[size()];
281
+ store(tmp);
282
+ return tmp[idx];
283
+ }
284
+ // For a boolean version where we only need "any lane set" / "all lanes zero",
286
+ // etc., this could be done faster in a different way.
286
+ int zero_mask() const {
287
+ __at_align__ float tmp[size()];
288
+ store(tmp);
289
+ int mask = 0;
290
+ for (int i = 0; i < size(); ++ i) {
291
+ if (tmp[i] == 0.f) {
292
+ mask |= (1 << i);
293
+ }
294
+ }
295
+ return mask;
296
+ }
297
+ Vectorized<float> isnan() const {
298
+ __at_align__ float tmp[size()];
299
+ __at_align__ float res[size()];
300
+ store(tmp);
301
+ for (const auto i : c10::irange(size())) {
302
+ if (_isnan(tmp[i])) {
303
+ std::memset(static_cast<void*>(&res[i]), 0xFF, sizeof(float));
304
+ } else {
305
+ std::memset(static_cast<void*>(&res[i]), 0, sizeof(float));
306
+ }
307
+ }
308
+ return loadu(res);
309
+ };
310
+ Vectorized<float> map(float (*const f)(float)) const {
311
+ __at_align__ float tmp[size()];
312
+ store(tmp);
313
+ for (const auto i : c10::irange(size())) {
314
+ tmp[i] = f(tmp[i]);
315
+ }
316
+ return loadu(tmp);
317
+ }
318
+ Vectorized<float> abs() const {
319
+ return Vectorized<float>(vabsq_f32(values.val[0]), vabsq_f32(values.val[1]));
320
+ }
321
+ Vectorized<float> angle() const {
322
+ auto zero = Vectorized<float>(0);
323
+ auto pi = Vectorized<float>(c10::pi<float>);
324
+ auto tmp = blendv(zero, pi, *this < zero);
325
+ return blendv(tmp, *this, isnan());
326
+ }
327
+ Vectorized<float> real() const {
328
+ return *this;
329
+ }
330
+ Vectorized<float> imag() const {
331
+ return Vectorized<float>(0.f);
332
+ }
333
+ Vectorized<float> conj() const {
334
+ return *this;
335
+ }
336
+ Vectorized<float> acos() const {
337
+ return USE_SLEEF(
338
+ Vectorized<float>(Sleef_acosf4_u10(values.val[0]), Sleef_acosf4_u10(values.val[1])),
339
+ map(std::acos)
340
+ );
341
+ }
342
+ Vectorized<float> asin() const {
343
+ return USE_SLEEF(
344
+ Vectorized<float>(Sleef_asinf4_u10(values.val[0]), Sleef_asinf4_u10(values.val[1])),
345
+ map(std::asin)
346
+ );
347
+ }
348
+ Vectorized<float> atan() const {
349
+ return USE_SLEEF(
350
+ Vectorized<float>(Sleef_atanf4_u10(values.val[0]), Sleef_atanf4_u10(values.val[1])),
351
+ map(std::atan)
352
+ );
353
+ }
354
+ Vectorized<float> atanh() const {
355
+ return USE_SLEEF(
356
+ Vectorized<float>(Sleef_atanhf4_u10(values.val[0]), Sleef_atanhf4_u10(values.val[1])),
357
+ map(std::atanh)
358
+ );
359
+ }
360
+ Vectorized<float> atan2(const Vectorized<float> &exp) const {
361
+ USE_SLEEF(
362
+ {
363
+ return Vectorized<float>(Sleef_atan2f4_u10(values.val[0], exp.values.val[0]),
364
+ Sleef_atan2f4_u10(values.val[1], exp.values.val[1]));
365
+ },
366
+ {
367
+ __at_align__ float tmp[size()];
368
+ __at_align__ float tmp_exp[size()];
369
+ store(tmp);
370
+ exp.store(tmp_exp);
371
+ for (const auto i : c10::irange(size())) {
372
+ tmp[i] = std::atan2(tmp[i], tmp_exp[i]);
373
+ }
374
+ return loadu(tmp);
375
+ }
376
+ )
377
+ }
378
+ Vectorized<float> copysign(const Vectorized<float> &sign) const {
379
+ USE_SLEEF(
380
+ {
381
+ return Vectorized<float>(Sleef_copysignf4(values.val[0], sign.values.val[0]),
382
+ Sleef_copysignf4(values.val[1], sign.values.val[1]));
383
+ },
384
+ {
385
+ __at_align__ float tmp[size()];
386
+ __at_align__ float tmp_sign[size()];
387
+ store(tmp);
388
+ sign.store(tmp_sign);
389
+ for (size_type i = 0; i < size(); i++) {
390
+ tmp[i] = std::copysign(tmp[i], tmp_sign[i]);
391
+ }
392
+ return loadu(tmp);
393
+ }
394
+ )
395
+ }
396
+ Vectorized<float> erf() const;
397
+ Vectorized<float> erfc() const {
398
+ return USE_SLEEF(
399
+ Vectorized<float>(Sleef_erfcf4_u15(values.val[0]), Sleef_erfcf4_u15(values.val[1])),
400
+ map(std::erfc)
401
+ );
402
+ }
403
+ Vectorized<float> erfinv() const {
404
+ return map(calc_erfinv);
405
+ }
406
+ Vectorized<float> exp() const {
407
+ return USE_SLEEF(
408
+ Vectorized<float>(Sleef_expf4_u10(values.val[0]), Sleef_expf4_u10(values.val[1])),
409
+ map(std::exp)
410
+ );
411
+ }
412
+ Vectorized<float> exp2() const {
413
+ return USE_SLEEF(
414
+ Vectorized<float>(Sleef_exp2f4_u10(values.val[0]), Sleef_exp2f4_u10(values.val[1])),
415
+ map(std::exp2)
416
+ );
417
+ }
418
+ Vectorized<float> expm1() const {
419
+ return USE_SLEEF(
420
+ Vectorized<float>(Sleef_expm1f4_u10(values.val[0]), Sleef_expm1f4_u10(values.val[1])),
421
+ map(std::expm1)
422
+ );
423
+ }
424
+ Vectorized<float> fmod(const Vectorized<float>& q) const {
425
+ USE_SLEEF(
426
+ {
427
+ return Vectorized<float>(Sleef_fmodf4(values.val[0], q.values.val[0]),
428
+ Sleef_fmodf4(values.val[1], q.values.val[1]));
429
+ },
430
+ {
431
+ __at_align__ float tmp[size()];
432
+ __at_align__ float tmp_q[size()];
433
+ store(tmp);
434
+ q.store(tmp_q);
435
+ for (const auto i : c10::irange(size())) {
436
+ tmp[i] = std::fmod(tmp[i], tmp_q[i]);
437
+ }
438
+ return loadu(tmp);
439
+ }
440
+ )
441
+ }
442
+ Vectorized<float> hypot(const Vectorized<float> &b) const {
443
+ USE_SLEEF(
444
+ {
445
+ return Vectorized<float>(Sleef_hypotf4_u05(values.val[0], b.values.val[0]),
446
+ Sleef_hypotf4_u05(values.val[1], b.values.val[1]));
447
+ },
448
+ {
449
+ __at_align__ float tmp[size()];
450
+ __at_align__ float tmp_b[size()];
451
+ store(tmp);
452
+ b.store(tmp_b);
453
+ for (const auto i : c10::irange(size())) {
454
+ tmp[i] = std::hypot(tmp[i], tmp_b[i]);
455
+ }
456
+ return loadu(tmp);
457
+ }
458
+ )
459
+ }
460
+ Vectorized<float> i0() const {
461
+ return map(calc_i0);
462
+ }
463
+ Vectorized<float> i0e() const {
464
+ return map(calc_i0e);
465
+ }
466
+ Vectorized<float> digamma() const {
467
+ return map(calc_digamma);
468
+ }
469
+ Vectorized<float> igamma(const Vectorized<float> &x) const {
470
+ __at_align__ float tmp[size()];
471
+ __at_align__ float tmp_x[size()];
472
+ store(tmp);
473
+ x.store(tmp_x);
474
+ for (const auto i : c10::irange(size())) {
475
+ tmp[i] = calc_igamma(tmp[i], tmp_x[i]);
476
+ }
477
+ return loadu(tmp);
478
+ }
479
+ Vectorized<float> igammac(const Vectorized<float> &x) const {
480
+ __at_align__ float tmp[size()];
481
+ __at_align__ float tmp_x[size()];
482
+ store(tmp);
483
+ x.store(tmp_x);
484
+ for (const auto i : c10::irange(size())) {
485
+ tmp[i] = calc_igammac(tmp[i], tmp_x[i]);
486
+ }
487
+ return loadu(tmp);
488
+ }
489
+ Vectorized<float> log() const {
490
+ return USE_SLEEF(
491
+ Vectorized<float>(Sleef_logf4_u10(values.val[0]), Sleef_logf4_u10(values.val[1])),
492
+ map(std::log)
493
+ );
494
+ }
495
+ Vectorized<float> log10() const {
496
+ return USE_SLEEF(
497
+ Vectorized<float>(Sleef_log10f4_u10(values.val[0]), Sleef_log10f4_u10(values.val[1])),
498
+ map(std::log10)
499
+ );
500
+ }
501
+ Vectorized<float> log1p() const {
502
+ return USE_SLEEF(
503
+ Vectorized<float>(Sleef_log1pf4_u10(values.val[0]), Sleef_log1pf4_u10(values.val[1])),
504
+ map(std::log1p)
505
+ );
506
+ }
507
+ Vectorized<float> log2() const {
508
+ return USE_SLEEF(
509
+ Vectorized<float>(Sleef_log2f4_u10(values.val[0]), Sleef_log2f4_u10(values.val[1])),
510
+ map(std::log2)
511
+ );
512
+ }
513
+ Vectorized<float> nextafter(const Vectorized<float> &b) const {
514
+ USE_SLEEF(
515
+ {
516
+ return Vectorized<float>(Sleef_nextafterf4(values.val[0], b.values.val[0]),
517
+ Sleef_nextafterf4(values.val[1], b.values.val[1]));
518
+ },
519
+ {
520
+ __at_align__ float tmp[size()];
521
+ __at_align__ float tmp_b[size()];
522
+ store(tmp);
523
+ b.store(tmp_b);
524
+ for (const auto i : c10::irange(size())) {
525
+ tmp[i] = std::nextafter(tmp[i], tmp_b[i]);
526
+ }
527
+ return loadu(tmp);
528
+ }
529
+ )
530
+ }
531
+ Vectorized<float> frac() const;
532
+ Vectorized<float> sin() const {
533
+ return USE_SLEEF(
534
+ Vectorized<float>(Sleef_sinf4_u10(values.val[0]), Sleef_sinf4_u10(values.val[1])),
535
+ map(std::sin)
536
+ );
537
+ }
538
+ Vectorized<float> sinh() const {
539
+ return USE_SLEEF(
540
+ Vectorized<float>(Sleef_sinhf4_u10(values.val[0]), Sleef_sinhf4_u10(values.val[1])),
541
+ map(std::sinh)
542
+ );
543
+ }
544
+ Vectorized<float> cos() const {
545
+ return USE_SLEEF(
546
+ Vectorized<float>(Sleef_cosf4_u10(values.val[0]), Sleef_cosf4_u10(values.val[1])),
547
+ map(std::cos)
548
+ );
549
+ }
550
+ Vectorized<float> cosh() const {
551
+ return USE_SLEEF(
552
+ Vectorized<float>(Sleef_coshf4_u10(values.val[0]), Sleef_coshf4_u10(values.val[1])),
553
+ map(std::cosh)
554
+ );
555
+ }
556
+ Vectorized<float> ceil() const {
557
+ return map(at::native::ceil_impl);
558
+ }
559
+ Vectorized<float> floor() const {
560
+ return map(at::native::floor_impl);
561
+ }
562
+ Vectorized<float> neg() const {
563
+ return Vectorized<float>(
564
+ vnegq_f32(values.val[0]),
565
+ vnegq_f32(values.val[1]));
566
+ }
567
+ Vectorized<float> round() const {
568
+ // We do not use std::round because we would like to round midway numbers to the nearest even integer.
569
+ return map(at::native::round_impl);
570
+ }
571
+ Vectorized<float> tan() const {
572
+ return USE_SLEEF(
573
+ Vectorized<float>(Sleef_tanf4_u10(values.val[0]), Sleef_tanf4_u10(values.val[1])),
574
+ map(std::tan)
575
+ );
576
+ }
577
+ Vectorized<float> tanh() const {
578
+ return USE_SLEEF(
579
+ Vectorized<float>(Sleef_tanhf4_u10(values.val[0]), Sleef_tanhf4_u10(values.val[1])),
580
+ map(std::tanh)
581
+ );
582
+ }
583
+ Vectorized<float> trunc() const {
584
+ float32x4_t r0 = vrndq_f32(values.val[0]);
585
+ float32x4_t r1 = vrndq_f32(values.val[1]);
586
+ return Vectorized<float>(r0, r1);
587
+ }
588
+ Vectorized<float> lgamma() const {
589
+ return USE_SLEEF(
590
+ Vectorized<float>(Sleef_lgammaf4_u10(values.val[0]), Sleef_lgammaf4_u10(values.val[1])),
591
+ map(std::lgamma)
592
+ );
593
+ }
594
+ Vectorized<float> sqrt() const {
595
+ return Vectorized<float>(
596
+ vsqrtq_f32(values.val[0]),
597
+ vsqrtq_f32(values.val[1]));
598
+ }
599
+ Vectorized<float> reciprocal() const {
600
+ auto r0 = vdivq_f32(vdupq_n_f32(1.0f), values.val[0]);
601
+ auto r1 = vdivq_f32(vdupq_n_f32(1.0f), values.val[1]);
602
+ return Vectorized<float>(r0, r1);
603
+ }
604
+ Vectorized<float> rsqrt() const {
605
+ return this->sqrt().reciprocal();
606
+ }
607
+ Vectorized<float> pow(const Vectorized<float> &exp) const {
608
+ USE_SLEEF(
609
+ {
610
+ return Vectorized<float>(Sleef_powf4_u10(values.val[0], exp.values.val[0]),
611
+ Sleef_powf4_u10(values.val[1], exp.values.val[1]));
612
+ },
613
+ {
614
+ __at_align__ float tmp[size()];
615
+ __at_align__ float tmp_exp[size()];
616
+ store(tmp);
617
+ exp.store(tmp_exp);
618
+ for (const auto i : c10::irange(size())) {
619
+ tmp[i] = std::pow(tmp[i], tmp_exp[i]);
620
+ }
621
+ return loadu(tmp);
622
+ }
623
+ )
624
+ }
625
+ Vectorized<float> operator==(const Vectorized<float>& other) const {
626
+ float32x4_t r0 =
627
+ vreinterpretq_f32_u32(vceqq_f32(values.val[0], other.values.val[0]));
628
+ float32x4_t r1 =
629
+ vreinterpretq_f32_u32(vceqq_f32(values.val[1], other.values.val[1]));
630
+ return Vectorized<float>(r0, r1);
631
+ }
632
+
633
+ Vectorized<float> operator!=(const Vectorized<float>& other) const {
634
+ float32x4_t r0 = vreinterpretq_f32_u32(
635
+ vmvnq_u32(vceqq_f32(values.val[0], other.values.val[0])));
636
+ float32x4_t r1 = vreinterpretq_f32_u32(
637
+ vmvnq_u32(vceqq_f32(values.val[1], other.values.val[1])));
638
+ return Vectorized<float>(r0, r1);
639
+ }
640
+
641
+ Vectorized<float> operator<(const Vectorized<float>& other) const {
642
+ float32x4_t r0 =
643
+ vreinterpretq_f32_u32(vcltq_f32(values.val[0], other.values.val[0]));
644
+ float32x4_t r1 =
645
+ vreinterpretq_f32_u32(vcltq_f32(values.val[1], other.values.val[1]));
646
+ return Vectorized<float>(r0, r1);
647
+ }
648
+
649
+ Vectorized<float> operator<=(const Vectorized<float>& other) const {
650
+ float32x4_t r0 =
651
+ vreinterpretq_f32_u32(vcleq_f32(values.val[0], other.values.val[0]));
652
+ float32x4_t r1 =
653
+ vreinterpretq_f32_u32(vcleq_f32(values.val[1], other.values.val[1]));
654
+ return Vectorized<float>(r0, r1);
655
+ }
656
+
657
+ Vectorized<float> operator>(const Vectorized<float>& other) const {
658
+ float32x4_t r0 =
659
+ vreinterpretq_f32_u32(vcgtq_f32(values.val[0], other.values.val[0]));
660
+ float32x4_t r1 =
661
+ vreinterpretq_f32_u32(vcgtq_f32(values.val[1], other.values.val[1]));
662
+ return Vectorized<float>(r0, r1);
663
+ }
664
+
665
+ Vectorized<float> operator>=(const Vectorized<float>& other) const {
666
+ float32x4_t r0 =
667
+ vreinterpretq_f32_u32(vcgeq_f32(values.val[0], other.values.val[0]));
668
+ float32x4_t r1 =
669
+ vreinterpretq_f32_u32(vcgeq_f32(values.val[1], other.values.val[1]));
670
+ return Vectorized<float>(r0, r1);
671
+ }
672
+
673
+ Vectorized<float> eq(const Vectorized<float>& other) const;
674
+ Vectorized<float> ne(const Vectorized<float>& other) const;
675
+ Vectorized<float> gt(const Vectorized<float>& other) const;
676
+ Vectorized<float> ge(const Vectorized<float>& other) const;
677
+ Vectorized<float> lt(const Vectorized<float>& other) const;
678
+ Vectorized<float> le(const Vectorized<float>& other) const;
679
+ };
680
+
681
+ template <>
682
+ Vectorized<float> inline operator+(const Vectorized<float>& a, const Vectorized<float>& b) {
683
+ float32x4_t r0 = vaddq_f32(a.get_low(), b.get_low());
684
+ float32x4_t r1 = vaddq_f32(a.get_high(), b.get_high());
685
+ return Vectorized<float>(r0, r1);
686
+ }
687
+
688
+ template <>
689
+ Vectorized<float> inline operator-(const Vectorized<float>& a, const Vectorized<float>& b) {
690
+ float32x4_t r0 = vsubq_f32(a.get_low(), b.get_low());
691
+ float32x4_t r1 = vsubq_f32(a.get_high(), b.get_high());
692
+ return Vectorized<float>(r0, r1);
693
+ }
694
+
695
+ template <>
696
+ Vectorized<float> inline operator*(const Vectorized<float>& a, const Vectorized<float>& b) {
697
+ float32x4_t r0 = vmulq_f32(a.get_low(), b.get_low());
698
+ float32x4_t r1 = vmulq_f32(a.get_high(), b.get_high());
699
+ return Vectorized<float>(r0, r1);
700
+ }
701
+
702
+ template <>
703
+ Vectorized<float> inline operator/(const Vectorized<float>& a, const Vectorized<float>& b) {
704
+ float32x4_t r0 = vdivq_f32(a.get_low(), b.get_low());
705
+ float32x4_t r1 = vdivq_f32(a.get_high(), b.get_high());
706
+ return Vectorized<float>(r0, r1);
707
+ }
708
+
709
+ // frac. Implement this here so we can use subtraction
710
+ inline Vectorized<float> Vectorized<float>::frac() const {
711
+ return *this - this->trunc();
712
+ }
713
+
714
+ // Implements the IEEE 754 201X `maximum` operation, which propagates NaN if
715
+ // either input is a NaN.
716
+ template <>
717
+ Vectorized<float> inline maximum(const Vectorized<float>& a, const Vectorized<float>& b) {
718
+ float32x4_t r0 = vmaxq_f32(a.get_low(), b.get_low());
719
+ float32x4_t r1 = vmaxq_f32(a.get_high(), b.get_high());
720
+ return Vectorized<float>(r0, r1);
721
+ }
722
+
723
+ // Implements the IEEE 754 201X `minimum` operation, which propagates NaN if
724
+ // either input is a NaN.
725
+ template <>
726
+ Vectorized<float> inline minimum(const Vectorized<float>& a, const Vectorized<float>& b) {
727
+ float32x4_t r0 = vminq_f32(a.get_low(), b.get_low());
728
+ float32x4_t r1 = vminq_f32(a.get_high(), b.get_high());
729
+ return Vectorized<float>(r0, r1);
730
+ }
731
+
732
+ template <>
733
+ Vectorized<float> inline clamp(const Vectorized<float>& a, const Vectorized<float>& min, const Vectorized<float>& max) {
734
+ return minimum(max, maximum(min, a));
735
+ }
736
+
737
+ template <>
738
+ Vectorized<float> inline clamp_max(const Vectorized<float>& a, const Vectorized<float>& max) {
739
+ return minimum(max, a);
740
+ }
741
+
742
+ template <>
743
+ Vectorized<float> inline clamp_min(const Vectorized<float>& a, const Vectorized<float>& min) {
744
+ return maximum(min, a);
745
+ }
746
+
747
+ template <>
748
+ Vectorized<float> inline operator&(const Vectorized<float>& a, const Vectorized<float>& b) {
749
+ float32x4_t r0 = vreinterpretq_f32_u32(vandq_u32(
750
+ vreinterpretq_u32_f32(a.get_low()),
751
+ vreinterpretq_u32_f32(b.get_low())));
752
+ float32x4_t r1 = vreinterpretq_f32_u32(vandq_u32(
753
+ vreinterpretq_u32_f32(a.get_high()),
754
+ vreinterpretq_u32_f32(b.get_high())));
755
+ return Vectorized<float>(r0, r1);
756
+ }
757
+
758
+ template <>
759
+ Vectorized<float> inline operator|(const Vectorized<float>& a, const Vectorized<float>& b) {
760
+ float32x4_t r0 = vreinterpretq_f32_u32(vorrq_u32(
761
+ vreinterpretq_u32_f32(a.get_low()),
762
+ vreinterpretq_u32_f32(b.get_low())));
763
+ float32x4_t r1 = vreinterpretq_f32_u32(vorrq_u32(
764
+ vreinterpretq_u32_f32(a.get_high()),
765
+ vreinterpretq_u32_f32(b.get_high())));
766
+ return Vectorized<float>(r0, r1);
767
+ }
768
+
769
+ template <>
770
+ Vectorized<float> inline operator^(const Vectorized<float>& a, const Vectorized<float>& b) {
771
+ float32x4_t r0 = vreinterpretq_f32_u32(veorq_u32(
772
+ vreinterpretq_u32_f32(a.get_low()),
773
+ vreinterpretq_u32_f32(b.get_low())));
774
+ float32x4_t r1 = vreinterpretq_f32_u32(veorq_u32(
775
+ vreinterpretq_u32_f32(a.get_high()),
776
+ vreinterpretq_u32_f32(b.get_high())));
777
+ return Vectorized<float>(r0, r1);
778
+ }
779
+
780
+ inline Vectorized<float> Vectorized<float>::eq(const Vectorized<float>& other) const {
781
+ return (*this == other) & Vectorized<float>(1.0f);
782
+ }
783
+
784
+ inline Vectorized<float> Vectorized<float>::ne(const Vectorized<float>& other) const {
785
+ return (*this != other) & Vectorized<float>(1.0f);
786
+ }
787
+
788
+ inline Vectorized<float> Vectorized<float>::gt(const Vectorized<float>& other) const {
789
+ return (*this > other) & Vectorized<float>(1.0f);
790
+ }
791
+
792
+ inline Vectorized<float> Vectorized<float>::ge(const Vectorized<float>& other) const {
793
+ return (*this >= other) & Vectorized<float>(1.0f);
794
+ }
795
+
796
+ inline Vectorized<float> Vectorized<float>::lt(const Vectorized<float>& other) const {
797
+ return (*this < other) & Vectorized<float>(1.0f);
798
+ }
799
+
800
+ inline Vectorized<float> Vectorized<float>::le(const Vectorized<float>& other) const {
801
+ return (*this <= other) & Vectorized<float>(1.0f);
802
+ }
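Note the two comparison flavours above: operator==/!=/< and friends return per-lane bit masks (all ones or all zeros reinterpreted as float), while eq()/ne()/gt()/ge()/lt()/le() turn those masks into numeric 1.0f/0.0f by ANDing with Vectorized<float>(1.0f). The lane-level arithmetic behind that conversion, as a self-contained sketch:

#include <cstdint>
#include <cstring>
#include <cstdio>

int main() {
  // A lane where the comparison held: all bits set.
  std::uint32_t mask = 0xFFFFFFFFu;
  float one = 1.0f;
  std::uint32_t one_bits;
  std::memcpy(&one_bits, &one, sizeof(one_bits));

  // (*this == other) & Vectorized<float>(1.0f), per lane: mask & bits(1.0f).
  std::uint32_t out_bits = mask & one_bits;
  float out;
  std::memcpy(&out, &out_bits, sizeof(out));
  std::printf("%f\n", out);  // 1.000000; a zero mask would give 0.000000
  return 0;
}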
803
+
804
+ template <>
805
+ inline void convert(const float* src, int32_t* dst, int64_t n) {
806
+ int64_t i;
807
+ #pragma unroll
808
+ for (i = 0; i <= (n - Vectorized<float>::size()); i += Vectorized<float>::size()) {
809
+ vst1q_s32(dst + i, vcvtq_s32_f32(vld1q_f32(src + i)));
810
+ vst1q_s32(dst + i + 4, vcvtq_s32_f32(vld1q_f32(src + i + 4)));
811
+ }
812
+ #pragma unroll
813
+ for (; i < n; i++) {
814
+ dst[i] = static_cast<int32_t>(src[i]);
815
+ }
816
+ }
817
+
818
+ template <>
819
+ inline void convert(const int32_t* src, float* dst, int64_t n) {
820
+ int64_t i;
821
+ #pragma unroll
822
+ for (i = 0; i <= (n - Vectorized<float>::size()); i += Vectorized<float>::size()) {
823
+ vst1q_f32(dst + i, vcvtq_f32_s32(vld1q_s32(src + i)));
824
+ vst1q_f32(dst + i + 4, vcvtq_f32_s32(vld1q_s32(src + i + 4)));
825
+ }
826
+ #pragma unroll
827
+ for (; i < n; i++) {
828
+ dst[i] = static_cast<float>(src[i]);
829
+ }
830
+ }
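Both convert() specializations above follow the same blocking pattern: the main loop consumes Vectorized<float>::size() (here 8) elements per step via two 4-lane NEON operations, and a scalar tail handles the remainder. A plain-C++ reference with the same structure (kWidth is illustrative; the real width comes from Vectorized<float>::size()):

#include <cstdint>

void convert_ref(const float* src, std::int32_t* dst, std::int64_t n) {
  constexpr std::int64_t kWidth = 8;  // stands in for Vectorized<float>::size()
  std::int64_t i = 0;
  // Blocked main loop, mirroring the two vcvtq_s32_f32/vst1q_s32 pairs above.
  for (; i <= n - kWidth; i += kWidth) {
    for (std::int64_t j = 0; j < kWidth; ++j) {
      dst[i + j] = static_cast<std::int32_t>(src[i + j]);
    }
  }
  // Scalar tail for the last n % kWidth elements.
  for (; i < n; ++i) {
    dst[i] = static_cast<std::int32_t>(src[i]);
  }
}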
831
+
832
+ template <>
833
+ Vectorized<float> inline fmadd(const Vectorized<float>& a, const Vectorized<float>& b, const Vectorized<float>& c) {
834
+ float32x4_t r0 = vfmaq_f32(c.get_low(), a.get_low(), b.get_low());
835
+ float32x4_t r1 = vfmaq_f32(c.get_high(), a.get_high(), b.get_high());
836
+ return Vectorized<float>(r0, r1);
837
+ }
838
+
839
+ template <>
840
+ Vectorized<float> inline fmsub(const Vectorized<float>& a, const Vectorized<float>& b, const Vectorized<float>& c) {
841
+ float32x4_t r0 = vfmsq_f32(c.get_low(), a.get_low(), b.get_low());
842
+ float32x4_t r1 = vfmsq_f32(c.get_high(), a.get_high(), b.get_high());
843
+ return Vectorized<float>(r0, r1);
844
+ }
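A scalar sanity check of the fused ops above. vfmaq_f32(c, a, b) fuses a*b + c, matching the usual fmadd convention; vfmsq_f32(c, a, b), however, evaluates c - a*b, so the sign of this fmsub may differ from implementations that define fmsub as a*b - c. Worth checking against the callers if it matters; the snippet below just prints the three scalar quantities:

#include <cstdio>

int main() {
  float a = 2.0f, b = 3.0f, c = 1.0f;
  std::printf("a*b + c = %f\n", a * b + c);  // fmadd convention: 7
  std::printf("c - a*b = %f\n", c - a * b);  // what vfmsq_f32(c, a, b) computes: -5
  std::printf("a*b - c = %f\n", a * b - c);  // common fmsub convention: 5
  return 0;
}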
845
+
846
+ inline Vectorized<float> Vectorized<float>::erf() const {
847
+ // constants
848
+ const Vectorized<float> neg_zero_vec(-0.f);
849
+ const Vectorized<float> one_vec(1.0f);
850
+ const Vectorized<float> p(0.3275911f);
851
+ const Vectorized<float> p1(0.254829592f);
852
+ const Vectorized<float> p2(-0.284496736f);
853
+ const Vectorized<float> p3(1.421413741f);
854
+ const Vectorized<float> p4(-1.453152027f);
855
+ const Vectorized<float> p5(1.061405429f);
856
+ // sign(x)
857
+ auto sign_mask = neg_zero_vec & *this;
858
+ auto abs_vec = this->abs();
859
+ // t = 1 / (p * abs(x) + 1)
860
+ auto tmp0 = fmadd(p, abs_vec, one_vec);
861
+ auto t = one_vec / tmp0;
862
+ // r = p5 * t ^ 4 + p4 * t ^ 3 + p3 * t ^ 2 + p2 * t + p1
863
+ auto tmp1 = fmadd(p5, t, p4);
864
+ auto tmp2 = fmadd(tmp1, t, p3);
865
+ auto tmp3 = fmadd(tmp2, t, p2);
866
+ auto r = fmadd(tmp3, t, p1);
867
+ // - exp(- x * x)
868
+ auto pow_2 = (*this) * (*this);
869
+ auto neg_pow_2 = pow_2 ^ neg_zero_vec;
870
+ auto tmp4 = neg_pow_2.map(std::exp); // This can be swapped for a faster implementation of exp.
871
+ auto tmp5 = tmp4 ^ neg_zero_vec;
872
+ // erf(x) = sign(x) * (1 - r * t * exp(- x * x))
873
+ auto tmp6 = t * tmp5;
874
+ auto tmp7 = fmadd(tmp6, r, one_vec);
875
+ return tmp7 ^ sign_mask;
876
+ }
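For readability, here is a scalar mirror of the vectorized erf() above (the classic rational polynomial approximation; constants copied verbatim from the vector code). It is an illustrative sketch, not part of the header:

#include <cmath>

float erf_approx(float x) {
  const float p  = 0.3275911f;
  const float p1 = 0.254829592f;
  const float p2 = -0.284496736f;
  const float p3 = 1.421413741f;
  const float p4 = -1.453152027f;
  const float p5 = 1.061405429f;
  const float sign = std::copysign(1.0f, x);  // sign(x), carried by sign_mask above
  const float ax = std::fabs(x);
  const float t = 1.0f / (p * ax + 1.0f);     // t = 1 / (p * |x| + 1)
  const float r = (((p5 * t + p4) * t + p3) * t + p2) * t + p1;  // Horner form of the fmadd chain
  return sign * (1.0f - r * t * std::exp(-ax * ax));
}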
877
+ #endif /* defined(aarch64) */
878
+
879
+ }} // namespace at::vec::CPU_CAPABILITY
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_int.h ADDED
@@ -0,0 +1,1540 @@
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+
6
+ #include <ATen/cpu/vec/intrinsics.h>
7
+ #include <ATen/cpu/vec/vec_base.h>
8
+ #include <c10/macros/Macros.h>
9
+ #include <c10/util/irange.h>
10
+
11
+ namespace at::vec {
12
+ inline namespace CPU_CAPABILITY {
13
+
14
+ #ifdef CPU_CAPABILITY_AVX2
15
+
16
+ struct Vectorizedi {
17
+ protected:
18
+ __m256i values;
19
+
20
+ static inline __m256i invert(const __m256i& v) {
21
+ const auto ones = _mm256_set1_epi64x(-1);
22
+ return _mm256_xor_si256(ones, v);
23
+ }
24
+ public:
25
+ Vectorizedi() {}
26
+ Vectorizedi(__m256i v) : values(v) {}
27
+ operator __m256i() const {
28
+ return values;
29
+ }
30
+ };
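invert() above is just XOR with an all-ones register, i.e. a bitwise NOT; the integer classes below build operator!=, operator<= and operator>= from it by negating the result of cmpeq/cmpgt. The same identity at scalar width, as a quick self-contained check:

#include <cstdint>
#include <cstdio>

int main() {
  std::uint64_t v = 0x00FF00FF00FF00FFull;
  std::uint64_t ones = ~0ull;  // what _mm256_set1_epi64x(-1) holds per lane
  std::printf("%016llX\n", static_cast<unsigned long long>(ones ^ v));  // FF00FF00FF00FF00
  std::printf("%016llX\n", static_cast<unsigned long long>(~v));        // same value
  return 0;
}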
31
+
32
+ #else
33
+
34
+ struct Vectorizedi {}; // dummy definition to make Vectorizedi always defined
35
+
36
+ #endif // CPU_CAPABILITY_AVX2
37
+
38
+ #ifdef CPU_CAPABILITY_AVX2
39
+
40
+ template <>
41
+ class Vectorized<int64_t> : public Vectorizedi {
42
+ private:
43
+ static const Vectorized<int64_t> ones;
44
+ public:
45
+ using value_type = int64_t;
46
+ using size_type = int;
47
+ static constexpr size_type size() {
48
+ return 4;
49
+ }
50
+ using Vectorizedi::Vectorizedi;
51
+ Vectorized() {}
52
+ Vectorized(int64_t v) { values = _mm256_set1_epi64x(v); }
53
+ Vectorized(int64_t val1, int64_t val2, int64_t val3, int64_t val4) {
54
+ values = _mm256_setr_epi64x(val1, val2, val3, val4);
55
+ }
56
+ template <int64_t mask>
57
+ static Vectorized<int64_t> blend(Vectorized<int64_t> a, Vectorized<int64_t> b) {
58
+ __at_align__ int64_t tmp_values[size()];
59
+ a.store(tmp_values);
60
+ if (mask & 0x01)
61
+ tmp_values[0] = _mm256_extract_epi64(b.values, 0);
62
+ if (mask & 0x02)
63
+ tmp_values[1] = _mm256_extract_epi64(b.values, 1);
64
+ if (mask & 0x04)
65
+ tmp_values[2] = _mm256_extract_epi64(b.values, 2);
66
+ if (mask & 0x08)
67
+ tmp_values[3] = _mm256_extract_epi64(b.values, 3);
68
+ return loadu(tmp_values);
69
+ }
70
+ static Vectorized<int64_t> blendv(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b,
71
+ const Vectorized<int64_t>& mask) {
72
+ return _mm256_blendv_epi8(a.values, b.values, mask.values);
73
+ }
74
+ template <typename step_t>
75
+ static Vectorized<int64_t> arange(int64_t base = 0, step_t step = static_cast<step_t>(1)) {
76
+ return Vectorized<int64_t>(base, base + step, base + 2 * step, base + 3 * step);
77
+ }
78
+ static Vectorized<int64_t>
79
+ set(Vectorized<int64_t> a, Vectorized<int64_t> b, int64_t count = size()) {
80
+ switch (count) {
81
+ case 0:
82
+ return a;
83
+ case 1:
84
+ return blend<1>(a, b);
85
+ case 2:
86
+ return blend<3>(a, b);
87
+ case 3:
88
+ return blend<7>(a, b);
89
+ }
90
+ return b;
91
+ }
92
+ static Vectorized<int64_t> loadu(const void* ptr) {
93
+ return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(ptr));
94
+ }
95
+ static Vectorized<int64_t> loadu(const void* ptr, int64_t count) {
96
+ __at_align__ int64_t tmp_values[size()];
97
+ // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
98
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
99
+ // instructions while a loop would be compiled to one instruction.
100
+ for (const auto i : c10::irange(size())) {
101
+ tmp_values[i] = 0;
102
+ }
103
+ std::memcpy(tmp_values, ptr, count * sizeof(int64_t));
104
+ return loadu(tmp_values);
105
+ }
106
+ void store(void* ptr, int count = size()) const {
107
+ if (count == size()) {
108
+ // ptr need not be aligned here. See
109
+ // https://software.intel.com/content/www/us/en/develop/documentation/cpp-compiler-developer-guide-and-reference/top/compiler-reference/intrinsics/intrinsics-for-intel-advanced-vector-extensions/intrinsics-for-load-and-store-operations-1/mm256-storeu-si256.html
110
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>(ptr), values);
111
+ } else if (count > 0) {
112
+ __at_align__ int64_t tmp_values[size()];
113
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>(tmp_values), values);
114
+ std::memcpy(ptr, tmp_values, count * sizeof(int64_t));
115
+ }
116
+ }
117
+ const int64_t& operator[](int idx) const = delete;
118
+ int64_t& operator[](int idx) = delete;
119
+ Vectorized<int64_t> abs() const {
120
+ auto zero = _mm256_set1_epi64x(0);
121
+ auto is_larger = _mm256_cmpgt_epi64(zero, values);
122
+ auto inverse = _mm256_xor_si256(values, is_larger);
123
+ return _mm256_sub_epi64(inverse, is_larger);
124
+ }
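The abs() above is the standard branch-free trick: the cmpgt produces an all-ones mask m for negative lanes, x ^ m flips the bits of those lanes, and subtracting m (i.e. adding 1) completes the two's-complement negation; non-negative lanes pass through unchanged. Scalar equivalent:

#include <cstdint>
#include <cstdio>

std::int64_t abs_branchless(std::int64_t x) {
  const std::int64_t m = x < 0 ? -1 : 0;  // the _mm256_cmpgt_epi64(0, x) mask
  return (x ^ m) - m;                     // ~x + 1 for negatives, x otherwise
}

int main() {
  std::printf("%lld %lld\n",
              static_cast<long long>(abs_branchless(-7)),
              static_cast<long long>(abs_branchless(42)));  // 7 42
  return 0;
}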
125
+ Vectorized<int64_t> real() const {
126
+ return *this;
127
+ }
128
+ Vectorized<int64_t> imag() const {
129
+ return _mm256_set1_epi64x(0);
130
+ }
131
+ Vectorized<int64_t> conj() const {
132
+ return *this;
133
+ }
134
+ Vectorized<int64_t> neg() const;
135
+ Vectorized<int64_t> operator==(const Vectorized<int64_t>& other) const {
136
+ return _mm256_cmpeq_epi64(values, other.values);
137
+ }
138
+ Vectorized<int64_t> operator!=(const Vectorized<int64_t>& other) const {
139
+ return invert(_mm256_cmpeq_epi64(values, other.values));
140
+ }
141
+ Vectorized<int64_t> operator<(const Vectorized<int64_t>& other) const {
142
+ return _mm256_cmpgt_epi64(other.values, values);
143
+ }
144
+ Vectorized<int64_t> operator<=(const Vectorized<int64_t>& other) const {
145
+ return invert(_mm256_cmpgt_epi64(values, other.values));
146
+ }
147
+ Vectorized<int64_t> operator>(const Vectorized<int64_t>& other) const {
148
+ return _mm256_cmpgt_epi64(values, other.values);
149
+ }
150
+ Vectorized<int64_t> operator>=(const Vectorized<int64_t>& other) const {
151
+ return invert(_mm256_cmpgt_epi64(other.values, values));
152
+ }
153
+
154
+ Vectorized<int64_t> eq(const Vectorized<int64_t>& other) const;
155
+ Vectorized<int64_t> ne(const Vectorized<int64_t>& other) const;
156
+ Vectorized<int64_t> gt(const Vectorized<int64_t>& other) const;
157
+ Vectorized<int64_t> ge(const Vectorized<int64_t>& other) const;
158
+ Vectorized<int64_t> lt(const Vectorized<int64_t>& other) const;
159
+ Vectorized<int64_t> le(const Vectorized<int64_t>& other) const;
160
+ };
161
+
162
+ template <>
163
+ class Vectorized<int32_t> : public Vectorizedi {
164
+ private:
165
+ static const Vectorized<int32_t> ones;
166
+ public:
167
+ using value_type = int32_t;
168
+ static constexpr int size() {
169
+ return 8;
170
+ }
171
+ using Vectorizedi::Vectorizedi;
172
+ Vectorized() {}
173
+ Vectorized(int32_t v) { values = _mm256_set1_epi32(v); }
174
+ Vectorized(int32_t val1, int32_t val2, int32_t val3, int32_t val4,
175
+ int32_t val5, int32_t val6, int32_t val7, int32_t val8) {
176
+ values = _mm256_setr_epi32(val1, val2, val3, val4, val5, val6, val7, val8);
177
+ }
178
+ template <int64_t mask>
179
+ static Vectorized<int32_t> blend(Vectorized<int32_t> a, Vectorized<int32_t> b) {
180
+ return _mm256_blend_epi32(a, b, mask);
181
+ }
182
+ static Vectorized<int32_t> blendv(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b,
183
+ const Vectorized<int32_t>& mask) {
184
+ return _mm256_blendv_epi8(a.values, b.values, mask.values);
185
+ }
186
+ template <typename step_t>
187
+ static Vectorized<int32_t> arange(int32_t base = 0, step_t step = static_cast<step_t>(1)) {
188
+ return Vectorized<int32_t>(
189
+ base, base + step, base + 2 * step, base + 3 * step,
190
+ base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step);
191
+ }
192
+ static Vectorized<int32_t>
193
+ set(Vectorized<int32_t> a, Vectorized<int32_t> b, int32_t count = size()) {
194
+ switch (count) {
195
+ case 0:
196
+ return a;
197
+ case 1:
198
+ return blend<1>(a, b);
199
+ case 2:
200
+ return blend<3>(a, b);
201
+ case 3:
202
+ return blend<7>(a, b);
203
+ case 4:
204
+ return blend<15>(a, b);
205
+ case 5:
206
+ return blend<31>(a, b);
207
+ case 6:
208
+ return blend<63>(a, b);
209
+ case 7:
210
+ return blend<127>(a, b);
211
+ }
212
+ return b;
213
+ }
214
+ static Vectorized<int32_t> loadu(const void* ptr) {
215
+ return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(ptr));
216
+ }
217
+ static Vectorized<int32_t> loadu(const void* ptr, int32_t count) {
218
+ __at_align__ int32_t tmp_values[size()];
219
+ // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
220
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
221
+ // instructions while a loop would be compiled to one instruction.
222
+ for (const auto i : c10::irange(size())) {
223
+ tmp_values[i] = 0;
224
+ }
225
+ std::memcpy(tmp_values, ptr, count * sizeof(int32_t));
226
+ return loadu(tmp_values);
227
+ }
228
+ void store(void* ptr, int count = size()) const {
229
+ if (count == size()) {
230
+ // ptr need not be aligned here. See
231
+ // https://software.intel.com/content/www/us/en/develop/documentation/cpp-compiler-developer-guide-and-reference/top/compiler-reference/intrinsics/intrinsics-for-intel-advanced-vector-extensions/intrinsics-for-load-and-store-operations-1/mm256-storeu-si256.html
232
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>(ptr), values);
233
+ } else if (count > 0) {
234
+ __at_align__ int32_t tmp_values[size()];
235
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>(tmp_values), values);
236
+ std::memcpy(ptr, tmp_values, count * sizeof(int32_t));
237
+ }
238
+ }
239
+ const int32_t& operator[](int idx) const = delete;
240
+ int32_t& operator[](int idx) = delete;
241
+ Vectorized<int32_t> abs() const {
242
+ return _mm256_abs_epi32(values);
243
+ }
244
+ Vectorized<int32_t> real() const {
245
+ return *this;
246
+ }
247
+ Vectorized<int32_t> imag() const {
248
+ return _mm256_set1_epi32(0);
249
+ }
250
+ Vectorized<int32_t> conj() const {
251
+ return *this;
252
+ }
253
+ Vectorized<int32_t> neg() const;
254
+ Vectorized<int32_t> operator==(const Vectorized<int32_t>& other) const {
255
+ return _mm256_cmpeq_epi32(values, other.values);
256
+ }
257
+ Vectorized<int32_t> operator!=(const Vectorized<int32_t>& other) const {
258
+ return invert(_mm256_cmpeq_epi32(values, other.values));
259
+ }
260
+ Vectorized<int32_t> operator<(const Vectorized<int32_t>& other) const {
261
+ return _mm256_cmpgt_epi32(other.values, values);
262
+ }
263
+ Vectorized<int32_t> operator<=(const Vectorized<int32_t>& other) const {
264
+ return invert(_mm256_cmpgt_epi32(values, other.values));
265
+ }
266
+ Vectorized<int32_t> operator>(const Vectorized<int32_t>& other) const {
267
+ return _mm256_cmpgt_epi32(values, other.values);
268
+ }
269
+ Vectorized<int32_t> operator>=(const Vectorized<int32_t>& other) const {
270
+ return invert(_mm256_cmpgt_epi32(other.values, values));
271
+ }
272
+ Vectorized<int32_t> eq(const Vectorized<int32_t>& other) const;
273
+ Vectorized<int32_t> ne(const Vectorized<int32_t>& other) const;
274
+ Vectorized<int32_t> gt(const Vectorized<int32_t>& other) const;
275
+ Vectorized<int32_t> ge(const Vectorized<int32_t>& other) const;
276
+ Vectorized<int32_t> lt(const Vectorized<int32_t>& other) const;
277
+ Vectorized<int32_t> le(const Vectorized<int32_t>& other) const;
278
+ };
279
+
280
+ template <>
281
+ inline void convert(const int32_t *src, float *dst, int64_t n) {
282
+ int64_t i;
283
+ // int32_t and float have the same size
284
+ #ifndef _MSC_VER
285
+ # pragma unroll
286
+ #endif
287
+ for (i = 0; i <= (n - Vectorized<int32_t>::size()); i += Vectorized<int32_t>::size()) {
288
+ auto input_vec = _mm256_loadu_si256(reinterpret_cast<const __m256i*>(src + i));
289
+ auto output_vec = _mm256_cvtepi32_ps(input_vec);
290
+ _mm256_storeu_ps(reinterpret_cast<float*>(dst + i), output_vec);
291
+ }
292
+ #ifndef _MSC_VER
293
+ # pragma unroll
294
+ #endif
295
+ for (; i < n; i++) {
296
+ dst[i] = static_cast<float>(src[i]);
297
+ }
298
+ }
299
+
300
+ template <>
301
+ inline void convert(const int32_t *src, double *dst, int64_t n) {
302
+ int64_t i;
303
+ // int32_t is half the size of double
304
+ #ifndef _MSC_VER
305
+ # pragma unroll
306
+ #endif
307
+ for (i = 0; i <= (n - Vectorized<double>::size()); i += Vectorized<double>::size()) {
308
+ auto input_128_vec = _mm_loadu_si128(reinterpret_cast<const __m128i*>(src + i));
309
+ auto output_vec = _mm256_cvtepi32_pd(input_128_vec);
310
+ _mm256_storeu_pd(reinterpret_cast<double*>(dst + i), output_vec);
311
+ }
312
+ #ifndef _MSC_VER
313
+ # pragma unroll
314
+ #endif
315
+ for (; i < n; i++) {
316
+ dst[i] = static_cast<double>(src[i]);
317
+ }
318
+ }
319
+
320
+ template <>
321
+ class Vectorized<int16_t> : public Vectorizedi {
322
+ private:
323
+ static const Vectorized<int16_t> ones;
324
+ public:
325
+ using value_type = int16_t;
326
+ static constexpr int size() {
327
+ return 16;
328
+ }
329
+ using Vectorizedi::Vectorizedi;
330
+ Vectorized() {}
331
+ Vectorized(int16_t v) { values = _mm256_set1_epi16(v); }
332
+ Vectorized(int16_t val1, int16_t val2, int16_t val3, int16_t val4,
333
+ int16_t val5, int16_t val6, int16_t val7, int16_t val8,
334
+ int16_t val9, int16_t val10, int16_t val11, int16_t val12,
335
+ int16_t val13, int16_t val14, int16_t val15, int16_t val16) {
336
+ values = _mm256_setr_epi16(val1, val2, val3, val4, val5, val6, val7, val8,
337
+ val9, val10, val11, val12, val13, val14, val15, val16);
338
+ }
339
+ template <int64_t mask>
340
+ static Vectorized<int16_t> blend(Vectorized<int16_t> a, Vectorized<int16_t> b) {
341
+ __at_align__ int16_t tmp_values[size()];
342
+ a.store(tmp_values);
343
+ if (mask & 0x01)
344
+ tmp_values[0] = _mm256_extract_epi16(b.values, 0);
345
+ if (mask & 0x02)
346
+ tmp_values[1] = _mm256_extract_epi16(b.values, 1);
347
+ if (mask & 0x04)
348
+ tmp_values[2] = _mm256_extract_epi16(b.values, 2);
349
+ if (mask & 0x08)
350
+ tmp_values[3] = _mm256_extract_epi16(b.values, 3);
351
+ if (mask & 0x10)
352
+ tmp_values[4] = _mm256_extract_epi16(b.values, 4);
353
+ if (mask & 0x20)
354
+ tmp_values[5] = _mm256_extract_epi16(b.values, 5);
355
+ if (mask & 0x40)
356
+ tmp_values[6] = _mm256_extract_epi16(b.values, 6);
357
+ if (mask & 0x80)
358
+ tmp_values[7] = _mm256_extract_epi16(b.values, 7);
359
+ if (mask & 0x100)
360
+ tmp_values[8] = _mm256_extract_epi16(b.values, 8);
361
+ if (mask & 0x200)
362
+ tmp_values[9] = _mm256_extract_epi16(b.values, 9);
363
+ if (mask & 0x400)
364
+ tmp_values[10] = _mm256_extract_epi16(b.values, 10);
365
+ if (mask & 0x800)
366
+ tmp_values[11] = _mm256_extract_epi16(b.values, 11);
367
+ if (mask & 0x1000)
368
+ tmp_values[12] = _mm256_extract_epi16(b.values, 12);
369
+ if (mask & 0x2000)
370
+ tmp_values[13] = _mm256_extract_epi16(b.values, 13);
371
+ if (mask & 0x4000)
372
+ tmp_values[14] = _mm256_extract_epi16(b.values, 14);
373
+ if (mask & 0x8000)
374
+ tmp_values[15] = _mm256_extract_epi16(b.values, 15);
375
+ return loadu(tmp_values);
376
+ }
377
+ static Vectorized<int16_t> blendv(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b,
378
+ const Vectorized<int16_t>& mask) {
379
+ return _mm256_blendv_epi8(a.values, b.values, mask.values);
380
+ }
381
+ template <typename step_t>
382
+ static Vectorized<int16_t> arange(int16_t base = 0, step_t step = static_cast<step_t>(1)) {
383
+ return Vectorized<int16_t>(
384
+ base, base + step, base + 2 * step, base + 3 * step,
385
+ base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step,
386
+ base + 8 * step, base + 9 * step, base + 10 * step, base + 11 * step,
387
+ base + 12 * step, base + 13 * step, base + 14 * step, base + 15 * step);
388
+ }
389
+ static Vectorized<int16_t>
390
+ set(Vectorized<int16_t> a, Vectorized<int16_t> b, int16_t count = size()) {
391
+ switch (count) {
392
+ case 0:
393
+ return a;
394
+ case 1:
395
+ return blend<1>(a, b);
396
+ case 2:
397
+ return blend<3>(a, b);
398
+ case 3:
399
+ return blend<7>(a, b);
400
+ case 4:
401
+ return blend<15>(a, b);
402
+ case 5:
403
+ return blend<31>(a, b);
404
+ case 6:
405
+ return blend<63>(a, b);
406
+ case 7:
407
+ return blend<127>(a, b);
408
+ case 8:
409
+ return blend<255>(a, b);
410
+ case 9:
411
+ return blend<511>(a, b);
412
+ case 10:
413
+ return blend<1023>(a, b);
414
+ case 11:
415
+ return blend<2047>(a, b);
416
+ case 12:
417
+ return blend<4095>(a, b);
418
+ case 13:
419
+ return blend<8191>(a, b);
420
+ case 14:
421
+ return blend<16383>(a, b);
422
+ case 15:
423
+ return blend<32767>(a, b);
424
+ }
425
+ return b;
426
+ }
427
+ static Vectorized<int16_t> loadu(const void* ptr) {
428
+ return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(ptr));
429
+ }
430
+ static Vectorized<int16_t> loadu(const void* ptr, int16_t count) {
431
+ __at_align__ int16_t tmp_values[size()];
432
+ // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
433
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
434
+ // instructions while a loop would be compiled to one instruction.
435
+ for (const auto i : c10::irange(size())) {
436
+ tmp_values[i] = 0;
437
+ }
438
+ std::memcpy(tmp_values, ptr, count * sizeof(int16_t));
439
+ return loadu(tmp_values);
440
+ }
441
+ void store(void* ptr, int count = size()) const {
442
+ if (count == size()) {
443
+ // ptr need not be aligned here. See
444
+ // https://software.intel.com/content/www/us/en/develop/documentation/cpp-compiler-developer-guide-and-reference/top/compiler-reference/intrinsics/intrinsics-for-intel-advanced-vector-extensions/intrinsics-for-load-and-store-operations-1/mm256-storeu-si256.html
445
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>(ptr), values);
446
+ } else if (count > 0) {
447
+ __at_align__ int16_t tmp_values[size()];
448
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>(tmp_values), values);
449
+ std::memcpy(ptr, tmp_values, count * sizeof(int16_t));
450
+ }
451
+ }
452
+ const int16_t& operator[](int idx) const = delete;
453
+ int16_t& operator[](int idx) = delete;
454
+ Vectorized<int16_t> abs() const {
455
+ return _mm256_abs_epi16(values);
456
+ }
457
+ Vectorized<int16_t> real() const {
458
+ return *this;
459
+ }
460
+ Vectorized<int16_t> imag() const {
461
+ return _mm256_set1_epi16(0);
462
+ }
463
+ Vectorized<int16_t> conj() const {
464
+ return *this;
465
+ }
466
+ Vectorized<int16_t> neg() const;
467
+ Vectorized<int16_t> operator==(const Vectorized<int16_t>& other) const {
468
+ return _mm256_cmpeq_epi16(values, other.values);
469
+ }
470
+ Vectorized<int16_t> operator!=(const Vectorized<int16_t>& other) const {
471
+ return invert(_mm256_cmpeq_epi16(values, other.values));
472
+ }
473
+ Vectorized<int16_t> operator<(const Vectorized<int16_t>& other) const {
474
+ return _mm256_cmpgt_epi16(other.values, values);
475
+ }
476
+ Vectorized<int16_t> operator<=(const Vectorized<int16_t>& other) const {
477
+ return invert(_mm256_cmpgt_epi16(values, other.values));
478
+ }
479
+ Vectorized<int16_t> operator>(const Vectorized<int16_t>& other) const {
480
+ return _mm256_cmpgt_epi16(values, other.values);
481
+ }
482
+ Vectorized<int16_t> operator>=(const Vectorized<int16_t>& other) const {
483
+ return invert(_mm256_cmpgt_epi16(other.values, values));
484
+ }
485
+
486
+ Vectorized<int16_t> eq(const Vectorized<int16_t>& other) const;
487
+ Vectorized<int16_t> ne(const Vectorized<int16_t>& other) const;
488
+ Vectorized<int16_t> gt(const Vectorized<int16_t>& other) const;
489
+ Vectorized<int16_t> ge(const Vectorized<int16_t>& other) const;
490
+ Vectorized<int16_t> lt(const Vectorized<int16_t>& other) const;
491
+ Vectorized<int16_t> le(const Vectorized<int16_t>& other) const;
492
+ };
493
+
494
+ template <typename T>
495
+ class Vectorized8 : public Vectorizedi {
496
+ static_assert(
497
+ std::is_same<T, int8_t>::value || std::is_same<T, uint8_t>::value,
498
+ "Only int8_t/uint8_t are supported");
499
+ protected:
500
+ static const Vectorized<T> ones;
501
+ public:
502
+ using value_type = T;
503
+ static constexpr int size() {
504
+ return 32;
505
+ }
506
+ using Vectorizedi::Vectorizedi;
507
+ Vectorized8() {}
508
+ Vectorized8(T v) { values = _mm256_set1_epi8(v); }
509
+ Vectorized8(T val1, T val2, T val3, T val4,
510
+ T val5, T val6, T val7, T val8,
511
+ T val9, T val10, T val11, T val12,
512
+ T val13, T val14, T val15, T val16,
513
+ T val17, T val18, T val19, T val20,
514
+ T val21, T val22, T val23, T val24,
515
+ T val25, T val26, T val27, T val28,
516
+ T val29, T val30, T val31, T val32) {
517
+ values = _mm256_setr_epi8(val1, val2, val3, val4, val5, val6, val7, val8,
518
+ val9, val10, val11, val12, val13, val14, val15, val16,
519
+ val17, val18, val19, val20, val21, val22, val23, val24,
520
+ val25, val26, val27, val28, val29, val30, val31, val32);
521
+ }
522
+ template <int64_t mask>
523
+ static Vectorized<T> blend(Vectorized<T> a, Vectorized<T> b) {
524
+ __at_align__ T tmp_values[size()];
525
+ a.store(tmp_values);
526
+ if (mask & 0x01)
527
+ tmp_values[0] = _mm256_extract_epi8(b.values, 0);
528
+ if (mask & 0x02)
529
+ tmp_values[1] = _mm256_extract_epi8(b.values, 1);
530
+ if (mask & 0x04)
531
+ tmp_values[2] = _mm256_extract_epi8(b.values, 2);
532
+ if (mask & 0x08)
533
+ tmp_values[3] = _mm256_extract_epi8(b.values, 3);
534
+ if (mask & 0x10)
535
+ tmp_values[4] = _mm256_extract_epi8(b.values, 4);
536
+ if (mask & 0x20)
537
+ tmp_values[5] = _mm256_extract_epi8(b.values, 5);
538
+ if (mask & 0x40)
539
+ tmp_values[6] = _mm256_extract_epi8(b.values, 6);
540
+ if (mask & 0x80)
541
+ tmp_values[7] = _mm256_extract_epi8(b.values, 7);
542
+ if (mask & 0x100)
543
+ tmp_values[8] = _mm256_extract_epi8(b.values, 8);
544
+ if (mask & 0x200)
545
+ tmp_values[9] = _mm256_extract_epi8(b.values, 9);
546
+ if (mask & 0x400)
547
+ tmp_values[10] = _mm256_extract_epi8(b.values, 10);
548
+ if (mask & 0x800)
549
+ tmp_values[11] = _mm256_extract_epi8(b.values, 11);
550
+ if (mask & 0x1000)
551
+ tmp_values[12] = _mm256_extract_epi8(b.values, 12);
552
+ if (mask & 0x2000)
553
+ tmp_values[13] = _mm256_extract_epi8(b.values, 13);
554
+ if (mask & 0x4000)
555
+ tmp_values[14] = _mm256_extract_epi8(b.values, 14);
556
+ if (mask & 0x8000)
557
+ tmp_values[15] = _mm256_extract_epi8(b.values, 15);
558
+ if (mask & 0x010000)
559
+ tmp_values[16] = _mm256_extract_epi8(b.values, 16);
560
+ if (mask & 0x020000)
561
+ tmp_values[17] = _mm256_extract_epi8(b.values, 17);
562
+ if (mask & 0x040000)
563
+ tmp_values[18] = _mm256_extract_epi8(b.values, 18);
564
+ if (mask & 0x080000)
565
+ tmp_values[19] = _mm256_extract_epi8(b.values, 19);
566
+ if (mask & 0x100000)
567
+ tmp_values[20] = _mm256_extract_epi8(b.values, 20);
568
+ if (mask & 0x200000)
569
+ tmp_values[21] = _mm256_extract_epi8(b.values, 21);
570
+ if (mask & 0x400000)
571
+ tmp_values[22] = _mm256_extract_epi8(b.values, 22);
572
+ if (mask & 0x800000)
573
+ tmp_values[23] = _mm256_extract_epi8(b.values, 23);
574
+ if (mask & 0x1000000)
575
+ tmp_values[24] = _mm256_extract_epi8(b.values, 24);
576
+ if (mask & 0x2000000)
577
+ tmp_values[25] = _mm256_extract_epi8(b.values, 25);
578
+ if (mask & 0x4000000)
579
+ tmp_values[26] = _mm256_extract_epi8(b.values, 26);
580
+ if (mask & 0x8000000)
581
+ tmp_values[27] = _mm256_extract_epi8(b.values, 27);
582
+ if (mask & 0x10000000)
583
+ tmp_values[28] = _mm256_extract_epi8(b.values, 28);
584
+ if (mask & 0x20000000)
585
+ tmp_values[29] = _mm256_extract_epi8(b.values, 29);
586
+ if (mask & 0x40000000)
587
+ tmp_values[30] = _mm256_extract_epi8(b.values, 30);
588
+ if (mask & 0x80000000)
589
+ tmp_values[31] = _mm256_extract_epi8(b.values, 31);
590
+ return loadu(tmp_values);
591
+ }
592
+ static Vectorized<T> blendv(const Vectorized<T>& a, const Vectorized<T>& b,
593
+ const Vectorized<T>& mask) {
594
+ return _mm256_blendv_epi8(a.values, b.values, mask.values);
595
+ }
596
+ template <typename step_t>
597
+ static Vectorized<T> arange(T base = 0, step_t step = static_cast<step_t>(1)) {
598
+ return Vectorized<T>(
599
+ base, base + step, base + 2 * step, base + 3 * step,
600
+ base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step,
601
+ base + 8 * step, base + 9 * step, base + 10 * step, base + 11 * step,
602
+ base + 12 * step, base + 13 * step, base + 14 * step, base + 15 * step,
603
+ base + 16 * step, base + 17 * step, base + 18 * step, base + 19 * step,
604
+ base + 20 * step, base + 21 * step, base + 22 * step, base + 23 * step,
605
+ base + 24 * step, base + 25 * step, base + 26 * step, base + 27 * step,
606
+ base + 28 * step, base + 29 * step, base + 30 * step, base + 31 * step);
607
+ }
608
+ static Vectorized<T>
609
+ set(Vectorized<T> a, Vectorized<T> b, T count = size()) {
610
+ switch (count) {
611
+ case 0:
612
+ return a;
613
+ case 1:
614
+ return blend<0x1>(a, b);
615
+ case 2:
616
+ return blend<0x3>(a, b);
617
+ case 3:
618
+ return blend<0x7>(a, b);
619
+ case 4:
620
+ return blend<0xF>(a, b);
621
+ case 5:
622
+ return blend<0x1F>(a, b);
623
+ case 6:
624
+ return blend<0x3F>(a, b);
625
+ case 7:
626
+ return blend<0x7F>(a, b);
627
+ case 8:
628
+ return blend<0xFF>(a, b);
629
+ case 9:
630
+ return blend<0x1FF>(a, b);
631
+ case 10:
632
+ return blend<0x3FF>(a, b);
633
+ case 11:
634
+ return blend<0x7FF>(a, b);
635
+ case 12:
636
+ return blend<0xFFF>(a, b);
637
+ case 13:
638
+ return blend<0x1FFF>(a, b);
639
+ case 14:
640
+ return blend<0x3FFF>(a, b);
641
+ case 15:
642
+ return blend<0x7FFF>(a, b);
643
+ case 16:
644
+ return blend<0xFFFF>(a, b);
645
+ case 17:
646
+ return blend<0x1FFFF>(a, b);
647
+ case 18:
648
+ return blend<0x3FFFF>(a, b);
649
+ case 19:
650
+ return blend<0x7FFFF>(a, b);
651
+ case 20:
652
+ return blend<0xFFFFF>(a, b);
653
+ case 21:
654
+ return blend<0x1FFFFF>(a, b);
655
+ case 22:
656
+ return blend<0x3FFFFF>(a, b);
657
+ case 23:
658
+ return blend<0x7FFFFF>(a, b);
659
+ case 24:
660
+ return blend<0xFFFFFF>(a, b);
661
+ case 25:
662
+ return blend<0x1FFFFFF>(a, b);
663
+ case 26:
664
+ return blend<0x3FFFFFF>(a, b);
665
+ case 27:
666
+ return blend<0x7FFFFFF>(a, b);
667
+ case 28:
668
+ return blend<0xFFFFFFF>(a, b);
669
+ case 29:
670
+ return blend<0x1FFFFFFF>(a, b);
671
+ case 30:
672
+ return blend<0x3FFFFFFF>(a, b);
673
+ case 31:
674
+ return blend<0x7FFFFFFF>(a, b);
675
+ }
676
+ return b;
677
+ }
678
+ static Vectorized<T> loadu(const void* ptr) {
679
+ return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(ptr));
680
+ }
681
+ static Vectorized<T> loadu_one_fourth(const void* ptr) {
682
+ // Fast path when loading exactly 8 elements.
683
+ // Note: we did not merge this into loadu(const void* ptr, T count) as a fast path,
684
+ // because loadu(const void* ptr, T count) requires the upper 128 bits to be zero-initialized.
685
+ // However, by using _mm256_castsi128_si256, the upper 128 bits of the result are undefined.
686
+ // TODO<leslie> We can use _mm256_zextsi128_si256 in the future,
687
+ // since gcc 9.3 doesn't support it now.
688
+ __m128i input_128 = _mm_loadl_epi64(reinterpret_cast<const __m128i*>(ptr));
689
+ return _mm256_castsi128_si256(input_128);
690
+ }
691
+ static Vectorized<T> loadu(const void* ptr, T count) {
692
+ __at_align__ T tmp_values[size()];
693
+ // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
694
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
695
+ // instructions while a loop would be compiled to one instruction.
696
+ for (const auto i : c10::irange(size())) {
697
+ tmp_values[i] = 0;
698
+ }
699
+ std::memcpy(tmp_values, ptr, count * sizeof(T));
700
+ return loadu(tmp_values);
701
+ }
702
+ void store(void* ptr, int count = size()) const {
703
+ if (count == size()) {
704
+ // ptr need not be aligned here. See
705
+ // https://software.intel.com/content/www/us/en/develop/documentation/cpp-compiler-developer-guide-and-reference/top/compiler-reference/intrinsics/intrinsics-for-intel-advanced-vector-extensions/intrinsics-for-load-and-store-operations-1/mm256-storeu-si256.html
706
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>(ptr), values);
707
+ } else if (count > 0) {
708
+ if (count == 8) {
709
+ // Fast path when storing exactly 8 elements
710
+ _mm_storel_epi64(reinterpret_cast<__m128i*>(ptr), _mm256_castsi256_si128(values));
711
+ } else {
712
+ __at_align__ T tmp_values[size()];
713
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>(tmp_values), values);
714
+ std::memcpy(ptr, tmp_values, count * sizeof(T));
715
+ }
716
+ }
717
+ }
718
+ const T& operator[](int idx) const = delete;
719
+ T& operator[](int idx) = delete;
720
+ Vectorized<T> real() const {
721
+ return *this;
722
+ }
723
+ Vectorized<T> imag() const {
724
+ return _mm256_set1_epi8(0);
725
+ }
726
+ Vectorized<T> conj() const {
727
+ return *this;
728
+ }
729
+ };
730
+
731
+ template<>
732
+ class Vectorized<int8_t>: public Vectorized8<int8_t> {
733
+ public:
734
+ using Vectorized8::Vectorized8;
735
+
736
+ Vectorized<int8_t> neg() const;
737
+
738
+ Vectorized<int8_t> abs() const {
739
+ return _mm256_abs_epi8(values);
740
+ }
741
+
742
+ Vectorized<int8_t> operator==(const Vectorized<int8_t>& other) const {
743
+ return _mm256_cmpeq_epi8(values, other.values);
744
+ }
745
+ Vectorized<int8_t> operator!=(const Vectorized<int8_t>& other) const {
746
+ return invert(_mm256_cmpeq_epi8(values, other.values));
747
+ }
748
+ Vectorized<int8_t> operator<(const Vectorized<int8_t>& other) const {
749
+ return _mm256_cmpgt_epi8(other.values, values);
750
+ }
751
+ Vectorized<int8_t> operator<=(const Vectorized<int8_t>& other) const {
752
+ return invert(_mm256_cmpgt_epi8(values, other.values));
753
+ }
754
+ Vectorized<int8_t> operator>(const Vectorized<int8_t>& other) const {
755
+ return other < *this;
756
+ }
757
+ Vectorized<int8_t> operator>=(const Vectorized<int8_t>& other) const {
758
+ return other <= *this;
759
+ }
760
+
761
+ Vectorized<int8_t> eq(const Vectorized<int8_t>& other) const;
762
+ Vectorized<int8_t> ne(const Vectorized<int8_t>& other) const;
763
+ Vectorized<int8_t> gt(const Vectorized<int8_t>& other) const;
764
+ Vectorized<int8_t> ge(const Vectorized<int8_t>& other) const;
765
+ Vectorized<int8_t> lt(const Vectorized<int8_t>& other) const;
766
+ Vectorized<int8_t> le(const Vectorized<int8_t>& other) const;
767
+ };
768
+
769
+ template<>
770
+ class Vectorized<uint8_t>: public Vectorized8<uint8_t> {
771
+ public:
772
+ using Vectorized8::Vectorized8;
773
+
774
+ Vectorized<uint8_t> neg() const;
775
+
776
+ Vectorized<uint8_t> abs() const {
777
+ return *this;
778
+ }
779
+
780
+ Vectorized<uint8_t> operator==(const Vectorized<uint8_t>& other) const {
781
+ return _mm256_cmpeq_epi8(values, other.values);
782
+ }
783
+ Vectorized<uint8_t> operator!=(const Vectorized<uint8_t>& other) const {
784
+ return invert(_mm256_cmpeq_epi8(values, other.values));
785
+ }
786
+ Vectorized<uint8_t> operator<(const Vectorized<uint8_t>& other) const {
787
+ __m256i max = _mm256_max_epu8(values, other.values);
788
+ return invert(_mm256_cmpeq_epi8(max, values));
789
+ }
790
+ Vectorized<uint8_t> operator<=(const Vectorized<uint8_t>& other) const {
791
+ __m256i max = _mm256_max_epu8(values, other.values);
792
+ return _mm256_cmpeq_epi8(max, other.values);
793
+ }
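AVX2 only provides signed byte comparisons, so the uint8_t operators above lean on _mm256_max_epu8: a <= b exactly when max(a, b) == b, and a < b when max(a, b) != a. The scalar version of that identity:

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  std::uint8_t a = 200, b = 100;        // as signed bytes, 200 would look negative
  bool le = (std::max(a, b) == b);      // the operator<= idiom
  bool lt = (std::max(a, b) != a);      // the operator< idiom
  std::printf("a <= b: %d, a < b: %d\n",
              static_cast<int>(le), static_cast<int>(lt));  // 0, 0
  return 0;
}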
794
+ Vectorized<uint8_t> operator>(const Vectorized<uint8_t>& other) const {
795
+ return other < *this;
796
+ }
797
+ Vectorized<uint8_t> operator>=(const Vectorized<uint8_t>& other) const {
798
+ return other <= *this;
799
+ }
800
+
801
+ Vectorized<uint8_t> eq(const Vectorized<uint8_t>& other) const;
802
+ Vectorized<uint8_t> ne(const Vectorized<uint8_t>& other) const;
803
+ Vectorized<uint8_t> gt(const Vectorized<uint8_t>& other) const;
804
+ Vectorized<uint8_t> ge(const Vectorized<uint8_t>& other) const;
805
+ Vectorized<uint8_t> lt(const Vectorized<uint8_t>& other) const;
806
+ Vectorized<uint8_t> le(const Vectorized<uint8_t>& other) const;
807
+ };
808
+
809
+ template <>
810
+ Vectorized<int64_t> inline operator+(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
811
+ return _mm256_add_epi64(a, b);
812
+ }
813
+
814
+ template <>
815
+ Vectorized<int32_t> inline operator+(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
816
+ return _mm256_add_epi32(a, b);
817
+ }
818
+
819
+ template <>
820
+ Vectorized<int16_t> inline operator+(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
821
+ return _mm256_add_epi16(a, b);
822
+ }
823
+
824
+ template <>
825
+ Vectorized<int8_t> inline operator+(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
826
+ return _mm256_add_epi8(a, b);
827
+ }
828
+
829
+ template <>
830
+ Vectorized<uint8_t> inline operator+(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
831
+ return _mm256_add_epi8(a, b);
832
+ }
833
+
834
+ template <>
835
+ Vectorized<int64_t> inline operator-(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
836
+ return _mm256_sub_epi64(a, b);
837
+ }
838
+
839
+ template <>
840
+ Vectorized<int32_t> inline operator-(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
841
+ return _mm256_sub_epi32(a, b);
842
+ }
843
+
844
+ template <>
845
+ Vectorized<int16_t> inline operator-(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
846
+ return _mm256_sub_epi16(a, b);
847
+ }
848
+
849
+ template <>
850
+ Vectorized<int8_t> inline operator-(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
851
+ return _mm256_sub_epi8(a, b);
852
+ }
853
+
854
+ template <>
855
+ Vectorized<uint8_t> inline operator-(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
856
+ return _mm256_sub_epi8(a, b);
857
+ }
858
+
859
+ // Negation. Defined here so we can utilize operator-
860
+ inline Vectorized<int64_t> Vectorized<int64_t>::neg() const {
861
+ return Vectorized<int64_t>(0) - *this;
862
+ }
863
+
864
+ inline Vectorized<int32_t> Vectorized<int32_t>::neg() const {
865
+ return Vectorized<int32_t>(0) - *this;
866
+ }
867
+
868
+ inline Vectorized<int16_t> Vectorized<int16_t>::neg() const {
869
+ return Vectorized<int16_t>(0) - *this;
870
+ }
871
+
872
+ inline Vectorized<int8_t> Vectorized<int8_t>::neg() const {
873
+ return Vectorized<int8_t>(0) - *this;
874
+ }
875
+
876
+ inline Vectorized<uint8_t> Vectorized<uint8_t>::neg() const {
877
+ return Vectorized<uint8_t>(0) - *this;
878
+ }
879
+
880
+ // Emulate operations with no native 64-bit support in avx,
881
+ // by extracting each element, performing the operation pointwise,
882
+ // then combining the results into a vector.
883
+ template <typename op_t>
884
+ Vectorized<int64_t> inline emulate(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b, const op_t& op) {
885
+ int64_t a0 = _mm256_extract_epi64(a, 0);
886
+ int64_t a1 = _mm256_extract_epi64(a, 1);
887
+ int64_t a2 = _mm256_extract_epi64(a, 2);
888
+ int64_t a3 = _mm256_extract_epi64(a, 3);
889
+
890
+ int64_t b0 = _mm256_extract_epi64(b, 0);
891
+ int64_t b1 = _mm256_extract_epi64(b, 1);
892
+ int64_t b2 = _mm256_extract_epi64(b, 2);
893
+ int64_t b3 = _mm256_extract_epi64(b, 3);
894
+
895
+ int64_t c0 = op(a0, b0);
896
+ int64_t c1 = op(a1, b1);
897
+ int64_t c2 = op(a2, b2);
898
+ int64_t c3 = op(a3, b3);
899
+
900
+ return _mm256_set_epi64x(c3, c2, c1, c0);
901
+ }
902
+
903
+ template <typename op_t>
904
+ Vectorized<int64_t> inline emulate(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b, const Vectorized<int64_t>& c, const op_t& op) {
905
+ int64_t a0 = _mm256_extract_epi64(a, 0);
906
+ int64_t a1 = _mm256_extract_epi64(a, 1);
907
+ int64_t a2 = _mm256_extract_epi64(a, 2);
908
+ int64_t a3 = _mm256_extract_epi64(a, 3);
909
+
910
+ int64_t b0 = _mm256_extract_epi64(b, 0);
911
+ int64_t b1 = _mm256_extract_epi64(b, 1);
912
+ int64_t b2 = _mm256_extract_epi64(b, 2);
913
+ int64_t b3 = _mm256_extract_epi64(b, 3);
914
+
915
+ int64_t c0 = _mm256_extract_epi64(c, 0);
916
+ int64_t c1 = _mm256_extract_epi64(c, 1);
917
+ int64_t c2 = _mm256_extract_epi64(c, 2);
918
+ int64_t c3 = _mm256_extract_epi64(c, 3);
919
+
920
+ int64_t d0 = op(a0, b0, c0);
921
+ int64_t d1 = op(a1, b1, c1);
922
+ int64_t d2 = op(a2, b2, c2);
923
+ int64_t d3 = op(a3, b3, c3);
924
+
925
+ return _mm256_set_epi64x(d3, d2, d1, d0);
926
+ }
927
+
928
+ // AVX2 has no intrinsic for int64_t multiply, so it needs to be emulated.
929
+ // This could be implemented more efficiently using epi32 instructions
930
+ // This is also technically AVX-compatible, but then we'll need AVX
931
+ // code for add as well.
932
+ // Note: intentionally ignores undefined behavior like (-lowest * -1).
933
+ template <>
934
+ Vectorized<int64_t> inline operator*(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
935
+ return emulate(a, b, [](int64_t a_point, int64_t b_point) __ubsan_ignore_undefined__ {return a_point * b_point;});
936
+ }
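The lambda above is tagged __ubsan_ignore_undefined__ because, as the note says, it intentionally ignores cases like (-lowest * -1): signed 64-bit overflow is undefined behaviour in C++ even though the vector semantics are simply wrap-around. A hedged scalar sketch of expressing the same wrap-around without UB (the conversion back to int64_t is modular as of C++20, and behaves that way on common two's-complement targets before that):

#include <cstdint>
#include <cstdio>

std::int64_t mul_wrap(std::int64_t a, std::int64_t b) {
  // Multiply in unsigned arithmetic (well-defined modulo 2^64), then convert back.
  return static_cast<std::int64_t>(static_cast<std::uint64_t>(a) *
                                   static_cast<std::uint64_t>(b));
}

int main() {
  std::printf("%lld\n", static_cast<long long>(mul_wrap(INT64_MIN, -1)));  // wraps back to INT64_MIN
  return 0;
}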
937
+
938
+ template <>
939
+ Vectorized<int32_t> inline operator*(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
940
+ return _mm256_mullo_epi32(a, b);
941
+ }
942
+
943
+ template <>
944
+ Vectorized<int16_t> inline operator*(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
945
+ return _mm256_mullo_epi16(a, b);
946
+ }
947
+
948
+ template <typename T, typename Op>
949
+ Vectorized<T> inline int_elementwise_binary_256(const Vectorized<T>& a, const Vectorized<T>& b, Op op) {
950
+ T values_a[Vectorized<T>::size()];
951
+ T values_b[Vectorized<T>::size()];
952
+ a.store(values_a);
953
+ b.store(values_b);
954
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
955
+ values_a[i] = op(values_a[i], values_b[i]);
956
+ }
957
+ return Vectorized<T>::loadu(values_a);
958
+ }
959
+
960
+ template <>
961
+ Vectorized<int8_t> inline operator*(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
962
+ // We don't have an instruction for multiplying int8_t
963
+ return int_elementwise_binary_256(a, b, std::multiplies<int8_t>());
964
+ }
965
+
966
+ template <>
967
+ Vectorized<uint8_t> inline operator*(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
968
+ // We don't have an instruction for multiplying uint8_t
969
+ return int_elementwise_binary_256(a, b, std::multiplies<uint8_t>());
970
+ }
971
+
972
+ template <>
973
+ Vectorized<int64_t> inline minimum(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
974
+ return emulate(a, b, [](int64_t a_point, int64_t b_point) {return std::min(a_point, b_point);});
975
+ }
976
+
977
+ template <>
978
+ Vectorized<int32_t> inline minimum(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
979
+ return _mm256_min_epi32(a, b);
980
+ }
981
+
982
+ template <>
983
+ Vectorized<int16_t> inline minimum(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
984
+ return _mm256_min_epi16(a, b);
985
+ }
986
+
987
+ template <>
988
+ Vectorized<int8_t> inline minimum(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
989
+ return _mm256_min_epi8(a, b);
990
+ }
991
+
992
+ template <>
993
+ Vectorized<uint8_t> inline minimum(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
994
+ return _mm256_min_epu8(a, b);
995
+ }
996
+
997
+ template <>
998
+ Vectorized<int64_t> inline maximum(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
999
+ return emulate(a, b, [](int64_t a_point, int64_t b_point) {return std::max(a_point, b_point);});
1000
+ }
1001
+
1002
+ template <>
1003
+ Vectorized<int32_t> inline maximum(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
1004
+ return _mm256_max_epi32(a, b);
1005
+ }
1006
+
1007
+ template <>
1008
+ Vectorized<int16_t> inline maximum(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
1009
+ return _mm256_max_epi16(a, b);
1010
+ }
1011
+
1012
+ template <>
1013
+ Vectorized<int8_t> inline maximum(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
1014
+ return _mm256_max_epi8(a, b);
1015
+ }
1016
+
1017
+ template <>
1018
+ Vectorized<uint8_t> inline maximum(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
1019
+ return _mm256_max_epu8(a, b);
1020
+ }
1021
+
1022
+ template <>
1023
+ Vectorized<int64_t> inline clamp(const Vectorized<int64_t>& a, const Vectorized<int64_t>& min_val, const Vectorized<int64_t>& max_val) {
1024
+ return emulate(a, min_val, max_val, [](int64_t a_point, int64_t min_point, int64_t max_point) {return std::min(max_point, std::max(a_point, min_point));});
1025
+ }
1026
+
1027
+ template <>
1028
+ Vectorized<int32_t> inline clamp(const Vectorized<int32_t>& a, const Vectorized<int32_t>& min_val, const Vectorized<int32_t>& max_val) {
1029
+ return _mm256_min_epi32(max_val, _mm256_max_epi32(a, min_val));
1030
+ }
1031
+
1032
+ template <>
1033
+ Vectorized<int16_t> inline clamp(const Vectorized<int16_t>& a, const Vectorized<int16_t>& min_val, const Vectorized<int16_t>& max_val) {
1034
+ return _mm256_min_epi16(max_val, _mm256_max_epi16(a, min_val));
1035
+ }
1036
+
1037
+ template <>
1038
+ Vectorized<int8_t> inline clamp(const Vectorized<int8_t>& a, const Vectorized<int8_t>& min_val, const Vectorized<int8_t>& max_val) {
1039
+ return _mm256_min_epi8(max_val, _mm256_max_epi8(a, min_val));
1040
+ }
1041
+
1042
+ template <>
1043
+ Vectorized<uint8_t> inline clamp(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& min_val, const Vectorized<uint8_t>& max_val) {
1044
+ return _mm256_min_epu8(max_val, _mm256_max_epu8(a, min_val));
1045
+ }
1046
+
1047
+ template <>
1048
+ Vectorized<int64_t> inline clamp_max(const Vectorized<int64_t>& a, const Vectorized<int64_t>& max_val) {
1049
+ return emulate(a, max_val, [](int64_t a_point, int64_t max_point) {return std::min(max_point, a_point);});
1050
+ }
1051
+
1052
+ template <>
1053
+ Vectorized<int32_t> inline clamp_max(const Vectorized<int32_t>& a, const Vectorized<int32_t>& max_val) {
1054
+ return _mm256_min_epi32(max_val, a);
1055
+ }
1056
+
1057
+ template <>
1058
+ Vectorized<int16_t> inline clamp_max(const Vectorized<int16_t>& a, const Vectorized<int16_t>& max_val) {
1059
+ return _mm256_min_epi16(max_val, a);
1060
+ }
1061
+
1062
+ template <>
1063
+ Vectorized<int8_t> inline clamp_max(const Vectorized<int8_t>& a, const Vectorized<int8_t>& max_val) {
1064
+ return _mm256_min_epi8(max_val, a);
1065
+ }
1066
+
1067
+ template <>
1068
+ Vectorized<uint8_t> inline clamp_max(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& max_val) {
1069
+ return _mm256_min_epu8(max_val, a);
1070
+ }
1071
+
1072
+ template <>
1073
+ Vectorized<int64_t> inline clamp_min(const Vectorized<int64_t>& a, const Vectorized<int64_t>& min_val) {
1074
+ return emulate(a, min_val, [](int64_t a_point, int64_t min_point) {return std::max(min_point, a_point);});
1075
+ }
1076
+
1077
+ template <>
1078
+ Vectorized<int32_t> inline clamp_min(const Vectorized<int32_t>& a, const Vectorized<int32_t>& min_val) {
1079
+ return _mm256_max_epi32(min_val, a);
1080
+ }
1081
+
1082
+ template <>
1083
+ Vectorized<int16_t> inline clamp_min(const Vectorized<int16_t>& a, const Vectorized<int16_t>& min_val) {
1084
+ return _mm256_max_epi16(min_val, a);
1085
+ }
1086
+
1087
+ template <>
1088
+ Vectorized<int8_t> inline clamp_min(const Vectorized<int8_t>& a, const Vectorized<int8_t>& min_val) {
1089
+ return _mm256_max_epi8(min_val, a);
1090
+ }
1091
+
1092
+ template <>
1093
+ Vectorized<uint8_t> inline clamp_min(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& min_val) {
1094
+ return _mm256_max_epu8(min_val, a);
1095
+ }
1096
+
1097
+ template<typename T>
1098
+ Vectorized<int32_t> inline convert_to_int32(const T* ptr) {
1099
+ return Vectorized<int32_t>::loadu(ptr);
1100
+ }
1101
+
1102
+ template<>
1103
+ Vectorized<int32_t> inline convert_to_int32<int8_t>(const int8_t* ptr) {
1104
+ return _mm256_cvtepi8_epi32(_mm_loadl_epi64(reinterpret_cast<const __m128i*>(ptr)));
1105
+ }
1106
+
1107
+ template<>
1108
+ Vectorized<int32_t> inline convert_to_int32<uint8_t>(const uint8_t* ptr) {
1109
+ return _mm256_cvtepu8_epi32(_mm_loadl_epi64(reinterpret_cast<const __m128i*>(ptr)));
1110
+ }
1111
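+ // A rough scalar model of the int8_t/uint8_t specializations above (they read
+ // only the first 8 bytes via _mm_loadl_epi64 and widen them to 8 int32 lanes):
+ //   int32_t widened[8];
+ //   for (int i = 0; i < 8; ++i) widened[i] = static_cast<int32_t>(ptr[i]);
+ // sign-extending for int8_t and zero-extending for uint8_t.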
+
1112
+ template <>
1113
+ Vectorized<int64_t> inline operator/(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
1114
+ return int_elementwise_binary_256(a, b, std::divides<int64_t>());
1115
+ }
1116
+ template <>
1117
+ Vectorized<int32_t> inline operator/(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
1118
+ return int_elementwise_binary_256(a, b, std::divides<int32_t>());
1119
+ }
1120
+ template <>
1121
+ Vectorized<int16_t> inline operator/(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
1122
+ return int_elementwise_binary_256(a, b, std::divides<int16_t>());
1123
+ }
1124
+ template <>
1125
+ Vectorized<int8_t> inline operator/(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
1126
+ return int_elementwise_binary_256(a, b, std::divides<int8_t>());
1127
+ }
1128
+ template <>
1129
+ Vectorized<uint8_t> inline operator/(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
1130
+ return int_elementwise_binary_256(a, b, std::divides<uint8_t>());
1131
+ }
1132
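+ // AVX2 has no packed integer division, so the operator/ specializations above
+ // fall back to the element-wise scalar helper; per lane this is simply
+ //   c[i] = a[i] / b[i];
+ // with ordinary C++ integer-division semantics.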
+
1133
+ template<class T, typename std::enable_if_t<std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
1134
+ inline Vectorized<T> operator&(const Vectorized<T>& a, const Vectorized<T>& b) {
1135
+ return _mm256_and_si256(a, b);
1136
+ }
1137
+ template<class T, typename std::enable_if_t<std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
1138
+ inline Vectorized<T> operator|(const Vectorized<T>& a, const Vectorized<T>& b) {
1139
+ return _mm256_or_si256(a, b);
1140
+ }
1141
+ template<class T, typename std::enable_if_t<std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
1142
+ inline Vectorized<T> operator^(const Vectorized<T>& a, const Vectorized<T>& b) {
1143
+ return _mm256_xor_si256(a, b);
1144
+ }
1145
+ template<class T, typename std::enable_if_t<std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
1146
+ inline Vectorized<T> operator~(const Vectorized<T>& a) {
1147
+ return _mm256_xor_si256(a, _mm256_set1_epi32(-1));
1148
+ }
1149
+
1150
+ inline Vectorized<int64_t> Vectorized<int64_t>::eq(const Vectorized<int64_t>& other) const {
1151
+ return (*this == other) & Vectorized<int64_t>(1);
1152
+ }
1153
+
1154
+ inline Vectorized<int64_t> Vectorized<int64_t>::ne(const Vectorized<int64_t>& other) const {
1155
+ return (*this != other) & Vectorized<int64_t>(1);
1156
+ }
1157
+
1158
+ inline Vectorized<int64_t> Vectorized<int64_t>::gt(const Vectorized<int64_t>& other) const {
1159
+ return (*this > other) & Vectorized<int64_t>(1);
1160
+ }
1161
+
1162
+ inline Vectorized<int64_t> Vectorized<int64_t>::ge(const Vectorized<int64_t>& other) const {
1163
+ return (*this >= other) & Vectorized<int64_t>(1);
1164
+ }
1165
+
1166
+ inline Vectorized<int64_t> Vectorized<int64_t>::lt(const Vectorized<int64_t>& other) const {
1167
+ return (*this < other) & Vectorized<int64_t>(1);
1168
+ }
1169
+
1170
+ inline Vectorized<int64_t> Vectorized<int64_t>::le(const Vectorized<int64_t>& other) const {
1171
+ return (*this <= other) & Vectorized<int64_t>(1);
1172
+ }
1173
+
1174
+ inline Vectorized<int32_t> Vectorized<int32_t>::eq(const Vectorized<int32_t>& other) const {
1175
+ return (*this == other) & Vectorized<int32_t>(1);
1176
+ }
1177
+
1178
+ inline Vectorized<int32_t> Vectorized<int32_t>::ne(const Vectorized<int32_t>& other) const {
1179
+ return (*this != other) & Vectorized<int32_t>(1);
1180
+ }
1181
+
1182
+ inline Vectorized<int32_t> Vectorized<int32_t>::gt(const Vectorized<int32_t>& other) const {
1183
+ return (*this > other) & Vectorized<int32_t>(1);
1184
+ }
1185
+
1186
+ inline Vectorized<int32_t> Vectorized<int32_t>::ge(const Vectorized<int32_t>& other) const {
1187
+ return (*this >= other) & Vectorized<int32_t>(1);
1188
+ }
1189
+
1190
+ inline Vectorized<int32_t> Vectorized<int32_t>::lt(const Vectorized<int32_t>& other) const {
1191
+ return (*this < other) & Vectorized<int32_t>(1);
1192
+ }
1193
+
1194
+ inline Vectorized<int32_t> Vectorized<int32_t>::le(const Vectorized<int32_t>& other) const {
1195
+ return (*this <= other) & Vectorized<int32_t>(1);
1196
+ }
1197
+
1198
+ inline Vectorized<int16_t> Vectorized<int16_t>::eq(const Vectorized<int16_t>& other) const {
1199
+ return (*this == other) & Vectorized<int16_t>(1);
1200
+ }
1201
+
1202
+ inline Vectorized<int16_t> Vectorized<int16_t>::ne(const Vectorized<int16_t>& other) const {
1203
+ return (*this != other) & Vectorized<int16_t>(1);
1204
+ }
1205
+
1206
+ inline Vectorized<int16_t> Vectorized<int16_t>::gt(const Vectorized<int16_t>& other) const {
1207
+ return (*this > other) & Vectorized<int16_t>(1);
1208
+ }
1209
+
1210
+ inline Vectorized<int16_t> Vectorized<int16_t>::ge(const Vectorized<int16_t>& other) const {
1211
+ return (*this >= other) & Vectorized<int16_t>(1);
1212
+ }
1213
+
1214
+ inline Vectorized<int16_t> Vectorized<int16_t>::lt(const Vectorized<int16_t>& other) const {
1215
+ return (*this < other) & Vectorized<int16_t>(1);
1216
+ }
1217
+
1218
+ inline Vectorized<int16_t> Vectorized<int16_t>::le(const Vectorized<int16_t>& other) const {
1219
+ return (*this <= other) & Vectorized<int16_t>(1);
1220
+ }
1221
+
1222
+ inline Vectorized<int8_t> Vectorized<int8_t>::eq(const Vectorized<int8_t>& other) const {
1223
+ return (*this == other) & Vectorized<int8_t>(1);
1224
+ }
1225
+
1226
+ inline Vectorized<int8_t> Vectorized<int8_t>::ne(const Vectorized<int8_t>& other) const {
1227
+ return (*this != other) & Vectorized<int8_t>(1);
1228
+ }
1229
+
1230
+ inline Vectorized<int8_t> Vectorized<int8_t>::gt(const Vectorized<int8_t>& other) const {
1231
+ return (*this > other) & Vectorized<int8_t>(1);
1232
+ }
1233
+
1234
+ inline Vectorized<int8_t> Vectorized<int8_t>::ge(const Vectorized<int8_t>& other) const {
1235
+ return (*this >= other) & Vectorized<int8_t>(1);
1236
+ }
1237
+
1238
+ inline Vectorized<int8_t> Vectorized<int8_t>::lt(const Vectorized<int8_t>& other) const {
1239
+ return (*this < other) & Vectorized<int8_t>(1);
1240
+ }
1241
+
1242
+ inline Vectorized<int8_t> Vectorized<int8_t>::le(const Vectorized<int8_t>& other) const {
1243
+ return (*this <= other) & Vectorized<int8_t>(1);
1244
+ }
1245
+
1246
+ inline Vectorized<uint8_t> Vectorized<uint8_t>::eq(const Vectorized<uint8_t>& other) const {
1247
+ return (*this == other) & Vectorized<uint8_t>(1);
1248
+ }
1249
+
1250
+ inline Vectorized<uint8_t> Vectorized<uint8_t>::ne(const Vectorized<uint8_t>& other) const {
1251
+ return (*this != other) & Vectorized<uint8_t>(1);
1252
+ }
1253
+
1254
+ inline Vectorized<uint8_t> Vectorized<uint8_t>::gt(const Vectorized<uint8_t>& other) const {
1255
+ return (*this > other) & Vectorized<uint8_t>(1);
1256
+ }
1257
+
1258
+ inline Vectorized<uint8_t> Vectorized<uint8_t>::ge(const Vectorized<uint8_t>& other) const {
1259
+ return (*this >= other) & Vectorized<uint8_t>(1);
1260
+ }
1261
+
1262
+ inline Vectorized<uint8_t> Vectorized<uint8_t>::lt(const Vectorized<uint8_t>& other) const {
1263
+ return (*this < other) & Vectorized<uint8_t>(1);
1264
+ }
1265
+
1266
+ inline Vectorized<uint8_t> Vectorized<uint8_t>::le(const Vectorized<uint8_t>& other) const {
1267
+ return (*this <= other) & Vectorized<uint8_t>(1);
1268
+ }
1269
+
1270
+ template <bool left_shift>
1271
+ Vectorized<int16_t> inline shift_256_16(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
1272
+ // No vector instruction for shifting int16_t, so emulating it instead.
1273
+
1274
+ // Control masks for shuffle operation, treating 256 bits as an
1275
+ // array of 16-bit elements, and considering pairs of neighboring
1276
+ // elements. Specifically, a mask named "ctl_M_N" (M,N in [0,1], and
1277
+ // M!=N) is set so that shuffle will move element with index M from
1278
+ // input pair into element with index N in output pair, and element
1279
+ // with index M in output pair will be set to all 0s.
1280
+ __m256i ctl_0_1 = _mm256_set_epi8(29, 28, 0x80, 0x80, 25, 24, 0x80, 0x80,
1281
+ 21, 20, 0x80, 0x80, 17, 16, 0x80, 0x80,
1282
+ 13, 12, 0x80, 0x80, 9, 8, 0x80, 0x80,
1283
+ 5, 4, 0x80, 0x80, 1, 0, 0x80, 0x80);
1284
+ __m256i ctl_1_0 = _mm256_set_epi8(0x80, 0x80, 31, 30, 0x80, 0x80, 27, 26,
1285
+ 0x80, 0x80, 23, 22, 0x80, 0x80, 19, 18,
1286
+ 0x80, 0x80, 15, 14, 0x80, 0x80, 11, 10,
1287
+ 0x80, 0x80, 7, 6, 0x80, 0x80, 3, 2);
1288
+
1289
+ // Masks for bitwise and operation, treating 256 bits as an array of
1290
+ // 16-bit elements, and considering them in pairs of neighboring
1291
+ // elements. A mask named "keep_M" (M in [0,1]) is set so that
1292
+ // bitwise and will copy element with index M from input pair into
1293
+ // element with the same index in output pair, while the other
1294
+ // element in output pair will be set to all 0s.
1295
+ __m256i keep_0 = _mm256_set1_epi32(0xFFFF);
1296
+ __m256i keep_1 = _mm256_set1_epi32(0xFFFF0000);
1297
+
1298
+ // Take each 16-bit element with idx%2==0 from input array to be
1299
+ // shifted and extend it to 32 bits so that 0s are added to the
1300
+ // right. Then, perform shifting on this 32-bit number. Upper 16
1301
+ // bits will be proper result of shifting original 16-bit number, so
1302
+ // write them to result array, into the same position from which
1303
+ // corresponding input element is taken. Also, make sure that
1304
+ // result array elements with idx%2!=0 are set to all 0s.
1305
+ //
1306
+ // Note that number of bits to shift for is extended to 32 bits by
1307
+ // adding 0s to the left. That means this number is not properly
1308
+ // sign-extended for negative values. However, number of bits to
1309
+ // shift is treated as an unsigned integer by respective shift
1310
+ // intrinsics anyway so if negative then either with or without
1311
+ // proper sign extension, it will be interpreted as a number greater
1312
+ // than 32, and the shifting result will be the same.
1313
+ __m256i a0 = _mm256_shuffle_epi8(a, ctl_0_1);
1314
+ __m256i b0 = _mm256_and_si256(b, keep_0);
1315
+ __m256i c0;
1316
+ if (left_shift)
1317
+ c0 = _mm256_sllv_epi32(a0, b0);
1318
+ else
1319
+ c0 = _mm256_srav_epi32(a0, b0);
1320
+ c0 = _mm256_shuffle_epi8(c0, ctl_1_0);
1321
+
1322
+ // Perform shifting the same way for input array elements with
1323
+ // idx%2==1.
1324
+ __m256i a1 = _mm256_and_si256(a, keep_1);
1325
+ __m256i b1 = _mm256_shuffle_epi8(b, ctl_1_0);
1326
+ __m256i c1;
1327
+ if (left_shift)
1328
+ c1 = _mm256_sllv_epi32(a1, b1);
1329
+ else
1330
+ c1 = _mm256_srav_epi32(a1, b1);
1331
+ c1 = _mm256_and_si256(c1, keep_1);
1332
+
1333
+ // Merge partial results into the final result.
1334
+ __m256i c = _mm256_or_si256(c0, c1);
1335
+
1336
+ return c;
1337
+ }
1338
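+ // A scalar sketch of the per-lane emulation above (assuming 0 <= b < 16): the
+ // 16-bit value is moved into the upper half of a 32-bit temporary, shifted
+ // there, and the upper half is taken back:
+ //   uint32_t w = static_cast<uint32_t>(static_cast<uint16_t>(a)) << 16;
+ //   uint32_t s = left_shift
+ //       ? (w << b)
+ //       : static_cast<uint32_t>(static_cast<int32_t>(w) >> b);  // arithmetic shift
+ //   int16_t result = static_cast<int16_t>(s >> 16);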
+
1339
+ template <bool left_shift, typename T, typename std::enable_if_t<std::is_same<T, int8_t>::value || std::is_same<T, uint8_t>::value, int> = 0>
1340
+ Vectorized<T> inline shift_256_8(const Vectorized<T>& a, const Vectorized<T>& b) {
1341
+ // No vector instruction for shifting int8_t/uint8_t, so emulating
1342
+ // it instead.
1343
+
1344
+ // Control masks for shuffle operation, treating 256 bits as an
1345
+ // array of 8-bit elements, and considering quadruples of
1346
+ // neighboring elements. Specifically, a mask named "ctl_M_N" (M,N
1347
+ // in [0,1,2,3], and M!=N) is set so that shuffle will move element
1348
+ // with index M from input quadruple into element with index N in
1349
+ // output quadruple, and other elements in output quadruple will be
1350
+ // set to all 0s.
1351
+ __m256i ctl_0_3 = _mm256_set_epi8(28, 0x80, 0x80, 0x80, 24, 0x80, 0x80, 0x80,
1352
+ 20, 0x80, 0x80, 0x80, 16, 0x80, 0x80, 0x80,
1353
+ 12, 0x80, 0x80, 0x80, 8, 0x80, 0x80, 0x80,
1354
+ 4, 0x80, 0x80, 0x80, 0, 0x80, 0x80, 0x80);
1355
+ __m256i ctl_1_0 = _mm256_set_epi8(0x80, 0x80, 0x80, 29, 0x80, 0x80, 0x80, 25,
1356
+ 0x80, 0x80, 0x80, 21, 0x80, 0x80, 0x80, 17,
1357
+ 0x80, 0x80, 0x80, 13, 0x80, 0x80, 0x80, 9,
1358
+ 0x80, 0x80, 0x80, 5, 0x80, 0x80, 0x80, 1);
1359
+ __m256i ctl_1_3 = _mm256_set_epi8(29, 0x80, 0x80, 0x80, 25, 0x80, 0x80, 0x80,
1360
+ 21, 0x80, 0x80, 0x80, 17, 0x80, 0x80, 0x80,
1361
+ 13, 0x80, 0x80, 0x80, 9, 0x80, 0x80, 0x80,
1362
+ 5, 0x80, 0x80, 0x80, 1, 0x80, 0x80, 0x80);
1363
+ __m256i ctl_2_0 = _mm256_set_epi8(0x80, 0x80, 0x80, 30, 0x80, 0x80, 0x80, 26,
1364
+ 0x80, 0x80, 0x80, 22, 0x80, 0x80, 0x80, 18,
1365
+ 0x80, 0x80, 0x80, 14, 0x80, 0x80, 0x80, 10,
1366
+ 0x80, 0x80, 0x80, 6, 0x80, 0x80, 0x80, 2);
1367
+ __m256i ctl_2_3 = _mm256_set_epi8(30, 0x80, 0x80, 0x80, 26, 0x80, 0x80, 0x80,
1368
+ 22, 0x80, 0x80, 0x80, 18, 0x80, 0x80, 0x80,
1369
+ 14, 0x80, 0x80, 0x80, 10, 0x80, 0x80, 0x80,
1370
+ 6, 0x80, 0x80, 0x80, 2, 0x80, 0x80, 0x80);
1371
+ __m256i ctl_3_0 = _mm256_set_epi8(0x80, 0x80, 0x80, 31, 0x80, 0x80, 0x80, 27,
1372
+ 0x80, 0x80, 0x80, 23, 0x80, 0x80, 0x80, 19,
1373
+ 0x80, 0x80, 0x80, 15, 0x80, 0x80, 0x80, 11,
1374
+ 0x80, 0x80, 0x80, 7, 0x80, 0x80, 0x80, 3);
1375
+ __m256i ctl_3_1 = _mm256_set_epi8(0x80, 0x80, 31, 0x80, 0x80, 0x80, 27, 0x80,
1376
+ 0x80, 0x80, 23, 0x80, 0x80, 0x80, 19, 0x80,
1377
+ 0x80, 0x80, 15, 0x80, 0x80, 0x80, 11, 0x80,
1378
+ 0x80, 0x80, 7, 0x80, 0x80, 0x80, 3, 0x80);
1379
+ __m256i ctl_3_2 = _mm256_set_epi8(0x80, 31, 0x80, 0x80, 0x80, 27, 0x80, 0x80,
1380
+ 0x80, 23, 0x80, 0x80, 0x80, 19, 0x80, 0x80,
1381
+ 0x80, 15, 0x80, 0x80, 0x80, 11, 0x80, 0x80,
1382
+ 0x80, 7, 0x80, 0x80, 0x80, 3, 0x80, 0x80);
1383
+
1384
+ // Masks for bitwise and operation, treating 256 bits as an array of
1385
+ // 8-bit elements, and considering them in quadruples of neighboring
1386
+ // elements. A mask named "keep_M" (M in [0,1,2,3]) is set so that
1387
+ // bitwise and will copy element with index M from input quadruple
1388
+ // into element with the same index in output quadruple, while the
1389
+ // other elements in output quadruple will be set to all 0s.
1390
+ __m256i keep_0 = _mm256_set1_epi32(0xFF);
1391
+ __m256i keep_3 = _mm256_set1_epi32(0xFF000000);
1392
+
1393
+ // Take each 8-bit element with idx%4==0 from input array to be
1394
+ // shifted and extend it to 32 bits so that 0s are added to the
1395
+ // right. Then, perform shifting on this 32-bit number. Upper 8
1396
+ // bits will be proper result of shifting original 8-bit number, so
1397
+ // write them to result array, into the same position from which
1398
+ // corresponding input element is taken. Also, make sure that
1399
+ // result array elements with idx%4!=0 are set to all 0s.
1400
+ //
1401
+ // Note that number of bits to shift for is extended to 32 bits by
1402
+ // adding 0s to the left. That means this number is not properly
1403
+ // sign-extended for negative values. However, number of bits to
1404
+ // shift is treated as an unsigned integer by respective shift
1405
+ // intrinsics anyway so if negative then either with or without
1406
+ // proper sign extension, it will be interpreted as a number greater
1407
+ // than 32, and the shifting result will be the same.
1408
+ __m256i a0 = _mm256_shuffle_epi8(a, ctl_0_3);
1409
+ __m256i b0 = _mm256_and_si256(b, keep_0);
1410
+ __m256i c0;
1411
+ if (left_shift)
1412
+ c0 = _mm256_sllv_epi32(a0, b0);
1413
+ else
1414
+ if constexpr (std::is_same_v<T, int8_t>)
1415
+ c0 = _mm256_srav_epi32(a0, b0);
1416
+ else
1417
+ c0 = _mm256_srlv_epi32(a0, b0);
1418
+ c0 = _mm256_shuffle_epi8(c0, ctl_3_0);
1419
+
1420
+ // Perform shifting the same way for input array elements with
1421
+ // idx%4==1.
1422
+ __m256i a1 = _mm256_shuffle_epi8(a, ctl_1_3);
1423
+ __m256i b1 = _mm256_shuffle_epi8(b, ctl_1_0);
1424
+ __m256i c1;
1425
+ if (left_shift)
1426
+ c1 = _mm256_sllv_epi32(a1, b1);
1427
+ else
1428
+ if constexpr (std::is_same_v<T, int8_t>)
1429
+ c1 = _mm256_srav_epi32(a1, b1);
1430
+ else
1431
+ c1 = _mm256_srlv_epi32(a1, b1);
1432
+ c1 = _mm256_shuffle_epi8(c1, ctl_3_1);
1433
+
1434
+ // Perform shifting the same way for input array elements with
1435
+ // idx%4==2.
1436
+ __m256i a2 = _mm256_shuffle_epi8(a, ctl_2_3);
1437
+ __m256i b2 = _mm256_shuffle_epi8(b, ctl_2_0);
1438
+ __m256i c2;
1439
+ if (left_shift)
1440
+ c2 = _mm256_sllv_epi32(a2, b2);
1441
+ else
1442
+ if constexpr (std::is_same_v<T, int8_t>)
1443
+ c2 = _mm256_srav_epi32(a2, b2);
1444
+ else
1445
+ c2 = _mm256_srlv_epi32(a2, b2);
1446
+ c2 = _mm256_shuffle_epi8(c2, ctl_3_2);
1447
+
1448
+ // Perform shifting the same way for input array elements with
1449
+ // idx%4==3.
1450
+ __m256i a3 = _mm256_and_si256(a, keep_3);
1451
+ __m256i b3 = _mm256_shuffle_epi8(b, ctl_3_0);
1452
+ __m256i c3;
1453
+ if (left_shift)
1454
+ c3 = _mm256_sllv_epi32(a3, b3);
1455
+ else
1456
+ if constexpr (std::is_same_v<T, int8_t>)
1457
+ c3 = _mm256_srav_epi32(a3, b3);
1458
+ else
1459
+ c3 = _mm256_srlv_epi32(a3, b3);
1460
+ c3 = _mm256_and_si256(c3, keep_3);
1461
+
1462
+ // Merge partial results into the final result.
1463
+ __m256i c01 = _mm256_or_si256(c0, c1);
1464
+ __m256i c23 = _mm256_or_si256(c2, c3);
1465
+ __m256i c = _mm256_or_si256(c01, c23);
1466
+
1467
+ return c;
1468
+ }
1469
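+ // The 8-bit variant above uses the same upper-bits trick, processing each lane
+ // of a quadruple in turn; the only type dependence is the right shift, which is
+ // arithmetic (_mm256_srav_epi32) for int8_t and logical (_mm256_srlv_epi32) for
+ // uint8_t. Per lane this is roughly (for 0 <= b < 8):
+ //   T result = left_shift ? static_cast<T>(a << b) : static_cast<T>(a >> b);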
+
1470
+ template <>
1471
+ Vectorized<int64_t> inline operator<<(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
1472
+ return _mm256_sllv_epi64(a, b);
1473
+ }
1474
+
1475
+ template <>
1476
+ Vectorized<int32_t> inline operator<<(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
1477
+ return _mm256_sllv_epi32(a, b);
1478
+ }
1479
+
1480
+ template <>
1481
+ Vectorized<int16_t> inline operator<<(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
1482
+ return shift_256_16<true>(a, b);
1483
+ }
1484
+
1485
+ template <>
1486
+ Vectorized<int8_t> inline operator<<(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
1487
+ return shift_256_8<true>(a, b);
1488
+ }
1489
+
1490
+ template <>
1491
+ Vectorized<uint8_t> inline operator<<(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
1492
+ return shift_256_8<true>(a, b);
1493
+ }
1494
+
1495
+ template <>
1496
+ Vectorized<int64_t> inline operator>>(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
1497
+ // No vector instruction for right arithmetic shifting int64_t, so emulating it
1498
+ // instead.
1499
+
1500
+ // Clamp the shift values such that shift values < 0 or > 64 are changed to 64
1501
+ // which results in -1 for negative input and 0 for non-negative input.
1502
+ __m256i zero = _mm256_set1_epi64x(0);
1503
+ __m256i max_shift = _mm256_set1_epi64x(64);
1504
+ __m256i mask = _mm256_or_si256(_mm256_cmpgt_epi64(zero, b), _mm256_cmpgt_epi64(b, max_shift));
1505
+ __m256i shift = _mm256_blendv_epi8(b, max_shift, mask);
1506
+ // Shift the number logically to the right, thus filling the most
1507
+ // significant bits with 0s. Then, replace these bits with the sign
1508
+ // bit.
1509
+ __m256i sign_bits = _mm256_cmpgt_epi64(zero, a);
1510
+ __m256i sign_shift = _mm256_sub_epi64(max_shift, shift);
1511
+ __m256i sign_ext = _mm256_sllv_epi64(sign_bits, sign_shift);
1512
+ __m256i c = _mm256_srlv_epi64(a, shift);
1513
+ c = _mm256_or_si256(c, sign_ext);
1514
+
1515
+ return c;
1516
+ }
1517
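+ // A worked example of the emulation above for one lane, with a = -8, b = 1:
+ //   logical shift: 0xFFFFFFFFFFFFFFF8 >> 1 = 0x7FFFFFFFFFFFFFFC
+ //   sign_bits    : all ones (a < 0), sign_shift = 64 - 1 = 63
+ //   sign_ext     : all ones << 63            = 0x8000000000000000
+ //   result       : OR of the two             = 0xFFFFFFFFFFFFFFFC = -4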
+
1518
+ template <>
1519
+ Vectorized<int32_t> inline operator>>(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
1520
+ return _mm256_srav_epi32(a, b);
1521
+ }
1522
+
1523
+ template <>
1524
+ Vectorized<int16_t> inline operator>>(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
1525
+ return shift_256_16<false>(a, b);
1526
+ }
1527
+
1528
+ template <>
1529
+ Vectorized<int8_t> inline operator>>(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
1530
+ return shift_256_8<false>(a, b);
1531
+ }
1532
+
1533
+ template <>
1534
+ Vectorized<uint8_t> inline operator>>(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
1535
+ return shift_256_8<false>(a, b);
1536
+ }
1537
+
1538
+ #endif
1539
+
1540
+ }} // namespace at::vec::CPU_CAPABILITY
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_qint.h ADDED
@@ -0,0 +1,1327 @@
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+
6
+ #include <ATen/cpu/vec/intrinsics.h>
7
+ #include <ATen/cpu/vec/vec_base.h>
8
+ #include <ATen/native/quantized/AffineQuantizerBase.h>
9
+
10
+ #include <c10/util/irange.h>
11
+ #include <c10/util/qint32.h>
12
+ #include <c10/util/qint8.h>
13
+ #include <c10/util/quint8.h>
14
+
15
+ #include <array>
16
+ #include <cmath>
17
+
18
+ // This file defines Vectorized<> for the quantized types.
19
+ //
20
+ //
21
+ // Currently, we simply use these classes as efficient converters between
22
+ // the quantized types and Vectorized<float>, usually in bandwidth-bound cases
23
+ // where doing the arithmetic in full-precision is acceptable (e.g.
24
+ // elementwise operators).
25
+ //
26
+ //
27
+ // Conversions are as follows:
28
+ // Vectorized<qint8> -> 4x Vectorized<float>
29
+ // Vectorized<quint8> -> 4x Vectorized<float>
30
+ // Vectorized<qint32> -> 1x Vectorized<float>
31
+ //
32
+ // The size of the returned float vector is specified by the special
33
+ // constexpr function float_num_vecs. The type of the value returned
34
+ // from dequantize (and expected as an argument to quantize) is
35
+ // specified by float_vec_return_type.
36
+ //
37
+ // When writing kernels with these vectors, it is expected that floating-
38
+ // point operations will be carried out in a loop over Vectorized<T>::float_num_vecs
39
+ // iterations.
40
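+ // As a rough usage sketch (src, dst, op and the scale/zero-point values below
+ // are placeholders, not part of this header), a bandwidth-bound kernel built on
+ // this interface looks like:
+ //   auto qvec  = Vectorized<c10::quint8>::loadu(src);
+ //   auto fvecs = qvec.dequantize(scale_vec, zero_point_vec);
+ //   for (auto& fv : fvecs) fv = op(fv);                 // element-wise float math
+ //   Vectorized<c10::quint8>::quantize(fvecs, scale, zero_point, inverse_scale)
+ //       .store(dst);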
+
41
+ namespace at::vec {
42
+ inline namespace CPU_CAPABILITY {
43
+
44
+ #if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
45
+
46
+ struct Vectorizedqi {
47
+ protected:
48
+ __m256i vals __attribute__((aligned(64)));
49
+
50
+ public:
51
+ Vectorizedqi() {}
52
+ Vectorizedqi(__m256i v) : vals(v) {}
53
+ operator __m256i() const {
54
+ return vals;
55
+ }
56
+ };
57
+
58
+ template <typename T>
59
+ __m256i pack_saturate_and_clamp(
60
+ __m256i first,
61
+ __m256i second,
62
+ T min_val,
63
+ T max_val);
64
+
65
+ template <>
66
+ inline __m256i pack_saturate_and_clamp<int32_t>(
67
+ __m256i /*first*/,
68
+ __m256i /*second*/,
69
+ int32_t /*min_val*/,
70
+ int32_t /*max_val*/) {
71
+ // This function is for linkage only, will not be used
72
+ AT_ERROR("pack_saturate_and_clamp<int32_t> is not supported");
73
+ }
74
+
75
+ template <>
76
+ inline __m256i pack_saturate_and_clamp<int8_t>(
77
+ __m256i first,
78
+ __m256i second,
79
+ int8_t min_val,
80
+ int8_t max_val) {
81
+ __m256i packed_and_sat = _mm256_packs_epi16(first, second);
82
+ return _mm256_max_epi8(
83
+ _mm256_set1_epi8(min_val),
84
+ _mm256_min_epi8(packed_and_sat, _mm256_set1_epi8(max_val)));
85
+ }
86
+
87
+ template <>
88
+ inline __m256i pack_saturate_and_clamp<uint8_t>(
89
+ __m256i first,
90
+ __m256i second,
91
+ uint8_t min_val,
92
+ uint8_t max_val) {
93
+ __m256i packed_and_sat = _mm256_packus_epi16(first, second);
94
+ return _mm256_max_epu8(
95
+ _mm256_set1_epi8(min_val),
96
+ _mm256_min_epu8(packed_and_sat, _mm256_set1_epi8(max_val)));
97
+ }
98
+
99
+ inline Vectorized<float> convert_uint8_to_float(at::vec::Vectorized<uint8_t> src) {
100
+ // Note: this function only converts a number of input elements equal to at::vec::Vectorized<float>.size()
101
+ // Only handle first 64 bits
102
+ __m128i input_128 = _mm256_castsi256_si128(src);
103
+ // Convert from 8*uint8 to 8*int32
104
+ __m256i input_256_int32 = _mm256_cvtepu8_epi32(input_128);
105
+ // Convert from 8*int32 to 8*float
106
+ return _mm256_cvtepi32_ps(input_256_int32);
107
+ }
108
+
109
+ inline Vectorized<uint8_t> convert_float_to_uint8(at::vec::Vectorized<float> src) {
110
+ // Convert from float32 to int32 with truncation
111
+ __m256i x_values_int32 = _mm256_cvttps_epi32(src);
112
+
113
+ // Convert from int32 to int16 using signed saturation
114
+ __m256i xy_packed_v = _mm256_packs_epi32(x_values_int32, x_values_int32);
115
+
116
+ constexpr auto min_val = std::numeric_limits<uint8_t>::min();
117
+ constexpr auto max_val = std::numeric_limits<uint8_t>::max();
118
+
119
+ // Convert from int16 to uint8 using unsigned saturation
120
+ __m256i xyzw_clamped_v = pack_saturate_and_clamp<uint8_t>(
121
+ xy_packed_v, xy_packed_v, min_val, max_val);
122
+ __m256i permute_mask_v =
123
+ _mm256_set_epi32(0x07, 0x03, 0x06, 0x02, 0x05, 0x01, 0x04, 0x00);
124
+ return _mm256_permutevar8x32_epi32(xyzw_clamped_v, permute_mask_v);
125
+ }
126
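+ // Note that convert_float_to_uint8 above truncates toward zero
+ // (_mm256_cvttps_epi32) rather than rounding, and then saturates; for inputs
+ // that fit in int32 this is roughly, per lane:
+ //   uint8_t out = static_cast<uint8_t>(
+ //       std::min(255.0f, std::max(0.0f, std::trunc(x))));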
+
127
+ template <typename T>
128
+ inline void __attribute__((always_inline)) QuantizeAvx2(
129
+ const float* src,
130
+ T* dst,
131
+ int len,
132
+ float inverse_scale,
133
+ int64_t zero_point) {
134
+ constexpr int VLEN = 8;
135
+ constexpr auto min_val = std::numeric_limits<T>::min();
136
+ constexpr auto max_val = std::numeric_limits<T>::max();
137
+ const __m256i min_v = _mm256_set1_epi32(min_val);
138
+ const __m256i max_v = _mm256_set1_epi32(max_val);
139
+ // This is the largest int32 value < int32_max exactly representable in float
140
+ constexpr int32_t int32_float_max_val =
141
+ std::numeric_limits<int32_t>::max() - 127;
142
+ int i = 0;
143
+ __m256 inverse_scale_v = _mm256_set1_ps(inverse_scale);
144
+ // clang-format off
145
+ static const __m256i shuffle_mask_v = _mm256_set_epi8(
146
+ 0xff, 0xff, 0xff, 0xff,
147
+ 0xff, 0xff, 0xff, 0xff,
148
+ 0xff, 0xff, 0xff, 0xff,
149
+ 0x0c, 0x08, 0x04, 0x00,
150
+ 0xff, 0xff, 0xff, 0xff,
151
+ 0xff, 0xff, 0xff, 0xff,
152
+ 0xff, 0xff, 0xff, 0xff,
153
+ 0x0c, 0x08, 0x04, 0x00);
154
+ // clang-format on
155
+ __m256i permute_mask_v =
156
+ _mm256_set_epi32(0x07, 0x03, 0x06, 0x02, 0x05, 0x01, 0x04, 0x00);
157
+ __m256i permute_mask_l8_v =
158
+ _mm256_set_epi32(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00);
159
+ int len_aligned = len / (VLEN * 4) * (VLEN * 4);
160
+ for (; i < len_aligned; i += 4 * VLEN) {
161
+ // x
162
+ __m256 x_vals = _mm256_load_ps(src + i);
163
+ __m256 x_transformed_v = _mm256_mul_ps(x_vals, inverse_scale_v);
164
+ // If the floating point value is greater than int32_max,
165
+ // _mm256_cvtps_epi32 converts them to negative values.
166
+ // Clip at int32_float_max_val to avoid this.
167
+ x_transformed_v =
168
+ _mm256_min_ps(x_transformed_v, _mm256_set1_ps(int32_float_max_val));
169
+ // y
170
+ __m256 y_vals = _mm256_load_ps(src + i + VLEN);
171
+ __m256 y_transformed_v = _mm256_mul_ps(y_vals, inverse_scale_v);
172
+ y_transformed_v =
173
+ _mm256_min_ps(y_transformed_v, _mm256_set1_ps(int32_float_max_val));
174
+ // z
175
+ __m256 z_vals = _mm256_load_ps(src + i + 2 * VLEN);
176
+ __m256 z_transformed_v = _mm256_mul_ps(z_vals, inverse_scale_v);
177
+ z_transformed_v =
178
+ _mm256_min_ps(z_transformed_v, _mm256_set1_ps(int32_float_max_val));
179
+ // w
180
+ __m256 w_vals = _mm256_load_ps(src + i + 3 * VLEN);
181
+ __m256 w_transformed_v = _mm256_mul_ps(w_vals, inverse_scale_v);
182
+ w_transformed_v =
183
+ _mm256_min_ps(w_transformed_v, _mm256_set1_ps(int32_float_max_val));
184
+
185
+ __m256i x_rounded_v = _mm256_cvtps_epi32(x_transformed_v);
186
+ __m256i y_rounded_v = _mm256_cvtps_epi32(y_transformed_v);
187
+ __m256i z_rounded_v = _mm256_cvtps_epi32(z_transformed_v);
188
+ __m256i w_rounded_v = _mm256_cvtps_epi32(w_transformed_v);
189
+
190
+ // add zero point
191
+ x_rounded_v = _mm256_add_epi32(x_rounded_v, _mm256_set1_epi32(zero_point));
192
+ y_rounded_v = _mm256_add_epi32(y_rounded_v, _mm256_set1_epi32(zero_point));
193
+ z_rounded_v = _mm256_add_epi32(z_rounded_v, _mm256_set1_epi32(zero_point));
194
+ w_rounded_v = _mm256_add_epi32(w_rounded_v, _mm256_set1_epi32(zero_point));
195
+
196
+ __m256i xy_packed_v = _mm256_packs_epi32(x_rounded_v, y_rounded_v);
197
+ __m256i zw_packed_v = _mm256_packs_epi32(z_rounded_v, w_rounded_v);
198
+ __m256i xyzw_clamped_v =
199
+ pack_saturate_and_clamp<T>(xy_packed_v, zw_packed_v, min_val, max_val);
200
+
201
+ xyzw_clamped_v =
202
+ _mm256_permutevar8x32_epi32(xyzw_clamped_v, permute_mask_v);
203
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>(dst + i), xyzw_clamped_v);
204
+ }
205
+
206
+ // Additional 8-lane AVX2 version to take advantage when len is smaller
207
+ // based on fbgemm::QuantizeAvx2 (https://github.com/pytorch/FBGEMM)
208
+ for (; i < len / VLEN * VLEN; i += VLEN) {
209
+ __m256 x_vals = _mm256_load_ps(src + i);
210
+ __m256 x_transformed_v = _mm256_mul_ps(x_vals, inverse_scale_v);
211
+ x_transformed_v =
212
+ _mm256_min_ps(x_transformed_v, _mm256_set1_ps(int32_float_max_val));
213
+ __m256i x_rounded_v = _mm256_cvtps_epi32(x_transformed_v);
214
+ x_rounded_v = _mm256_add_epi32(x_rounded_v, _mm256_set1_epi32(zero_point));
215
+ __m256i x_clipped_v =
216
+ _mm256_max_epi32(min_v, _mm256_min_epi32(max_v, x_rounded_v));
217
+
218
+ x_clipped_v = _mm256_shuffle_epi8(x_clipped_v, shuffle_mask_v);
219
+ x_clipped_v = _mm256_permutevar8x32_epi32(x_clipped_v, permute_mask_l8_v);
220
+ _mm_storel_epi64(
221
+ reinterpret_cast<__m128i*>(dst + i),
222
+ _mm256_castsi256_si128(x_clipped_v));
223
+ }
224
+
225
+ for (; i < len; ++i) {
226
+ float transformed = src[i] * inverse_scale;
227
+
228
+ // Not exactly the same behavior as the vectorized code.
229
+ // The vectorized code above always rounds to even in halfway cases
230
+ // (https://software.intel.com/en-us/node/523819), but std::nearbyint
231
+ // does the same only when the current rounding mode is FE_TONEAREST.
232
+ // However, in practice, this should not be a problem because most cases
233
+ // use the default rounding mode FE_TONEAREST.
234
+ // Note that we cannot implement the same behavior as the vectorized code
235
+ // using std::round because it does rounding away from zero in halfway
236
+ // cases.
237
+ transformed = zero_point + std::nearbyint(transformed);
238
+ float clipped =
239
+ std::min(std::max(transformed, float(min_val)), float(max_val));
240
+ dst[i] = clipped;
241
+ }
242
+ }
243
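+ // Per element, QuantizeAvx2 above computes
+ //   dst[i] = clamp(round(src[i] * inverse_scale) + zero_point, min_val, max_val)
+ // with round-to-nearest-even in the vectorized paths. Note that the main loops
+ // use _mm256_load_ps, so src is expected to be 32-byte aligned.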
+
244
+ template<>
245
+ struct Vectorized<c10::qint32> : public Vectorizedqi {
246
+ using size_type = int;
247
+ static constexpr size_type size() {
248
+ return 8;
249
+ }
250
+
251
+ static constexpr int float_num_vecs() {
252
+ return 1;
253
+ }
254
+
255
+ static constexpr int int_num_vecs() {
256
+ return 1;
257
+ }
258
+
259
+ using float_vec_return_type = std::array<Vectorized<float>, 1>;
260
+ using int_vec_return_type = std::array<Vectorized<c10::qint32>, 1>;
261
+ using value_type = c10::qint32::underlying;
262
+
263
+ public:
264
+ using Vectorizedqi::Vectorizedqi;
265
+ Vectorized() {}
266
+
267
+ Vectorized(__m256i vals_) { vals = vals_;}
268
+
269
+ // Broadcast constructor
270
+ Vectorized(const c10::qint32& val) {
271
+ value_type uw = val.val_;
272
+ vals = _mm256_set1_epi32(uw);
273
+ }
274
+
275
+ void store(void* ptr, int count = size()) const {
276
+ if (count != size()) {
277
+ memcpy(ptr, &vals, count * sizeof(value_type));
278
+ } else {
279
+ _mm256_storeu_si256((__m256i*)ptr, vals);
280
+ }
281
+ }
282
+
283
+ static Vectorized<c10::qint32> loadu(const void* ptr) {
284
+ return Vectorized<c10::qint32>(ptr);
285
+ }
286
+
287
+ static Vectorized<c10::qint32> loadu(const void* ptr, int64_t count) {
288
+ __at_align__ value_type tmp_values[size()];
289
+ // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
290
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
291
+ // instructions while a loop would be compiled to one instruction.
292
+ for (const auto i : c10::irange(size())) {
293
+ tmp_values[i] = 0;
294
+ }
295
+ std::memcpy(
296
+ tmp_values, reinterpret_cast<const value_type*>(ptr), count * sizeof(value_type));
297
+ return _mm256_loadu_si256((const __m256i*)tmp_values);
298
+ }
299
+
300
+ float_vec_return_type dequantize(
301
+ Vectorized<float> scale,
302
+ Vectorized<float> /*zero_point*/,
303
+ Vectorized<float> scale_zp_premul) const {
304
+ __m256 float_vals = _mm256_cvtepi32_ps(vals);
305
+ return {vec::fmadd(scale, Vectorized<float>(float_vals), scale_zp_premul)};
306
+ }
307
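+ // The two dequantize overloads are meant to agree: the fmadd form above matches
+ // the (x - zero_point) * scale form below when the caller passes
+ //   scale_zp_premul = -zero_point * scale
+ // (hence the name scale_neg_zp_premul used in the qint8 variant).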
+
308
+ float_vec_return_type dequantize(
309
+ Vectorized<float> scale,
310
+ Vectorized<float> zero_point) const {
311
+ __m256 float_vals = _mm256_cvtepi32_ps(vals);
312
+ return {(Vectorized<float>(float_vals) - zero_point) * scale};
313
+ }
314
+
315
+ static Vectorized<c10::qint32> quantize(
316
+ const float_vec_return_type& rhs,
317
+ float scale,
318
+ int32_t zero_point,
319
+ float /*inverse_scale*/) {
320
+ Vectorized<c10::qint32> retval;
321
+ auto rhs_data = (__m256)rhs[0];
322
+ at::native::quantize_vec<c10::qint32, /*precision=*/32>(
323
+ scale, zero_point, (float*)&rhs_data, (c10::qint32*)&retval.vals, 8);
324
+ return retval;
325
+ }
326
+
327
+ Vectorized<c10::qint32> maximum(Vectorized<c10::qint32> b) const {
328
+ return _mm256_max_epi32(vals, b.vals);
329
+ }
330
+
331
+ Vectorized<c10::qint32> minimum(Vectorized<c10::qint32> b) const {
332
+ return _mm256_min_epi32(vals, b.vals);
333
+ }
334
+
335
+ Vectorized<c10::qint32> relu(Vectorized<c10::qint32> zero_point) const {
336
+ return maximum(zero_point);
337
+ }
338
+
339
+ Vectorized<c10::qint32> relu6(
340
+ Vectorized<c10::qint32> zero_point,
341
+ Vectorized<c10::qint32> q_six) {
342
+ return _mm256_min_epi32(
343
+ _mm256_max_epi32(vals, zero_point.vals), q_six.vals);
344
+ }
345
+
346
+ int_vec_return_type widening_subtract(Vectorized<c10::qint32> b) const {
347
+ return {_mm256_sub_epi32(vals, b)};
348
+ }
349
+
350
+ static Vectorized<c10::qint32> requantize_from_int(
351
+ const int_vec_return_type& inp,
352
+ float multiplier,
353
+ int32_t zero_point) {
354
+ __m256 multiplier_v = _mm256_set1_ps(multiplier);
355
+ __m256i zero_point_v = _mm256_set1_epi32(zero_point);
356
+
357
+ __m256 scaled = _mm256_mul_ps(_mm256_cvtepi32_ps(inp[0]), multiplier_v);
358
+ __m256i rounded = _mm256_cvtps_epi32(scaled);
359
+ return _mm256_add_epi32(rounded, zero_point_v);
360
+ }
361
+
362
+ private:
363
+ // Load from memory constructor
364
+ Vectorized(const void* ptr) {
365
+ vals = _mm256_loadu_si256((const __m256i*)ptr);
366
+ }
367
+ };
368
+
369
+ template <>
370
+ Vectorized<c10::qint32> inline maximum(const Vectorized<c10::qint32>& a, const Vectorized<c10::qint32>& b) {
371
+ return a.maximum(b);
372
+ }
373
+
374
+ template <>
375
+ Vectorized<c10::qint32> inline operator*(
376
+ const Vectorized<c10::qint32>& a,
377
+ const Vectorized<c10::qint32>& b) {
378
+ return _mm256_mullo_epi32(a, b);
379
+ }
380
+
381
+ template <>
382
+ Vectorized<c10::qint32> inline operator+(
383
+ const Vectorized<c10::qint32>& a,
384
+ const Vectorized<c10::qint32>& b) {
385
+ return _mm256_add_epi32(a, b);
386
+ }
387
+
388
+ /*
389
+ * Convert values from int32 back to int8/uint8
390
+ */
391
+ template <typename T>
392
+ __m256i RequantizeAvx2(
393
+ const std::array<Vectorized<c10::qint32>, 4>& inp,
394
+ __m256 multiplier,
395
+ __m256i zp) {
396
+ static_assert(
397
+ std::is_same<T, int8_t>::value || std::is_same<T, uint8_t>::value,
398
+ "Only int8_t/uint8_t are supported");
399
+ constexpr auto min_val = std::numeric_limits<T>::min();
400
+ constexpr auto max_val = std::numeric_limits<T>::max();
401
+ __m256i permute_mask_v =
402
+ _mm256_set_epi32(0x07, 0x03, 0x06, 0x02, 0x05, 0x01, 0x04, 0x00);
403
+ __m256 x_scaled_v = _mm256_mul_ps(_mm256_cvtepi32_ps(inp[0]), multiplier);
404
+ __m256 y_scaled_v = _mm256_mul_ps(_mm256_cvtepi32_ps(inp[1]), multiplier);
405
+ __m256 z_scaled_v = _mm256_mul_ps(_mm256_cvtepi32_ps(inp[2]), multiplier);
406
+ __m256 w_scaled_v = _mm256_mul_ps(_mm256_cvtepi32_ps(inp[3]), multiplier);
407
+
408
+ __m256i x_rounded_v = _mm256_cvtps_epi32(x_scaled_v);
409
+ __m256i y_rounded_v = _mm256_cvtps_epi32(y_scaled_v);
410
+ __m256i z_rounded_v = _mm256_cvtps_epi32(z_scaled_v);
411
+ __m256i w_rounded_v = _mm256_cvtps_epi32(w_scaled_v);
412
+
413
+ /* Add zero point */
414
+ __m256i x_v = _mm256_add_epi32(x_rounded_v, zp);
415
+ __m256i y_v = _mm256_add_epi32(y_rounded_v, zp);
416
+ __m256i z_v = _mm256_add_epi32(z_rounded_v, zp);
417
+ __m256i w_v = _mm256_add_epi32(w_rounded_v, zp);
418
+
419
+ /* Pack to int16_t and saturate */
420
+ __m256i xy_packed_v = _mm256_packs_epi32(x_v, y_v);
421
+ __m256i zw_packed_v = _mm256_packs_epi32(z_v, w_v);
422
+
423
+ __m256i xyzw_clamped_v =
424
+ pack_saturate_and_clamp<T>(xy_packed_v, zw_packed_v, min_val, max_val);
425
+
426
+ /*
427
+ * xyzw_clamped_v has results in the following layout so we need to
428
+ * permute: x0-3 y0-3 z0-3 w0-3 x4-7 y4-7 z4-7 w4-7
429
+ */
430
+ xyzw_clamped_v = _mm256_permutevar8x32_epi32(xyzw_clamped_v, permute_mask_v);
431
+ return xyzw_clamped_v;
432
+ }
433
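+ // Per 32-bit input element, RequantizeAvx2 above computes
+ //   out[i] = clamp(round(inp[i] * multiplier) + zero_point, min_val, max_val)
+ // with the clamping/saturation performed by pack_saturate_and_clamp while
+ // narrowing int32 -> int16 -> T.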
+
434
+ template<>
435
+ struct Vectorized<c10::qint8> : public Vectorizedqi {
436
+ static constexpr int size() {
437
+ return 32;
438
+ }
439
+
440
+ static constexpr int float_num_vecs() {
441
+ return 4;
442
+ }
443
+
444
+ static constexpr int int_num_vecs() {
445
+ return 4;
446
+ }
447
+
448
+ using float_vec_return_type = std::array<Vectorized<float>, 4>;
449
+ using int_vec_return_type = std::array<Vectorized<c10::qint32>, 4>;
450
+ using value_type = typename c10::qint8::underlying;
451
+
452
+ public:
453
+ using Vectorizedqi::Vectorizedqi;
454
+
455
+ Vectorized() {}
456
+ Vectorized(__m256i vals_) { vals = vals_;}
457
+
458
+ // Broadcast constructor
459
+ Vectorized(const c10::qint8& val) {
460
+ value_type uw = val.val_;
461
+ vals = _mm256_set1_epi8(uw);
462
+ }
463
+
464
+ // This is needed because the compiler emits awful code for the default
465
+ // constructor for moving the enum
466
+ // NOLINTNEXTLINE(clang-diagnostic-deprecated-copy)
467
+ C10_CLANG_DIAGNOSTIC_PUSH()
468
+ #if C10_CLANG_HAS_WARNING("-Wdeprecated-copy")
469
+ C10_CLANG_DIAGNOSTIC_IGNORE("-Wdeprecated-copy")
470
+ #endif
471
+ Vectorized(const Vectorized<c10::qint8>& other) : Vectorizedqi(other.vals) { }
472
+ C10_CLANG_DIAGNOSTIC_POP()
473
+
474
+ void store(void* ptr, int count = size()) const {
475
+ if (count != size()) {
476
+ memcpy(ptr, &vals, count * sizeof(value_type));
477
+ } else {
478
+ _mm256_storeu_si256((__m256i*)ptr, vals);
479
+ }
480
+ }
481
+
482
+ static Vectorized<c10::qint8> loadu(const void* ptr) {
483
+ return Vectorized<c10::qint8>(ptr);
484
+ }
485
+
486
+ static Vectorized<c10::qint8> loadu(const void* ptr, int64_t count) {
487
+ __at_align__ value_type tmp_values[size()];
488
+ // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
489
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
490
+ // instructions while a loop would be compiled to one instruction.
491
+ for (const auto i : c10::irange(size())) {
492
+ tmp_values[i] = 0;
493
+ }
494
+ std::memcpy(
495
+ tmp_values, reinterpret_cast<const value_type*>(ptr), count * sizeof(value_type));
496
+ return _mm256_loadu_si256((const __m256i*)tmp_values);
497
+ }
498
+
499
+ private:
500
+ __m256i cvtepi8_epi32(__m128i epi8_vals) const {
501
+ return _mm256_cvtepi8_epi32(epi8_vals);
502
+ }
503
+
504
+ public:
505
+ float_vec_return_type dequantize(
506
+ Vectorized<float> scale,
507
+ Vectorized<float> /*zero_point*/,
508
+ Vectorized<float> scale_neg_zp_premul) const {
509
+ __m128i int_val0 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 0));
510
+ __m128i int_val1 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 1));
511
+ __m128i int_val2 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 2));
512
+ __m128i int_val3 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 3));
513
+
514
+ __m256 float_val0 = _mm256_cvtepi32_ps(cvtepi8_epi32(int_val0));
515
+ __m256 float_val1 = _mm256_cvtepi32_ps(cvtepi8_epi32(int_val1));
516
+ __m256 float_val2 = _mm256_cvtepi32_ps(cvtepi8_epi32(int_val2));
517
+ __m256 float_val3 = _mm256_cvtepi32_ps(cvtepi8_epi32(int_val3));
518
+
519
+ auto val0 =
520
+ vec::fmadd(scale, Vectorized<float>(float_val0), scale_neg_zp_premul);
521
+ auto val1 =
522
+ vec::fmadd(scale, Vectorized<float>(float_val1), scale_neg_zp_premul);
523
+ auto val2 =
524
+ vec::fmadd(scale, Vectorized<float>(float_val2), scale_neg_zp_premul);
525
+ auto val3 =
526
+ vec::fmadd(scale, Vectorized<float>(float_val3), scale_neg_zp_premul);
527
+ return {val0, val1, val2, val3};
528
+ }
529
+
530
+ float_vec_return_type dequantize(
531
+ Vectorized<float> scale,
532
+ Vectorized<float> zero_point) const {
533
+ __m128i int_val0 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 0));
534
+ __m128i int_val1 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 1));
535
+ __m128i int_val2 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 2));
536
+ __m128i int_val3 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 3));
537
+
538
+ __m256 float_val0 = _mm256_cvtepi32_ps(cvtepi8_epi32(int_val0));
539
+ __m256 float_val1 = _mm256_cvtepi32_ps(cvtepi8_epi32(int_val1));
540
+ __m256 float_val2 = _mm256_cvtepi32_ps(cvtepi8_epi32(int_val2));
541
+ __m256 float_val3 = _mm256_cvtepi32_ps(cvtepi8_epi32(int_val3));
542
+
543
+ auto val0 = (Vectorized<float>(float_val0) - zero_point) * scale;
544
+ auto val1 = (Vectorized<float>(float_val1) - zero_point) * scale;
545
+ auto val2 = (Vectorized<float>(float_val2) - zero_point) * scale;
546
+ auto val3 = (Vectorized<float>(float_val3) - zero_point) * scale;
547
+ return {val0, val1, val2, val3};
548
+ }
549
+
550
+ static Vectorized<c10::qint8> quantize(
551
+ const float_vec_return_type& rhs,
552
+ float /*scale*/,
553
+ int32_t zero_point,
554
+ float inverse_scale) {
555
+ auto* rhs_data = (float*)rhs.data();
556
+ int8_t quantized_values[32];
557
+ QuantizeAvx2<value_type>(
558
+ rhs_data, quantized_values, 32, inverse_scale, zero_point);
559
+ return Vectorized<c10::qint8>::loadu(quantized_values);
560
+ }
561
+
562
+ Vectorized<c10::qint8> maximum(Vectorized<c10::qint8> b) const {
563
+ return _mm256_max_epi8(vals, b.vals);
564
+ }
565
+
566
+ Vectorized<c10::qint8> minimum(Vectorized<c10::qint8> b) const {
567
+ return _mm256_min_epi8(vals, b.vals);
568
+ }
569
+
570
+ Vectorized<c10::qint8> relu(Vectorized<c10::qint8> zero_point) const {
571
+ return maximum(zero_point);
572
+ }
573
+
574
+ Vectorized<c10::qint8> relu6(
575
+ Vectorized<c10::qint8> zero_point,
576
+ Vectorized<c10::qint8> q_six) {
577
+ return _mm256_min_epi8(
578
+ _mm256_max_epi8(vals, zero_point.vals), q_six.vals);
579
+ }
580
+
581
+ int_vec_return_type widening_subtract(Vectorized<c10::qint8> b) const {
582
+ __m128i int_val0 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 0));
583
+ __m128i int_val1 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 1));
584
+ __m128i int_val2 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 2));
585
+ __m128i int_val3 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 3));
586
+
587
+ __m256i int32_val0 = cvtepi8_epi32(int_val0);
588
+ __m256i int32_val1 = cvtepi8_epi32(int_val1);
589
+ __m256i int32_val2 = cvtepi8_epi32(int_val2);
590
+ __m256i int32_val3 = cvtepi8_epi32(int_val3);
591
+
592
+ __m128i int_b0 = _mm_set1_epi64x(_mm256_extract_epi64(b, 0));
593
+ __m128i int_b1 = _mm_set1_epi64x(_mm256_extract_epi64(b, 1));
594
+ __m128i int_b2 = _mm_set1_epi64x(_mm256_extract_epi64(b, 2));
595
+ __m128i int_b3 = _mm_set1_epi64x(_mm256_extract_epi64(b, 3));
596
+
597
+ __m256i int32_b0 = cvtepi8_epi32(int_b0);
598
+ __m256i int32_b1 = cvtepi8_epi32(int_b1);
599
+ __m256i int32_b2 = cvtepi8_epi32(int_b2);
600
+ __m256i int32_b3 = cvtepi8_epi32(int_b3);
601
+
602
+ __m256i res_0 = _mm256_sub_epi32(int32_val0, int32_b0);
603
+ __m256i res_1 = _mm256_sub_epi32(int32_val1, int32_b1);
604
+ __m256i res_2 = _mm256_sub_epi32(int32_val2, int32_b2);
605
+ __m256i res_3 = _mm256_sub_epi32(int32_val3, int32_b3);
606
+
607
+ return {Vectorized<c10::qint32>(res_0),
608
+ Vectorized<c10::qint32>(res_1),
609
+ Vectorized<c10::qint32>(res_2),
610
+ Vectorized<c10::qint32>(res_3)};
611
+ }
612
+
613
+ static Vectorized<c10::qint8> requantize_from_int(
614
+ const int_vec_return_type& inp,
615
+ float multiplier,
616
+ int32_t zero_point) {
617
+ __m256 multiplier_v = _mm256_set1_ps(multiplier);
618
+ __m256i zero_point_v = _mm256_set1_epi32(zero_point);
619
+ return RequantizeAvx2<value_type>(inp, multiplier_v, zero_point_v);
620
+ }
621
+
622
+ private:
623
+ // Load from memory constructor
624
+ Vectorized(const void* ptr) {
625
+ vals = _mm256_loadu_si256((const __m256i*)ptr);
626
+ }
627
+ };
628
+
629
+ template <>
630
+ Vectorized<c10::qint8> inline maximum(const Vectorized<c10::qint8>& a, const Vectorized<c10::qint8>& b) {
631
+ return a.maximum(b);
632
+ }
633
+
634
+ template<>
635
+ struct Vectorized<c10::quint8> : public Vectorizedqi {
636
+ static constexpr int size() {
637
+ return 32;
638
+ }
639
+
640
+ static constexpr int float_num_vecs() {
641
+ return 4;
642
+ }
643
+
644
+ static constexpr int int_num_vecs() {
645
+ return 4;
646
+ }
647
+
648
+ using float_vec_return_type = std::array<Vectorized<float>, 4>;
649
+ using int_vec_return_type = std::array<Vectorized<c10::qint32>, 4>;
650
+ using value_type = typename c10::quint8::underlying;
651
+
652
+ public:
653
+ using Vectorizedqi::Vectorizedqi;
654
+ Vectorized() {}
655
+
656
+ Vectorized(__m256i vals_) { vals = vals_;}
657
+
658
+ // Broadcast constructor
659
+ Vectorized(const c10::quint8& val) {
660
+ value_type uw = val.val_;
661
+ vals = _mm256_set1_epi8(uw);
662
+ }
663
+
664
+ // NOLINTNEXTLINE(clang-diagnostic-deprecated-copy)
665
+ C10_CLANG_DIAGNOSTIC_PUSH()
666
+ #if C10_CLANG_HAS_WARNING("-Wdeprecated-copy")
667
+ C10_CLANG_DIAGNOSTIC_IGNORE("-Wdeprecated-copy")
668
+ #endif
669
+ Vectorized(const Vectorized<c10::quint8>& other) : Vectorizedqi(other.vals) { }
670
+ C10_CLANG_DIAGNOSTIC_POP()
671
+
672
+ void store(void* ptr, int count = size()) const {
673
+ if (count != size()) {
674
+ memcpy(ptr, &vals, count * sizeof(value_type));
675
+ } else {
676
+ _mm256_storeu_si256((__m256i*)ptr, vals);
677
+ }
678
+ }
679
+
680
+ static Vectorized<c10::quint8> loadu(const void* ptr) {
681
+ return Vectorized<c10::quint8>(ptr);
682
+ }
683
+
684
+ static Vectorized<c10::quint8> loadu(const void* ptr, int64_t count) {
685
+ __at_align__ value_type tmp_values[size()];
686
+ // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
687
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
688
+ // instructions while a loop would be compiled to one instruction.
689
+ for (const auto i : c10::irange(size())) {
690
+ tmp_values[i] = 0;
691
+ }
692
+ std::memcpy(
693
+ tmp_values, reinterpret_cast<const value_type*>(ptr), count * sizeof(value_type));
694
+ return _mm256_loadu_si256((const __m256i*)tmp_values);
695
+ }
696
+
697
+ private:
698
+ __m256i cvtepu8_epi32(__m128i epu8_vals) const {
699
+ return _mm256_cvtepu8_epi32(epu8_vals);
700
+ }
701
+
702
+ public:
703
+ float_vec_return_type dequantize(
704
+ Vectorized<float> scale,
705
+ Vectorized<float> /*zero_point*/,
706
+ Vectorized<float> scale_zp_premul) const {
707
+ __m128i int_val0 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 0));
708
+ __m128i int_val1 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 1));
709
+ __m128i int_val2 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 2));
710
+ __m128i int_val3 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 3));
711
+
712
+ __m256 float_val0 = _mm256_cvtepi32_ps(cvtepu8_epi32(int_val0));
713
+ __m256 float_val1 = _mm256_cvtepi32_ps(cvtepu8_epi32(int_val1));
714
+ __m256 float_val2 = _mm256_cvtepi32_ps(cvtepu8_epi32(int_val2));
715
+ __m256 float_val3 = _mm256_cvtepi32_ps(cvtepu8_epi32(int_val3));
716
+
717
+ auto val0 =
718
+ vec::fmadd(scale, Vectorized<float>(float_val0), scale_zp_premul);
719
+ auto val1 =
720
+ vec::fmadd(scale, Vectorized<float>(float_val1), scale_zp_premul);
721
+ auto val2 =
722
+ vec::fmadd(scale, Vectorized<float>(float_val2), scale_zp_premul);
723
+ auto val3 =
724
+ vec::fmadd(scale, Vectorized<float>(float_val3), scale_zp_premul);
725
+ return {val0, val1, val2, val3};
726
+ }
727
+
728
+ float_vec_return_type dequantize(
729
+ Vectorized<float> scale,
730
+ Vectorized<float> zero_point) const {
731
+ __m128i int_val0 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 0));
732
+ __m128i int_val1 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 1));
733
+ __m128i int_val2 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 2));
734
+ __m128i int_val3 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 3));
735
+
736
+ __m256 float_val0 = _mm256_cvtepi32_ps(cvtepu8_epi32(int_val0));
737
+ __m256 float_val1 = _mm256_cvtepi32_ps(cvtepu8_epi32(int_val1));
738
+ __m256 float_val2 = _mm256_cvtepi32_ps(cvtepu8_epi32(int_val2));
739
+ __m256 float_val3 = _mm256_cvtepi32_ps(cvtepu8_epi32(int_val3));
740
+
741
+ auto val0 = (Vectorized<float>(float_val0) - zero_point) * scale;
742
+ auto val1 = (Vectorized<float>(float_val1) - zero_point) * scale;
743
+ auto val2 = (Vectorized<float>(float_val2) - zero_point) * scale;
744
+ auto val3 = (Vectorized<float>(float_val3) - zero_point) * scale;
745
+ return {val0, val1, val2, val3};
746
+ }
747
+
748
+ static Vectorized<c10::quint8> quantize(
749
+ const float_vec_return_type& rhs,
750
+ float /*scale*/,
751
+ int32_t zero_point,
752
+ float inverse_scale) {
753
+ auto* rhs_data = (float*)rhs.data();
754
+ uint8_t quantized_values[32];
755
+ QuantizeAvx2<value_type>(
756
+ rhs_data, quantized_values, 32, inverse_scale, zero_point);
757
+ return Vectorized<c10::quint8>::loadu(quantized_values);
758
+ }
759
+
760
+ Vectorized<c10::quint8> maximum(Vectorized<c10::quint8> b) const {
761
+ return _mm256_max_epu8(vals, b.vals);
762
+ }
763
+
764
+ Vectorized<c10::quint8> minimum(Vectorized<c10::quint8> b) const {
765
+ return _mm256_min_epu8(vals, b.vals);
766
+ }
767
+
768
+ Vectorized<c10::quint8> relu(Vectorized<c10::quint8> zero_point) const {
769
+ return maximum(zero_point);
770
+ }
771
+
772
+ Vectorized<c10::quint8> relu6(
773
+ Vectorized<c10::quint8> zero_point,
774
+ Vectorized<c10::quint8> q_six) {
775
+ return _mm256_min_epu8(
776
+ _mm256_max_epu8(vals, zero_point.vals), q_six.vals);
777
+ }
778
+
779
+ int_vec_return_type widening_subtract(Vectorized<c10::quint8> b) const {
780
+ __m128i int_val0 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 0));
781
+ __m128i int_val1 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 1));
782
+ __m128i int_val2 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 2));
783
+ __m128i int_val3 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 3));
784
+
785
+ __m256i int32_val0 = cvtepu8_epi32(int_val0);
786
+ __m256i int32_val1 = cvtepu8_epi32(int_val1);
787
+ __m256i int32_val2 = cvtepu8_epi32(int_val2);
788
+ __m256i int32_val3 = cvtepu8_epi32(int_val3);
789
+
790
+ __m128i int_b0 = _mm_set1_epi64x(_mm256_extract_epi64(b, 0));
791
+ __m128i int_b1 = _mm_set1_epi64x(_mm256_extract_epi64(b, 1));
792
+ __m128i int_b2 = _mm_set1_epi64x(_mm256_extract_epi64(b, 2));
793
+ __m128i int_b3 = _mm_set1_epi64x(_mm256_extract_epi64(b, 3));
794
+
795
+ __m256i int32_b0 = cvtepu8_epi32(int_b0);
796
+ __m256i int32_b1 = cvtepu8_epi32(int_b1);
797
+ __m256i int32_b2 = cvtepu8_epi32(int_b2);
798
+ __m256i int32_b3 = cvtepu8_epi32(int_b3);
799
+
800
+ __m256i res_0 = _mm256_sub_epi32(int32_val0, int32_b0);
801
+ __m256i res_1 = _mm256_sub_epi32(int32_val1, int32_b1);
802
+ __m256i res_2 = _mm256_sub_epi32(int32_val2, int32_b2);
803
+ __m256i res_3 = _mm256_sub_epi32(int32_val3, int32_b3);
804
+ return {Vectorized<c10::qint32>(res_0),
805
+ Vectorized<c10::qint32>(res_1),
806
+ Vectorized<c10::qint32>(res_2),
807
+ Vectorized<c10::qint32>(res_3)};
808
+ }
809
+
810
+ static Vectorized<c10::quint8> requantize_from_int(
811
+ const int_vec_return_type& inp,
812
+ float multiplier,
813
+ int32_t zero_point) {
814
+ __m256 multiplier_v = _mm256_set1_ps(multiplier);
815
+ __m256i zero_point_v = _mm256_set1_epi32(zero_point);
816
+ return RequantizeAvx2<value_type>(inp, multiplier_v, zero_point_v);
817
+ }
818
+
819
+ private:
820
+
821
+ // Load from memory constructor
822
+ Vectorized(const void* ptr) {
823
+ vals = _mm256_loadu_si256((const __m256i*)ptr);
824
+ }
825
+ };
826
+
827
+ template <>
828
+ Vectorized<c10::quint8> inline maximum(const Vectorized<c10::quint8>& a, const Vectorized<c10::quint8>& b) {
829
+ return a.maximum(b);
830
+ }
831
+
832
+ #else
833
+
834
+ // NOTE: These are low-performance implementations that we fall back on
835
+ // if we are not building with AVX2. This may not be an issue, because
836
+ // currently for quantization we assume the user has at least AVX512
837
+ // installed, so these can simply act as a reference implementation.
838
+ //
839
+ // If in the future we relax this requirement (AVX2+), we should probably
840
+ // revisit these implementations
841
+
842
+ template <
843
+ typename T,
844
+ typename float_vec_return_type_,
845
+ typename int_vec_return_type_,
846
+ int size_>
847
+ struct VectorizedQuantizedConverter {
848
+ static constexpr int size() {
849
+ return size_;
850
+ }
851
+
852
+ static constexpr int float_num_vecs() {
853
+ return size() / 8;
854
+ }
855
+
856
+ static constexpr int int_num_vecs() {
857
+ return size() / 8;
858
+ }
859
+
860
+ using float_vec_return_type = float_vec_return_type_;
861
+ using int_vec_return_type = int_vec_return_type_;
862
+
863
+ using value_type = typename T::underlying;
864
+ std::array<value_type, size_> vals;
865
+
866
+ VectorizedQuantizedConverter(T val) {
867
+ for (const auto i : c10::irange(size())) {
868
+ vals[i] = val.val_;
869
+ }
870
+ }
871
+
872
+ VectorizedQuantizedConverter(const void* ptr) {
873
+ memcpy(vals.data(), ptr, sizeof(value_type) * size());
874
+ }
875
+
876
+ void store(void* ptr, int count = size()) const {
877
+ memcpy(ptr, vals.data(), count * sizeof(value_type));
878
+ }
879
+
880
+ float_vec_return_type dequantize(
881
+ Vectorized<float> scale,
882
+ Vectorized<float> zero_point,
883
+ Vectorized<float> /*scale_zp_premul*/) const {
884
+ float_vec_return_type rv;
885
+ for (const auto i : c10::irange(float_num_vecs())) {
886
+ float tmp_vals[8];
887
+ for (const auto j : c10::irange(8)) {
888
+ tmp_vals[j] = at::native::dequantize_val<T>(
889
+ scale[j], zero_point[j], T(vals[8 * i + j]));
890
+ }
891
+ rv[i] = Vectorized<float>(tmp_vals[0],
892
+ tmp_vals[1],
893
+ tmp_vals[2],
894
+ tmp_vals[3],
895
+ tmp_vals[4],
896
+ tmp_vals[5],
897
+ tmp_vals[6],
898
+ tmp_vals[7]);
899
+ }
900
+ return rv;
901
+ }
902
+
903
+ float_vec_return_type dequantize(
904
+ Vectorized<float> scale,
905
+ Vectorized<float> zero_point) const {
906
+ Vectorized<float> scale_zp_premul;
907
+ return dequantize(scale, zero_point, scale_zp_premul);
908
+ }
909
+
910
+ protected:
911
+ VectorizedQuantizedConverter() {}
912
+ };
913
+
914
+ template <>
915
+ struct Vectorized<c10::qint32> : public VectorizedQuantizedConverter<
916
+ c10::qint32,
917
+ std::array<Vectorized<float>, 1>,
918
+ std::array<Vectorized<c10::qint32>, 1>,
919
+ 8> {
920
+ Vectorized()
921
+ : VectorizedQuantizedConverter<
922
+ c10::qint32,
923
+ std::array<Vectorized<float>, 1>,
924
+ std::array<Vectorized<c10::qint32>, 1>,
925
+ 8>() {}
926
+ Vectorized(c10::qint32 val)
927
+ : VectorizedQuantizedConverter<
928
+ c10::qint32,
929
+ std::array<Vectorized<float>, 1>,
930
+ std::array<Vectorized<c10::qint32>, 1>,
931
+ 8>(val) {}
932
+ Vectorized(const void* ptr)
933
+ : VectorizedQuantizedConverter<
934
+ c10::qint32,
935
+ std::array<Vectorized<float>, 1>,
936
+ std::array<Vectorized<c10::qint32>, 1>,
937
+ 8>(ptr) {}
938
+
939
+ static Vectorized<c10::qint32> loadu(const void* ptr) {
940
+ return Vectorized<c10::qint32>(ptr);
941
+ }
942
+
943
+ static Vectorized<c10::qint32> loadu(const void* ptr, int64_t count) {
944
+ __at_align__ value_type tmp_values[size()];
945
+ // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
946
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
947
+ // instructions while a loop would be compiled to one instruction.
948
+ for (const auto i : c10::irange(size())) {
949
+ tmp_values[i] = 0;
950
+ }
951
+ std::memcpy(
952
+ tmp_values, reinterpret_cast<const value_type*>(ptr), count * sizeof(value_type));
953
+ return Vectorized<c10::qint32>(tmp_values);
954
+ }
955
+
956
+ static Vectorized<c10::qint32> quantize(
957
+ const float_vec_return_type& rhs,
958
+ float scale,
959
+ int32_t zero_point,
960
+ float /*inverse_scale*/) {
961
+ std::array<value_type, size()> qvals;
962
+ std::array<float, float_num_vecs() * 8> float_vals;
963
+
964
+ for (const auto i : c10::irange(float_num_vecs())) {
965
+ rhs[i].store(&float_vals[i * 8], 8);
966
+ }
967
+
968
+ at::native::quantize_vec<c10::qint32, /*precision=*/32>(
969
+ scale,
970
+ zero_point,
971
+ float_vals.data(),
972
+ (c10::qint32*)qvals.data(),
973
+ 8 * float_num_vecs());
974
+
975
+ return Vectorized<c10::qint32>::loadu(qvals.data());
976
+ }
977
+
978
+ Vectorized<c10::qint32> maximum(Vectorized<c10::qint32> b) const {
979
+ Vectorized<c10::qint32> retval;
980
+ for (const auto i : c10::irange(size())) {
981
+ retval.vals[i] = std::max<value_type>(vals[i], b.vals[i]);
982
+ }
983
+ return retval;
984
+ }
985
+
986
+ Vectorized<c10::qint32> minimum(Vectorized<c10::qint32> b) const {
987
+ Vectorized<c10::qint32> retval;
988
+ for (const auto i : c10::irange(size())) {
989
+ retval.vals[i] = std::min<value_type>(vals[i], b.vals[i]);
990
+ }
991
+ return retval;
992
+ }
993
+
994
+ Vectorized<c10::qint32> relu(Vectorized<c10::qint32> zero_point) const {
995
+ return maximum(zero_point);
996
+ }
997
+
998
+
999
+ Vectorized<c10::qint32> relu6(
1000
+ Vectorized<c10::qint32> zero_point,
1001
+ Vectorized<c10::qint32> q_six) {
1002
+ Vectorized<c10::qint32> retval;
1003
+ for (const auto i : c10::irange(size())) {
1004
+ retval.vals[i] = std::min<value_type>(
1005
+ std::max<value_type>(vals[i], zero_point.vals[i]), q_six.vals[i]);
1006
+ }
1007
+ return retval;
1008
+ }
1009
+
1010
+ int_vec_return_type widening_subtract(Vectorized<c10::qint32> b) const {
1011
+ int_vec_return_type retval;
1012
+ for (const auto i : c10::irange(size())) {
1013
+ retval[0].vals[i] = vals[i] - b.vals[i];
1014
+ }
1015
+ return retval;
1016
+ }
1017
+
1018
+ static Vectorized<c10::qint32> requantize_from_int(
1019
+ const int_vec_return_type& inp,
1020
+ float multiplier,
1021
+ int32_t zero_point) {
1022
+ Vectorized<c10::qint32> retval;
1023
+ for (const auto i : c10::irange(size())) {
1024
+ retval.vals[i] =
1025
+ std::nearbyint(static_cast<float>(inp[0].vals[i]) * multiplier) +
1026
+ zero_point;
1027
+ }
1028
+ return retval;
1029
+ }
1030
+ };
1031
+
1032
+ template <>
1033
+ Vectorized<c10::qint32> inline maximum(const Vectorized<c10::qint32>& a, const Vectorized<c10::qint32>& b) {
1034
+ return a.maximum(b);
1035
+ }
1036
+
1037
+ template <>
1038
+ Vectorized<c10::qint32> inline operator*(
1039
+ const Vectorized<c10::qint32>& a,
1040
+ const Vectorized<c10::qint32>& b) {
1041
+ Vectorized<c10::qint32> retval;
1042
+ for (const auto i : c10::irange(std::decay_t<decltype(a)>::size())) {
1043
+ retval.vals[i] = a.vals[i] * b.vals[i];
1044
+ }
1045
+ return retval;
1046
+ }
1047
+
1048
+ template <>
1049
+ Vectorized<c10::qint32> inline operator+(
1050
+ const Vectorized<c10::qint32>& a,
1051
+ const Vectorized<c10::qint32>& b) {
1052
+ Vectorized<c10::qint32> retval;
1053
+ for (const auto i : c10::irange(std::decay_t<decltype(a)>::size())) {
1054
+ retval.vals[i] = a.vals[i] + b.vals[i];
1055
+ }
1056
+ return retval;
1057
+ }
1058
+
1059
+ template <>
1060
+ struct Vectorized<c10::qint8> : public VectorizedQuantizedConverter<
1061
+ c10::qint8,
1062
+ std::array<Vectorized<float>, 4>,
1063
+ std::array<Vectorized<c10::qint32>, 4>,
1064
+ 32> {
1065
+ Vectorized()
1066
+ : VectorizedQuantizedConverter<
1067
+ c10::qint8,
1068
+ std::array<Vectorized<float>, 4>,
1069
+ std::array<Vectorized<c10::qint32>, 4>,
1070
+ 32>() {}
1071
+ Vectorized(c10::qint8 val)
1072
+ : VectorizedQuantizedConverter<
1073
+ c10::qint8,
1074
+ std::array<Vectorized<float>, 4>,
1075
+ std::array<Vectorized<c10::qint32>, 4>,
1076
+ 32>(val) {}
1077
+ Vectorized(const void* ptr)
1078
+ : VectorizedQuantizedConverter<
1079
+ c10::qint8,
1080
+ std::array<Vectorized<float>, 4>,
1081
+ std::array<Vectorized<c10::qint32>, 4>,
1082
+ 32>(ptr) {}
1083
+
1084
+ static Vectorized<c10::qint8> loadu(const void* ptr) {
1085
+ return Vectorized<c10::qint8>(ptr);
1086
+ }
1087
+
1088
+ static Vectorized<c10::qint8> loadu(const void* ptr, int64_t count) {
1089
+ __at_align__ value_type tmp_values[size()];
1090
+ // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
1091
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
1092
+ // instructions while a loop would be compiled to one instruction.
1093
+ for (const auto i : c10::irange(size())) {
1094
+ tmp_values[i] = 0;
1095
+ }
1096
+ std::memcpy(
1097
+ tmp_values, reinterpret_cast<const value_type*>(ptr), count * sizeof(value_type));
1098
+ return Vectorized<c10::qint8>(tmp_values);
1099
+ }
1100
+
1101
+ static Vectorized<c10::qint8> quantize(
1102
+ const float_vec_return_type& rhs,
1103
+ float scale,
1104
+ int32_t zero_point,
1105
+ float /*inverse_scale*/) {
1106
+ std::array<value_type, size()> qvals;
1107
+ std::array<float, float_num_vecs() * 8> float_vals;
1108
+
1109
+ for (const auto i : c10::irange(float_num_vecs())) {
1110
+ rhs[i].store(&float_vals[i * 8], 8);
1111
+ }
1112
+
1113
+ at::native::quantize_vec<c10::qint8>(
1114
+ scale,
1115
+ zero_point,
1116
+ float_vals.data(),
1117
+ (c10::qint8*)qvals.data(),
1118
+ 8 * float_num_vecs());
1119
+
1120
+ return Vectorized<c10::qint8>::loadu(qvals.data());
1121
+ }
1122
+
1123
+ Vectorized<c10::qint8> maximum(Vectorized<c10::qint8> b) const {
1124
+ Vectorized<c10::qint8> retval;
1125
+ for (const auto i : c10::irange(size())) {
1126
+ retval.vals[i] = std::max<value_type>(vals[i], b.vals[i]);
1127
+ }
1128
+ return retval;
1129
+ }
1130
+
1131
+ Vectorized<c10::qint8> minimum(Vectorized<c10::qint8> b) const {
1132
+ Vectorized<c10::qint8> retval;
1133
+ for (const auto i : c10::irange(size())) {
1134
+ retval.vals[i] = std::min<value_type>(vals[i], b.vals[i]);
1135
+ }
1136
+ return retval;
1137
+ }
1138
+
1139
+ Vectorized<c10::qint8> relu(Vectorized<c10::qint8> zero_point) const {
1140
+ return maximum(zero_point);
1141
+ }
1142
+
1143
+ Vectorized<c10::qint8> relu6(
1144
+ Vectorized<c10::qint8> zero_point,
1145
+ Vectorized<c10::qint8> q_six) {
1146
+ Vectorized<c10::qint8> retval;
1147
+ for (const auto i : c10::irange(size())) {
1148
+ retval.vals[i] = std::min<value_type>(
1149
+ std::max<value_type>(vals[i], zero_point.vals[i]), q_six.vals[i]);
1150
+ }
1151
+ return retval;
1152
+ }
1153
+
1154
+ int_vec_return_type widening_subtract(Vectorized<c10::qint8> b) const {
1155
+ int_vec_return_type retval;
1156
+ constexpr int elem_per_int_vec = size() / int_num_vecs();
1157
+ for (const auto i : c10::irange(int_num_vecs())) {
1158
+ for (const auto j : c10::irange(elem_per_int_vec)) {
1159
+ retval[i].vals[j] =
1160
+ static_cast<int32_t>(vals[i * elem_per_int_vec + j]) -
1161
+ static_cast<int32_t>(b.vals[i * elem_per_int_vec + j]);
1162
+ }
1163
+ }
1164
+ return retval;
1165
+ }
1166
+ static Vectorized<c10::qint8> requantize_from_int(
1167
+ const int_vec_return_type& inp,
1168
+ float multiplier,
1169
+ int32_t zero_point) {
1170
+ constexpr int elem_per_int_vec = size() / int_num_vecs();
1171
+ constexpr auto min_val = std::numeric_limits<value_type>::min();
1172
+ constexpr auto max_val = std::numeric_limits<value_type>::max();
1173
+ Vectorized<c10::qint8> retval;
1174
+ for (const auto i : c10::irange(int_num_vecs())) {
1175
+ for (const auto j : c10::irange(elem_per_int_vec)) {
1176
+ int32_t rounded =
1177
+ std::nearbyint(static_cast<float>(inp[i].vals[j]) * multiplier) +
1178
+ zero_point;
1179
+ retval.vals[i * elem_per_int_vec + j] =
1180
+ std::min<int32_t>(std::max<int32_t>(rounded, min_val), max_val);
1181
+ }
1182
+ }
1183
+ return retval;
1184
+ }
1185
+ };
1186
+
1187
+ template <>
1188
+ Vectorized<c10::qint8> inline maximum(const Vectorized<c10::qint8>& a, const Vectorized<c10::qint8>& b) {
1189
+ return a.maximum(b);
1190
+ }
1191
+
1192
+ template <>
1193
+ struct Vectorized<c10::quint8> : public VectorizedQuantizedConverter<
1194
+ c10::quint8,
1195
+ std::array<Vectorized<float>, 4>,
1196
+ std::array<Vectorized<c10::qint32>, 4>,
1197
+ 32> {
1198
+ Vectorized()
1199
+ : VectorizedQuantizedConverter<
1200
+ c10::quint8,
1201
+ std::array<Vectorized<float>, 4>,
1202
+ std::array<Vectorized<c10::qint32>, 4>,
1203
+ 32>() {}
1204
+ Vectorized(c10::quint8 val)
1205
+ : VectorizedQuantizedConverter<
1206
+ c10::quint8,
1207
+ std::array<Vectorized<float>, 4>,
1208
+ std::array<Vectorized<c10::qint32>, 4>,
1209
+ 32>(val) {}
1210
+ Vectorized(const void* ptr)
1211
+ : VectorizedQuantizedConverter<
1212
+ c10::quint8,
1213
+ std::array<Vectorized<float>, 4>,
1214
+ std::array<Vectorized<c10::qint32>, 4>,
1215
+ 32>(ptr) {}
1216
+
1217
+ static Vectorized<c10::quint8> loadu(const void* ptr) {
1218
+ return Vectorized<c10::quint8>(ptr);
1219
+ }
1220
+
1221
+ static Vectorized<c10::quint8> loadu(const void* ptr, int64_t count) {
1222
+ __at_align__ value_type tmp_values[size()];
1223
+ // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
1224
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
1225
+ // instructions while a loop would be compiled to one instruction.
1226
+ for (const auto i : c10::irange(size())) {
1227
+ tmp_values[i] = 0;
1228
+ }
1229
+ std::memcpy(
1230
+ tmp_values, reinterpret_cast<const value_type*>(ptr), count * sizeof(value_type));
1231
+ return Vectorized<c10::quint8>(tmp_values);
1232
+ }
1233
+
1234
+ static Vectorized<c10::quint8> quantize(
1235
+ const float_vec_return_type& rhs,
1236
+ float scale,
1237
+ int32_t zero_point,
1238
+ float /*inverse_scale*/) {
1239
+ std::array<value_type, size()> qvals;
1240
+ std::array<float, float_num_vecs() * 8> float_vals;
1241
+
1242
+ for (const auto i : c10::irange(float_num_vecs())) {
1243
+ rhs[i].store(&float_vals[i * 8], 8);
1244
+ }
1245
+
1246
+ at::native::quantize_vec<c10::quint8>(
1247
+ scale,
1248
+ zero_point,
1249
+ float_vals.data(),
1250
+ (c10::quint8*)qvals.data(),
1251
+ 8 * float_num_vecs());
1252
+
1253
+ return Vectorized<c10::quint8>::loadu(qvals.data());
1254
+ }
1255
+
1256
+ Vectorized<c10::quint8> maximum(Vectorized<c10::quint8> b) const {
1257
+ Vectorized<c10::quint8> retval;
1258
+ for (const auto i : c10::irange(size())) {
1259
+ retval.vals[i] = std::max<value_type>(vals[i], b.vals[i]);
1260
+ }
1261
+ return retval;
1262
+ }
1263
+
1264
+ Vectorized<c10::quint8> minimum(Vectorized<c10::quint8> b) const {
1265
+ Vectorized<c10::quint8> retval;
1266
+ for (const auto i : c10::irange(size())) {
1267
+ retval.vals[i] = std::min<value_type>(vals[i], b.vals[i]);
1268
+ }
1269
+ return retval;
1270
+ }
1271
+
1272
+ Vectorized<c10::quint8> relu(Vectorized<c10::quint8> zero_point) const {
1273
+ return maximum(zero_point);
1274
+ }
1275
+
1276
+
1277
+ Vectorized<c10::quint8> relu6(
1278
+ Vectorized<c10::quint8> zero_point,
1279
+ Vectorized<c10::quint8> q_six) {
1280
+ Vectorized<c10::quint8> retval;
1281
+ for (const auto i : c10::irange(size())) {
1282
+ retval.vals[i] = std::min<value_type>(
1283
+ std::max<value_type>(vals[i], zero_point.vals[i]), q_six.vals[i]);
1284
+ }
1285
+ return retval;
1286
+ }
1287
+
1288
+ int_vec_return_type widening_subtract(Vectorized<c10::quint8> b) const {
1289
+ int_vec_return_type retval;
1290
+ constexpr int elem_per_int_vec = size() / int_num_vecs();
1291
+ for (const auto i : c10::irange(int_num_vecs())) {
1292
+ for (const auto j : c10::irange(elem_per_int_vec)) {
1293
+ retval[i].vals[j] =
1294
+ static_cast<int32_t>(vals[i * elem_per_int_vec + j]) -
1295
+ static_cast<int32_t>(b.vals[i * elem_per_int_vec + j]);
1296
+ }
1297
+ }
1298
+ return retval;
1299
+ }
1300
+ static Vectorized<c10::quint8> requantize_from_int(
1301
+ const int_vec_return_type& inp,
1302
+ float multiplier,
1303
+ int32_t zero_point) {
1304
+ constexpr int elem_per_int_vec = size() / int_num_vecs();
1305
+ constexpr auto min_val = std::numeric_limits<value_type>::min();
1306
+ constexpr auto max_val = std::numeric_limits<value_type>::max();
1307
+ Vectorized<c10::quint8> retval;
1308
+ for (const auto i : c10::irange(int_num_vecs())) {
1309
+ for (const auto j : c10::irange(elem_per_int_vec)) {
1310
+ int32_t rounded =
1311
+ std::nearbyint(static_cast<float>(inp[i].vals[j]) * multiplier) +
1312
+ zero_point;
1313
+ retval.vals[i * elem_per_int_vec + j] =
1314
+ std::min<int32_t>(std::max<int32_t>(rounded, min_val), max_val);
1315
+ }
1316
+ }
1317
+ return retval;
1318
+ }
1319
+ };
1320
+
1321
+ template <>
1322
+ Vectorized<c10::quint8> inline maximum(const Vectorized<c10::quint8>& a, const Vectorized<c10::quint8>& b) {
1323
+ return a.maximum(b);
1324
+ }
1325
+
1326
+ #endif // if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
1327
+ }} // namespace at::vec::CPU_CAPABILITY
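Both requantization paths above — the AVX2 `RequantizeAvx2<value_type>` call and the scalar fallback loop in `requantize_from_int` — compute the same affine mapping per element: multiply the widened int32 value by a float multiplier, round to nearest, add the zero point, and clamp to the output type's range. A minimal scalar sketch of that per-element step (a hypothetical helper, not part of the ATen API) looks like this:

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <limits>

// Hypothetical scalar model of requantize_from_int for one element:
// round(x * multiplier) + zero_point, clamped to the range of T.
template <typename T>
T requantize_one(int32_t x, float multiplier, int32_t zero_point) {
  const int32_t min_val = std::numeric_limits<T>::min();
  const int32_t max_val = std::numeric_limits<T>::max();
  int32_t rounded =
      static_cast<int32_t>(std::nearbyint(static_cast<float>(x) * multiplier)) + zero_point;
  return static_cast<T>(std::min(std::max(rounded, min_val), max_val));
}

The vectorized AVX2 path performs the same arithmetic lane-wise before packing the 32-bit results back down to 8-bit values with saturation.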
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_bfloat16_vsx.h ADDED
@@ -0,0 +1,56 @@
1
+ #pragma once
2
+
3
+ #include <ATen/cpu/vec/intrinsics.h>
4
+ #include <ATen/cpu/vec/vec256/vsx/vsx_helpers.h>
5
+ #include <ATen/cpu/vec/vec_base.h>
6
+ #include <c10/util/irange.h>
7
+
8
+ namespace at {
9
+ namespace vec {
10
+ // See Note [CPU_CAPABILITY namespace]
11
+ inline namespace CPU_CAPABILITY {
12
+
13
+ inline std::tuple<Vectorized<float>, Vectorized<float>> convert_bfloat16_float(
14
+ const Vectorized<BFloat16>& a) {
15
+ constexpr int64_t K = Vectorized<BFloat16>::size();
16
+ __at_align__ float arr[K];
17
+ __at_align__ BFloat16 arr2[K];
18
+ a.store(arr2);
19
+ convert(arr2, arr, K);
20
+ return std::make_tuple(
21
+ Vectorized<float>::loadu(arr),
22
+ Vectorized<float>::loadu(arr + Vectorized<float>::size()));
23
+ }
24
+
25
+ inline Vectorized<BFloat16> convert_float_bfloat16(
26
+ const Vectorized<float>& a,
27
+ const Vectorized<float>& b) {
28
+ constexpr int64_t K = Vectorized<BFloat16>::size();
29
+ __at_align__ float arr[K];
30
+ __at_align__ BFloat16 arr2[K];
31
+ a.store(arr);
32
+ b.store(arr + Vectorized<float>::size());
33
+ convert(arr, arr2, K);
34
+ return Vectorized<BFloat16>::loadu(arr2);
35
+ }
36
+
37
+ inline void load_fp32_from_bf16(const c10::BFloat16* data, Vectorized<float>& out) {
38
+ __at_align__ float values[Vectorized<float>::size()];
39
+ for (const auto k : c10::irange(Vectorized<float>::size())) {
40
+ values[k] = data[k];
41
+ }
42
+ out = Vectorized<float>::loadu(values);
43
+ }
44
+
45
+ inline void load_fp32_from_bf16(
46
+ const c10::BFloat16* data,
47
+ Vectorized<float>& out1,
48
+ Vectorized<float>& out2) {
49
+ load_fp32_from_bf16(data, out1);
50
+ data += Vectorized<float>::size();
51
+ load_fp32_from_bf16(data, out2);
52
+ }
53
+
54
+ } // namespace
55
+ } // namespace vec
56
+ } // namespace at
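The VSX BFloat16 helpers above round-trip through temporary aligned arrays and the shared `convert()` routine rather than converting in-register. At the element level, widening a bfloat16 to float amounts to placing its 16 stored bits in the upper half of an IEEE-754 binary32 word; the sketch below (a hypothetical standalone helper, not the ATen API) shows that bit-level view, while the narrowing direction additionally has to round before discarding the low 16 bits.

#include <cstdint>
#include <cstring>

// Scalar sketch of bfloat16 -> float widening: the bf16 payload becomes the
// high 16 bits of a binary32 word and the low 16 bits are zero.
float bf16_bits_to_float(uint16_t bits) {
  uint32_t widened = static_cast<uint32_t>(bits) << 16;
  float out;
  std::memcpy(&out, &widened, sizeof(out));  // bit-cast without aliasing UB
  return out;
}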
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_float_vsx.h ADDED
@@ -0,0 +1,449 @@
1
+ #pragma once
2
+
3
+ #include <ATen/cpu/vec/intrinsics.h>
4
+ #include <ATen/cpu/vec/vec_base.h>
5
+ #include <ATen/cpu/vec/vec256/vsx/vsx_helpers.h>
6
+ #include <sleef.h>
7
+ namespace at {
8
+ namespace vec {
9
+ // See Note [CPU_CAPABILITY namespace]
10
+
11
+ inline namespace CPU_CAPABILITY {
12
+
13
+ template <>
14
+ class Vectorized<float> {
15
+ private:
16
+ union {
17
+ struct {
18
+ vfloat32 _vec0;
19
+ vfloat32 _vec1;
20
+ };
21
+ struct {
22
+ vbool32 _vecb0;
23
+ vbool32 _vecb1;
24
+ };
25
+
26
+ } __attribute__((__may_alias__));
27
+
28
+ public:
29
+ using value_type = float;
30
+ using vec_internal_type = vfloat32;
31
+ using vec_internal_mask_type = vbool32;
32
+ using size_type = int;
33
+
34
+ static constexpr size_type size() {
35
+ return 8;
36
+ }
37
+ Vectorized() {}
38
+
39
+ C10_ALWAYS_INLINE Vectorized(vfloat32 v) : _vec0{v}, _vec1{v} {}
40
+ C10_ALWAYS_INLINE Vectorized(vbool32 vmask) : _vecb0{vmask}, _vecb1{vmask} {}
41
+ C10_ALWAYS_INLINE Vectorized(vfloat32 v1, vfloat32 v2) : _vec0{v1}, _vec1{v2} {}
42
+ C10_ALWAYS_INLINE Vectorized(vbool32 v1, vbool32 v2) : _vecb0{v1}, _vecb1{v2} {}
43
+ C10_ALWAYS_INLINE Vectorized(float scalar)
44
+ : _vec0{vec_splats(scalar)}, _vec1{vec_splats(scalar)} {}
45
+ C10_ALWAYS_INLINE Vectorized(
46
+ float scalar1,
47
+ float scalar2,
48
+ float scalar3,
49
+ float scalar4,
50
+ float scalar5,
51
+ float scalar6,
52
+ float scalar7,
53
+ float scalar8)
54
+ : _vec0{vfloat32{scalar1, scalar2, scalar3, scalar4}},
55
+ _vec1{vfloat32{scalar5, scalar6, scalar7, scalar8}} {}
56
+ C10_ALWAYS_INLINE const vec_internal_type& vec0() const {
57
+ return _vec0;
58
+ }
59
+ C10_ALWAYS_INLINE const vec_internal_type& vec1() const {
60
+ return _vec1;
61
+ }
62
+
63
+ template <int64_t mask>
64
+ static std::enable_if_t<blendChoice(mask) == 0, Vectorized<float>> C10_ALWAYS_INLINE
65
+ blend(const Vectorized<float>& a, const Vectorized<float>& b) {
66
+ return a;
67
+ }
68
+
69
+ template <int64_t mask>
70
+ static std::enable_if_t<blendChoice(mask) == 1, Vectorized<float>> C10_ALWAYS_INLINE
71
+ blend(const Vectorized<float>& a, const Vectorized<float>& b) {
72
+ return b;
73
+ }
74
+
75
+ template <int64_t mask>
76
+ static std::enable_if_t<blendChoice(mask) == 2, Vectorized<float>> C10_ALWAYS_INLINE
77
+ blend(const Vectorized<float>& a, const Vectorized<float>& b) {
78
+ return {b._vec0, a._vec1};
79
+ }
80
+
81
+ template <int64_t mask>
82
+ static std::enable_if_t<blendChoice(mask) == 3, Vectorized<float>> C10_ALWAYS_INLINE
83
+ blend(const Vectorized<float>& a, const Vectorized<float>& b) {
84
+ return {a._vec0, b._vec1};
85
+ }
86
+
87
+ template <int64_t mask>
88
+ static std::enable_if_t<blendChoice(mask) == 4, Vectorized<float>> C10_ALWAYS_INLINE
89
+ blend(const Vectorized<float>& a, const Vectorized<float>& b) {
90
+ const vbool32 mask_1st = VsxMask1(mask);
91
+ return {(vfloat32)vec_sel(a._vec0, b._vec0, mask_1st), a._vec1};
92
+ }
93
+
94
+ template <int64_t mask>
95
+ static std::enable_if_t<blendChoice(mask) == 5, Vectorized<float>> C10_ALWAYS_INLINE
96
+ blend(const Vectorized<float>& a, const Vectorized<float>& b) {
97
+ const vbool32 mask_1st = VsxMask1(mask);
98
+ return {(vfloat32)vec_sel(a._vec0, b._vec0, mask_1st), b._vec1};
99
+ }
100
+
101
+ template <int64_t mask>
102
+ static std::enable_if_t<blendChoice(mask) == 6, Vectorized<float>> C10_ALWAYS_INLINE
103
+ blend(const Vectorized<float>& a, const Vectorized<float>& b) {
104
+ const vbool32 mask_2nd = VsxMask2(mask);
105
+ // generated masks
106
+ return {a._vec0, (vfloat32)vec_sel(a._vec1, b._vec1, mask_2nd)};
107
+ }
108
+
109
+ template <int64_t mask>
110
+ static std::enable_if_t<blendChoice(mask) == 7, Vectorized<float>> C10_ALWAYS_INLINE
111
+ blend(const Vectorized<float>& a, const Vectorized<float>& b) {
112
+ const vbool32 mask_2nd = VsxMask2(mask);
113
+ // generated masks
114
+ return {b._vec0, (vfloat32)vec_sel(a._vec1, b._vec1, mask_2nd)};
115
+ }
116
+
117
+ template <int64_t mask>
118
+ static std::enable_if_t<blendChoice(mask) == 8, Vectorized<float>> C10_ALWAYS_INLINE
119
+ blend(const Vectorized<float>& a, const Vectorized<float>& b) {
120
+ const vbool32 mask_1st = VsxMask1(mask);
121
+ const vbool32 mask_2nd = VsxMask2(mask);
122
+ return {
123
+ (vfloat32)vec_sel(a._vec0, b._vec0, mask_1st),
124
+ (vfloat32)vec_sel(a._vec1, b._vec1, mask_2nd)};
125
+ }
126
+
127
+ static Vectorized<float> C10_ALWAYS_INLINE blendv(
128
+ const Vectorized<float>& a,
129
+ const Vectorized<float>& b,
130
+ const Vectorized<float>& mask) {
131
+ // the mask used here is returned by a comparison of vec256;
132
+ // assuming this we can use the same mask directly with vec_sel
133
+ return {
134
+ vec_sel(a._vec0, b._vec0, mask._vecb0),
135
+ vec_sel(a._vec1, b._vec1, mask._vecb1)};
136
+ }
137
+
138
+ template <typename step_t>
139
+ static Vectorized<float> arange(float base = 0.f, step_t step = static_cast<step_t>(1)) {
140
+ return Vectorized<float>(
141
+ base,
142
+ base + step,
143
+ base + 2 * step,
144
+ base + 3 * step,
145
+ base + 4 * step,
146
+ base + 5 * step,
147
+ base + 6 * step,
148
+ base + 7 * step);
149
+ }
150
+ static Vectorized<float> set(
151
+ const Vectorized<float>& a,
152
+ const Vectorized<float>& b,
153
+ size_t count = size()) {
154
+ switch (count) {
155
+ case 0:
156
+ return a;
157
+ case 1:
158
+ return blend<1>(a, b);
159
+ case 2:
160
+ return blend<3>(a, b);
161
+ case 3:
162
+ return blend<7>(a, b);
163
+ case 4:
164
+ return blend<15>(a, b);
165
+ case 5:
166
+ return blend<31>(a, b);
167
+ case 6:
168
+ return blend<63>(a, b);
169
+ case 7:
170
+ return blend<127>(a, b);
171
+ }
172
+
173
+ return b;
174
+ }
175
+ static Vectorized<value_type> C10_ALWAYS_INLINE
176
+ loadu(const void* ptr, int count = size()) {
177
+ if (count == size()) {
178
+ return {
179
+ vec_vsx_ld(offset0, reinterpret_cast<const value_type*>(ptr)),
180
+ vec_vsx_ld(offset16, reinterpret_cast<const value_type*>(ptr))};
181
+ }
182
+
183
+ __at_align__ value_type tmp_values[size()] = {};
184
+ std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(value_type));
185
+
186
+ return {vec_vsx_ld(offset0, tmp_values), vec_vsx_ld(offset16, tmp_values)};
187
+ }
188
+ void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const {
189
+ if (count == size()) {
190
+ vec_vsx_st(_vec0, offset0, reinterpret_cast<value_type*>(ptr));
191
+ vec_vsx_st(_vec1, offset16, reinterpret_cast<value_type*>(ptr));
192
+ } else if (count > 0) {
193
+ __at_align__ value_type tmp_values[size()];
194
+ vec_vsx_st(_vec0, offset0, tmp_values);
195
+ vec_vsx_st(_vec1, offset16, tmp_values);
196
+ std::memcpy(
197
+ ptr, tmp_values, std::min(count, size()) * sizeof(value_type));
198
+ }
199
+ }
200
+
201
+ const float& operator[](int idx) const = delete;
202
+ float& operator[](int idx) = delete;
203
+
204
+ Vectorized<float> map(float (*const f)(float)) const {
205
+ Vectorized<float> ret;
206
+ for (int i = 0; i < size() / 2; i++) {
207
+ ret._vec0[i] = f(_vec0[i]);
208
+ }
209
+ for (int i = 0; i < size() / 2; i++) {
210
+ ret._vec1[i] = f(_vec1[i]);
211
+ }
212
+ return ret;
213
+ }
214
+
215
+ Vectorized<float> mapbi(float (*const f)(float, float), const Vectorized<float>& other)
216
+ const {
217
+ Vectorized<float> ret;
218
+ for (int i = 0; i < size() / 2; i++) {
219
+ ret._vec0[i] = f(_vec0[i], other._vec0[i]);
220
+ }
221
+ for (int i = 0; i < size() / 2; i++) {
222
+ ret._vec1[i] = f(_vec1[i], other._vec1[i]);
223
+ }
224
+ return ret;
225
+ }
226
+
227
+ Vectorized<float> _nor() const {
228
+ return {vec_nor(_vec0, _vec0), vec_nor(_vec1, _vec1)};
229
+ }
230
+
231
+ Vectorized<float> isnan() const {
232
+ auto x = *this;
233
+ auto ret = (x == x);
234
+ return ret._nor();
235
+ }
236
+
237
+ Vectorized<float> _isinf() const {
238
+ auto x = *this;
239
+ return (x == v_inf) | (x == v_minus_inf);
240
+ }
241
+
242
+ int zero_mask() const {
243
+ // returns an integer mask where all zero elements are translated to 1-bit
244
+ // and others are translated to 0-bit
245
+ //__m256 cmp = _mm256_cmp_ps(values, _mm256_set1_ps(0.0f), _CMP_EQ_OQ);
246
+ auto cmp = (*this == zero);
247
+ // return _mm256_movemask_ps(cmp);
248
+ // possible simulation //mask= lvsl ( 0 ) vbpermq( vec, mask <<5)
249
+ vuint64 result0 = vec_vbpermq((vuint8)cmp._vecb0, mask_zero_bits);
250
+ vuint64 result1 = vec_vbpermq((vuint8)cmp._vecb1, mask_zero_bits);
251
+ return (result0[1] >> 12 | (result1[1] >> 8));
252
+ }
253
+
254
+ Vectorized<float> C10_ALWAYS_INLINE abs() const {
255
+ return {vec_abs(_vec0), vec_abs(_vec1)};
256
+ }
257
+
258
+ Vectorized<float> C10_ALWAYS_INLINE acos() const {
259
+ return {Sleef_acosf4_u10(_vec0), Sleef_acosf4_u10(_vec1)};
260
+ }
261
+ Vectorized<float> C10_ALWAYS_INLINE asin() const {
262
+ return {Sleef_asinf4_u10(_vec0), Sleef_asinf4_u10(_vec1)};
263
+ }
264
+ Vectorized<float> atan() const {
265
+ return {Sleef_atanf4_u10(_vec0), Sleef_atanf4_u10(_vec1)};
266
+ }
267
+ Vectorized<float> atanh() const {
268
+ return {Sleef_atanhf4_u10(_vec0), Sleef_atanhf4_u10(_vec1)};
269
+ }
270
+ Vectorized<float> atan2(const Vectorized<float>& b) const {
271
+ return {Sleef_atan2f4_u10(_vec0, b._vec0), Sleef_atan2f4_u10(_vec1, b._vec1)};
272
+ }
273
+ Vectorized<float> copysign(const Vectorized<float> &sign) const {
274
+ return {Sleef_copysignf4(_vec0, sign._vec0), Sleef_copysignf4(_vec1, sign._vec1)};
275
+ }
276
+ Vectorized<float> lgamma() const {
277
+ return {Sleef_lgammaf4_u10(_vec0), Sleef_lgammaf4_u10(_vec1)};
278
+ }
279
+ Vectorized<float> erf() const {
280
+ return {Sleef_erff4_u10(_vec0), Sleef_erff4_u10(_vec1)};
281
+ }
282
+
283
+ Vectorized<float> erfc() const {
284
+ return {Sleef_erfcf4_u15(_vec0), Sleef_erfcf4_u15(_vec1)};
285
+ }
286
+
287
+ Vectorized<float> erfinv() const {
288
+ return map(calc_erfinv);
289
+ }
290
+
291
+ Vectorized<float> angle() const {
292
+ auto tmp = blendv(
293
+ Vectorized<float>(0), Vectorized<float>(c10::pi<float>), *this < Vectorized<float>(0));
294
+ return blendv(tmp, *this, isnan());
295
+ }
296
+ Vectorized<float> real() const {
297
+ return *this;
298
+ }
299
+ Vectorized<float> imag() const {
300
+ return Vectorized<float>{0};
301
+ }
302
+ Vectorized<float> conj() const {
303
+ return *this;
304
+ }
305
+
306
+ Vectorized<float> C10_ALWAYS_INLINE exp() const {
307
+ return {Sleef_expf4_u10(_vec0), Sleef_expf4_u10(_vec1)};
308
+ }
309
+ Vectorized<float> C10_ALWAYS_INLINE exp2() const {
310
+ return {Sleef_exp2f4_u10(_vec0), Sleef_exp2f4_u10(_vec1)};
311
+ }
312
+ Vectorized<float> expm1() const {
313
+ return {Sleef_expm1f4_u10(_vec0), Sleef_expm1f4_u10(_vec1)};
314
+ }
315
+
316
+ Vectorized<float> C10_ALWAYS_INLINE log() const {
317
+ return {Sleef_logf4_u10(_vec0), Sleef_logf4_u10(_vec1)};
318
+ }
319
+ Vectorized<float> C10_ALWAYS_INLINE log10() const {
320
+ return {Sleef_log10f4_u10(_vec0), Sleef_log10f4_u10(_vec1)};
321
+ }
322
+ Vectorized<float> C10_ALWAYS_INLINE log1p() const {
323
+ return {Sleef_log1pf4_u10(_vec0), Sleef_log1pf4_u10(_vec1)};
324
+ }
325
+ Vectorized<float> C10_ALWAYS_INLINE log2() const {
326
+ return {Sleef_log2f4_u10(_vec0), Sleef_log2f4_u10(_vec1)};
327
+ }
328
+ Vectorized<float> C10_ALWAYS_INLINE ceil() const {
329
+ return {vec_ceil(_vec0), vec_ceil(_vec1)};
330
+ }
331
+ Vectorized<float> C10_ALWAYS_INLINE cos() const {
332
+ return {Sleef_cosf4_u10(_vec0), Sleef_cosf4_u10(_vec1)};
333
+ }
334
+ Vectorized<float> C10_ALWAYS_INLINE cosh() const {
335
+ return {Sleef_coshf4_u10(_vec0), Sleef_coshf4_u10(_vec1)};
336
+ }
337
+ Vectorized<float> C10_ALWAYS_INLINE floor() const {
338
+ return {vec_floor(_vec0), vec_floor(_vec1)};
339
+ }
340
+ Vectorized<float> C10_ALWAYS_INLINE neg() const {
341
+ return {vec_neg(_vec0), vec_neg(_vec1)};
342
+ }
343
+
344
+ Vectorized<float> C10_ALWAYS_INLINE round() const {
345
+ return {vec_round(_vec0), vec_round(_vec1)};
346
+ }
347
+ Vectorized<float> C10_ALWAYS_INLINE sin() const {
348
+ return {Sleef_sinf4_u10(_vec0), Sleef_sinf4_u10(_vec1)};
349
+ }
350
+ Vectorized<float> C10_ALWAYS_INLINE sinh() const {
351
+ return {Sleef_sinhf4_u10(_vec0), Sleef_sinhf4_u10(_vec1)};
352
+ }
353
+ Vectorized<float> C10_ALWAYS_INLINE tan() const {
354
+ return {Sleef_tanf4_u10(_vec0), Sleef_tanf4_u10(_vec1)};
355
+ }
356
+ Vectorized<float> C10_ALWAYS_INLINE tanh() const {
357
+ return {Sleef_tanhf4_u10(_vec0), Sleef_tanhf4_u10(_vec1)};
358
+ }
359
+ Vectorized<float> C10_ALWAYS_INLINE trunc() const {
360
+ return {vec_trunc(_vec0), vec_trunc(_vec1)};
361
+ }
362
+
363
+ Vectorized<float> C10_ALWAYS_INLINE frac() const {
364
+ return *this - trunc();
365
+ }
366
+
367
+ Vectorized<float> C10_ALWAYS_INLINE sqrt() const {
368
+ return {vec_sqrt(_vec0), vec_sqrt(_vec1)};
369
+ }
370
+ Vectorized<float> C10_ALWAYS_INLINE reciprocal() const {
371
+ return Vectorized<float>(one) / (*this);
372
+ }
373
+ Vectorized<float> C10_ALWAYS_INLINE rsqrt() const {
374
+ return sqrt().reciprocal();
375
+ }
376
+
377
+ Vectorized<float> C10_ALWAYS_INLINE pow(const Vectorized<float>& exp) const {
378
+ return {Sleef_powf4_u10(_vec0, exp._vec0), Sleef_powf4_u10(_vec1, exp._vec1)};
379
+ }
380
+
381
+ Vectorized<float> fmod(const Vectorized<float>& b) const {
382
+ return {Sleef_fmodf4(_vec0, b._vec0),Sleef_fmodf4(_vec1, b._vec1)};
383
+ }
384
+
385
+ Vectorized<float> hypot(const Vectorized<float>& b) const {
386
+ return {Sleef_hypotf4_u05(_vec0, b._vec0), Sleef_hypotf4_u05(_vec1, b._vec1)};
387
+ }
388
+
389
+ Vectorized<float> nextafter(const Vectorized<float>& b) const {
390
+ return {Sleef_nextafterf4(_vec0, b._vec0), Sleef_nextafterf4(_vec1, b._vec1)};
391
+ }
392
+
393
+ Vectorized<float> igamma(const Vectorized<float>& x) const {
394
+ return mapbi(calc_igamma, x);
395
+ }
396
+
397
+ Vectorized<float> igammac(const Vectorized<float>& x) const {
398
+ return mapbi(calc_igammac, x);
399
+ }
400
+
401
+ Vectorized<float> i0() const {
402
+ return map(calc_i0);
403
+ }
404
+
405
+ Vectorized<float> i0e() const {
406
+ return map(calc_i0e);
407
+ }
408
+
409
+ Vectorized<float> digamma() const {
410
+ return map(calc_digamma);
411
+ }
412
+
413
+ DEFINE_MEMBER_OP(operator==, float, vec_cmpeq)
414
+ DEFINE_MEMBER_OP(operator!=, float, vec_cmpne)
415
+ DEFINE_MEMBER_OP(operator<, float, vec_cmplt)
416
+ DEFINE_MEMBER_OP(operator<=, float, vec_cmple)
417
+ DEFINE_MEMBER_OP(operator>, float, vec_cmpgt)
418
+ DEFINE_MEMBER_OP(operator>=, float, vec_cmpge)
419
+ DEFINE_MEMBER_OP_AND_ONE(eq, float, vec_cmpeq)
420
+ DEFINE_MEMBER_OP_AND_ONE(ne, float, vec_cmpne)
421
+ DEFINE_MEMBER_OP_AND_ONE(lt, float, vec_cmplt)
422
+ DEFINE_MEMBER_OP_AND_ONE(le, float, vec_cmple)
423
+ DEFINE_MEMBER_OP_AND_ONE(gt, float, vec_cmpgt)
424
+ DEFINE_MEMBER_OP_AND_ONE(ge, float, vec_cmpge)
425
+ DEFINE_MEMBER_OP(operator+, float, vec_add)
426
+ DEFINE_MEMBER_OP(operator-, float, vec_sub)
427
+ DEFINE_MEMBER_OP(operator*, float, vec_mul)
428
+ DEFINE_MEMBER_OP(operator/, float, vec_div)
429
+ DEFINE_MEMBER_OP(maximum, float, vec_max_nan2)
430
+ DEFINE_MEMBER_OP(minimum, float, vec_min_nan2)
431
+ DEFINE_MEMBER_OP(operator&, float, vec_and)
432
+ DEFINE_MEMBER_OP(operator|, float, vec_or)
433
+ DEFINE_MEMBER_OP(operator^, float, vec_xor)
434
+ DEFINE_MEMBER_TERNARY_OP(madd, float, vec_madd)
435
+ };
436
+
437
+ template <>
438
+ Vectorized<float> inline maximum(const Vectorized<float>& a, const Vectorized<float>& b) {
439
+ return a.maximum(b);
440
+ }
441
+
442
+ template <>
443
+ Vectorized<float> inline minimum(const Vectorized<float>& a, const Vectorized<float>& b) {
444
+ return a.minimum(b);
445
+ }
446
+
447
+ } // namespace
448
+ } // namespace vec
449
+ } // namespace at
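As a usage sketch (not code from ATen), a caller written against this `Vectorized<float>` interface typically processes full lanes with `loadu`/`store` and uses the count overloads for the tail. The function name and the fused multiply-add expression below are illustrative assumptions only; they rely just on the members declared above (`loadu`/`store` with a count, `operator*`, `operator+`).

#include <ATen/cpu/vec/vec.h>
#include <cstdint>

// Illustrative only: y[i] = a[i] * b[i] + c written against Vectorized<float>.
void scale_and_shift(const float* a, const float* b, float c, float* y, int64_t n) {
  using Vec = at::vec::Vectorized<float>;
  const Vec vc(c);
  int64_t i = 0;
  for (; i + Vec::size() <= n; i += Vec::size()) {
    (Vec::loadu(a + i) * Vec::loadu(b + i) + vc).store(y + i);
  }
  if (i < n) {
    const int rem = static_cast<int>(n - i);  // partial tail load/store
    (Vec::loadu(a + i, rem) * Vec::loadu(b + i, rem) + vc).store(y + i, rem);
  }
}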
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_int16_vsx.h ADDED
@@ -0,0 +1,368 @@
1
+ #pragma once
2
+
3
+ #include <ATen/cpu/vec/intrinsics.h>
4
+ #include <ATen/cpu/vec/vec_base.h>
5
+ #include <ATen/cpu/vec/vec256/vsx/vsx_helpers.h>
6
+ namespace at {
7
+ namespace vec {
8
+ // See Note [CPU_CAPABILITY namespace]
9
+ inline namespace CPU_CAPABILITY {
10
+
11
+ template <>
12
+ class Vectorized<int16_t> {
13
+ private:
14
+ union {
15
+ struct {
16
+ vint16 _vec0;
17
+ vint16 _vec1;
18
+ };
19
+ struct {
20
+ vbool16 _vecb0;
21
+ vbool16 _vecb1;
22
+ };
23
+
24
+ } __attribute__((__may_alias__));
25
+
26
+ public:
27
+ using value_type = int16_t;
28
+ using vec_internal_type = vint16;
29
+ using vec_internal_mask_type = vbool16;
30
+ using size_type = int;
31
+ static constexpr size_type size() {
32
+ return 16;
33
+ }
34
+ Vectorized() {}
35
+ C10_ALWAYS_INLINE Vectorized(vint16 v) : _vec0{v}, _vec1{v} {}
36
+ C10_ALWAYS_INLINE Vectorized(vbool16 vmask) : _vecb0{vmask}, _vecb1{vmask} {}
37
+ C10_ALWAYS_INLINE Vectorized(vint16 v1, vint16 v2) : _vec0{v1}, _vec1{v2} {}
38
+ C10_ALWAYS_INLINE Vectorized(vbool16 v1, vbool16 v2) : _vecb0{v1}, _vecb1{v2} {}
39
+ C10_ALWAYS_INLINE Vectorized(int16_t scalar)
40
+ : _vec0{vec_splats(scalar)}, _vec1{vec_splats(scalar)} {}
41
+
42
+ C10_ALWAYS_INLINE Vectorized(
43
+ int16_t scalar1,
44
+ int16_t scalar2,
45
+ int16_t scalar3,
46
+ int16_t scalar4,
47
+ int16_t scalar5,
48
+ int16_t scalar6,
49
+ int16_t scalar7,
50
+ int16_t scalar8,
51
+ int16_t scalar9,
52
+ int16_t scalar10,
53
+ int16_t scalar11,
54
+ int16_t scalar12,
55
+ int16_t scalar13,
56
+ int16_t scalar14,
57
+ int16_t scalar15,
58
+ int16_t scalar16)
59
+ : _vec0{vint16{
60
+ scalar1,
61
+ scalar2,
62
+ scalar3,
63
+ scalar4,
64
+ scalar5,
65
+ scalar6,
66
+ scalar7,
67
+ scalar8}},
68
+ _vec1{vint16{
69
+ scalar9,
70
+ scalar10,
71
+ scalar11,
72
+ scalar12,
73
+ scalar13,
74
+ scalar14,
75
+ scalar15,
76
+ scalar16}} {}
77
+ C10_ALWAYS_INLINE const vec_internal_type& vec0() const {
78
+ return _vec0;
79
+ }
80
+ C10_ALWAYS_INLINE const vec_internal_type& vec1() const {
81
+ return _vec1;
82
+ }
83
+
84
+ template <uint64_t mask>
85
+ static std::enable_if_t<mask == 0, Vectorized<int16_t>> C10_ALWAYS_INLINE
86
+ blend(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
87
+ return a;
88
+ }
89
+
90
+ template <uint64_t mask>
91
+ static std::enable_if_t<(mask & 65535) == 65535, Vectorized<int16_t>>
92
+ C10_ALWAYS_INLINE blend(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
93
+ return b;
94
+ }
95
+
96
+ template <uint64_t mask>
97
+ static std::enable_if_t<mask == 255, Vectorized<int16_t>> C10_ALWAYS_INLINE
98
+ blend(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
99
+ return {b._vec0, a._vec1};
100
+ }
101
+
102
+ template <uint64_t mask>
103
+ static std::enable_if_t<(mask > 0 && mask < 255), Vectorized<int16_t>>
104
+ C10_ALWAYS_INLINE blend(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
105
+ constexpr int16_t g0 = (mask & 1) * 0xffff;
106
+ constexpr int16_t g1 = ((mask & 2) >> 1) * 0xffff;
107
+ constexpr int16_t g2 = ((mask & 4) >> 2) * 0xffff;
108
+ constexpr int16_t g3 = ((mask & 8) >> 3) * 0xffff;
109
+ constexpr int16_t g4 = ((mask & 16) >> 4) * 0xffff;
110
+ constexpr int16_t g5 = ((mask & 32) >> 5) * 0xffff;
111
+ constexpr int16_t g6 = ((mask & 64) >> 6) * 0xffff;
112
+ constexpr int16_t g7 = ((mask & 128) >> 7) * 0xffff;
113
+ const vint16 mask_1st = vint16{g0, g1, g2, g3, g4, g5, g6, g7};
114
+
115
+ return {(vint16)vec_sel(a._vec0, b._vec0, (vbool16)mask_1st), a._vec1};
116
+ }
117
+
118
+ template <uint64_t mask>
119
+ static std::enable_if_t<
120
+ (mask > 255 && (mask & 65535) != 65535 && ((mask & 255) == 255)),
121
+ Vectorized<int16_t>>
122
+ C10_ALWAYS_INLINE blend(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
123
+ constexpr int16_t g0_2 = (mask & 1) * 0xffff;
124
+ constexpr int16_t g1_2 = ((mask & 2) >> 1) * 0xffff;
125
+ constexpr int16_t g2_2 = ((mask & 4) >> 2) * 0xffff;
126
+ constexpr int16_t g3_2 = ((mask & 8) >> 3) * 0xffff;
127
+ constexpr int16_t g4_2 = ((mask & 16) >> 4) * 0xffff;
128
+ constexpr int16_t g5_2 = ((mask & 32) >> 5) * 0xffff;
129
+ constexpr int16_t g6_2 = ((mask & 64) >> 6) * 0xffff;
130
+ constexpr int16_t g7_2 = ((mask & 128) >> 7) * 0xffff;
131
+
132
+ const vint16 mask_2nd =
133
+ vint16{g0_2, g1_2, g2_2, g3_2, g4_2, g5_2, g6_2, g7_2};
134
+ // generated masks
135
+ return {b._vec0, (vint16)vec_sel(a._vec1, b._vec1, (vbool16)mask_2nd)};
136
+ }
137
+
138
+ template <uint64_t mask>
139
+ static std::enable_if_t<
140
+ (mask > 255 && ((mask & 65535) != 65535) && ((mask & 255) == 0)),
141
+ Vectorized<int16_t>>
142
+ C10_ALWAYS_INLINE blend(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
143
+ constexpr int16_t mask2 = (mask & 65535) >> 16;
144
+ constexpr int16_t g0_2 = (mask & 1) * 0xffff;
145
+ constexpr int16_t g1_2 = ((mask & 2) >> 1) * 0xffff;
146
+ constexpr int16_t g2_2 = ((mask & 4) >> 2) * 0xffff;
147
+ constexpr int16_t g3_2 = ((mask & 8) >> 3) * 0xffff;
148
+ constexpr int16_t g4_2 = ((mask & 16) >> 4) * 0xffff;
149
+ constexpr int16_t g5_2 = ((mask & 32) >> 5) * 0xffff;
150
+ constexpr int16_t g6_2 = ((mask & 64) >> 6) * 0xffff;
151
+ constexpr int16_t g7_2 = ((mask & 128) >> 7) * 0xffff;
152
+
153
+ const vint16 mask_2nd =
154
+ vint16{g0_2, g1_2, g2_2, g3_2, g4_2, g5_2, g6_2, g7_2};
155
+ // generated masks
156
+ return {a, (vint16)vec_sel(a._vec1, b._vec1, (vbool16)mask_2nd)};
157
+ }
158
+
159
+ template <uint64_t mask>
160
+ static std::enable_if_t<
161
+ (mask > 255 && ((mask & 65535) != 65535) && ((mask & 255) != 0) &&
162
+ ((mask & 255) != 255)),
163
+ Vectorized<int16_t>>
164
+ C10_ALWAYS_INLINE blend(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
165
+ constexpr int16_t g0 = (mask & 1) * 0xffff;
166
+ constexpr int16_t g1 = ((mask & 2) >> 1) * 0xffff;
167
+ constexpr int16_t g2 = ((mask & 4) >> 2) * 0xffff;
168
+ constexpr int16_t g3 = ((mask & 8) >> 3) * 0xffff;
169
+ constexpr int16_t g4 = ((mask & 16) >> 4) * 0xffff;
170
+ constexpr int16_t g5 = ((mask & 32) >> 5) * 0xffff;
171
+ constexpr int16_t g6 = ((mask & 64) >> 6) * 0xffff;
172
+ constexpr int16_t g7 = ((mask & 128) >> 7) * 0xffff;
173
+ constexpr int16_t mask2 = (mask & 65535) >> 16;
174
+ constexpr int16_t g0_2 = (mask & 1) * 0xffff;
175
+ constexpr int16_t g1_2 = ((mask & 2) >> 1) * 0xffff;
176
+ constexpr int16_t g2_2 = ((mask & 4) >> 2) * 0xffff;
177
+ constexpr int16_t g3_2 = ((mask & 8) >> 3) * 0xffff;
178
+ constexpr int16_t g4_2 = ((mask & 16) >> 4) * 0xffff;
179
+ constexpr int16_t g5_2 = ((mask & 32) >> 5) * 0xffff;
180
+ constexpr int16_t g6_2 = ((mask & 64) >> 6) * 0xffff;
181
+ constexpr int16_t g7_2 = ((mask & 128) >> 7) * 0xffff;
182
+
183
+ const vint16 mask_1st = vint16{g0, g1, g2, g3, g4, g5, g6, g7};
184
+ const vint16 mask_2nd =
185
+ vint16{g0_2, g1_2, g2_2, g3_2, g4_2, g5_2, g6_2, g7_2};
186
+ // generated masks
187
+ return {
188
+ (vint16)vec_sel(a._vec0, b._vec0, (vbool16)mask_1st),
189
+ (vint16)vec_sel(a._vec1, b._vec1, (vbool16)mask_2nd)};
190
+ }
191
+
192
+ static Vectorized<int16_t> C10_ALWAYS_INLINE blendv(
193
+ const Vectorized<int16_t>& a,
194
+ const Vectorized<int16_t>& b,
195
+ const Vectorized<int16_t>& mask) {
196
+ // the mask used here is returned by a comparison of vec256;
197
+ // assuming this we can use the same mask directly with vec_sel
198
+ // warning intel style mask will not work properly
199
+ return {
200
+ vec_sel(a._vec0, b._vec0, mask._vecb0),
201
+ vec_sel(a._vec1, b._vec1, mask._vecb1)};
202
+ }
203
+
204
+ template <typename step_t>
205
+ static Vectorized<int16_t> arange(int16_t base = 0, step_t step = static_cast<step_t>(1)) {
206
+ return Vectorized<int16_t>(
207
+ base,
208
+ base + step,
209
+ base + 2 * step,
210
+ base + 3 * step,
211
+ base + 4 * step,
212
+ base + 5 * step,
213
+ base + 6 * step,
214
+ base + 7 * step,
215
+ base + 8 * step,
216
+ base + 9 * step,
217
+ base + 10 * step,
218
+ base + 11 * step,
219
+ base + 12 * step,
220
+ base + 13 * step,
221
+ base + 14 * step,
222
+ base + 15 * step);
223
+ }
224
+ static Vectorized<int16_t> set(
225
+ const Vectorized<int16_t>& a,
226
+ const Vectorized<int16_t>& b,
227
+ size_t count = size()) {
228
+ switch (count) {
229
+ case 0:
230
+ return a;
231
+ case 1:
232
+ return blend<1>(a, b);
233
+ case 2:
234
+ return blend<3>(a, b);
235
+ case 3:
236
+ return blend<7>(a, b);
237
+ case 4:
238
+ return blend<15>(a, b);
239
+ case 5:
240
+ return blend<31>(a, b);
241
+ case 6:
242
+ return blend<63>(a, b);
243
+ case 7:
244
+ return blend<127>(a, b);
245
+ case 8:
246
+ return blend<255>(a, b);
247
+ case 9:
248
+ return blend<511>(a, b);
249
+ case 10:
250
+ return blend<1023>(a, b);
251
+ case 11:
252
+ return blend<2047>(a, b);
253
+ case 12:
254
+ return blend<4095>(a, b);
255
+ case 13:
256
+ return blend<8191>(a, b);
257
+ case 14:
258
+ return blend<16383>(a, b);
259
+ case 15:
260
+ return blend<32767>(a, b);
261
+ }
262
+ return b;
263
+ }
264
+ static Vectorized<value_type> C10_ALWAYS_INLINE
265
+ loadu(const void* ptr, int count = size()) {
266
+ if (count == size()) {
267
+ return {
268
+ vec_vsx_ld(offset0, reinterpret_cast<const value_type*>(ptr)),
269
+ vec_vsx_ld(offset16, reinterpret_cast<const value_type*>(ptr))};
270
+ }
271
+
272
+ __at_align__ value_type tmp_values[size()] = {};
273
+ std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(value_type));
274
+
275
+ return {vec_vsx_ld(offset0, tmp_values), vec_vsx_ld(offset16, tmp_values)};
276
+ }
277
+ void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const {
278
+ if (count == size()) {
279
+ vec_vsx_st(_vec0, offset0, reinterpret_cast<value_type*>(ptr));
280
+ vec_vsx_st(_vec1, offset16, reinterpret_cast<value_type*>(ptr));
281
+ } else if (count > 0) {
282
+ __at_align__ value_type tmp_values[size()];
283
+ vec_vsx_st(_vec0, offset0, tmp_values);
284
+ vec_vsx_st(_vec1, offset16, tmp_values);
285
+ std::memcpy(ptr, tmp_values, std::min(count, size()) * sizeof(value_type));
286
+ }
287
+ }
288
+ const int16_t& operator[](int idx) const = delete;
289
+ int16_t& operator[](int idx) = delete;
290
+
291
+ Vectorized<int16_t> angle() const {
292
+ return blendv(
293
+ Vectorized<int16_t>(0), Vectorized<int16_t>(c10::pi<int16_t>), *this < Vectorized<int16_t>(0));
294
+ }
295
+ Vectorized<int16_t> real() const {
296
+ return *this;
297
+ }
298
+ Vectorized<int16_t> imag() const {
299
+ return Vectorized<int16_t>{0};
300
+ }
301
+ Vectorized<int16_t> conj() const {
302
+ return *this;
303
+ }
304
+
305
+ Vectorized<int16_t> C10_ALWAYS_INLINE abs() const {
306
+ return {vec_abs(_vec0), vec_abs(_vec1)};
307
+ }
308
+
309
+ Vectorized<int16_t> C10_ALWAYS_INLINE neg() const {
310
+ return {vec_neg(_vec0), vec_neg(_vec1)};
311
+ }
312
+
313
+ DEFINE_MEMBER_UNARY_OP(operator~, int16_t, vec_not)
314
+ DEFINE_MEMBER_OP(operator==, int16_t, vec_cmpeq)
315
+ DEFINE_MEMBER_OP(operator!=, int16_t, vec_cmpne)
316
+ DEFINE_MEMBER_OP(operator<, int16_t, vec_cmplt)
317
+ DEFINE_MEMBER_OP(operator<=, int16_t, vec_cmple)
318
+ DEFINE_MEMBER_OP(operator>, int16_t, vec_cmpgt)
319
+ DEFINE_MEMBER_OP(operator>=, int16_t, vec_cmpge)
320
+ DEFINE_MEMBER_OP_AND_ONE(eq, int16_t, vec_cmpeq)
321
+ DEFINE_MEMBER_OP_AND_ONE(ne, int16_t, vec_cmpne)
322
+ DEFINE_MEMBER_OP_AND_ONE(lt, int16_t, vec_cmplt)
323
+ DEFINE_MEMBER_OP_AND_ONE(le, int16_t, vec_cmple)
324
+ DEFINE_MEMBER_OP_AND_ONE(gt, int16_t, vec_cmpgt)
325
+ DEFINE_MEMBER_OP_AND_ONE(ge, int16_t, vec_cmpge)
326
+ DEFINE_MEMBER_OP(operator+, int16_t, vec_add)
327
+ DEFINE_MEMBER_OP(operator-, int16_t, vec_sub)
328
+ DEFINE_MEMBER_OP(operator*, int16_t, vec_mul)
329
+ DEFINE_MEMBER_EMULATE_BINARY_OP(operator/, int16_t, /)
330
+ DEFINE_MEMBER_OP(maximum, int16_t, vec_max)
331
+ DEFINE_MEMBER_OP(minimum, int16_t, vec_min)
332
+ DEFINE_MEMBER_OP(operator&, int16_t, vec_and)
333
+ DEFINE_MEMBER_OP(operator|, int16_t, vec_or)
334
+ DEFINE_MEMBER_OP(operator^, int16_t, vec_xor)
335
+ };
336
+
337
+ template <>
338
+ Vectorized<int16_t> inline operator<<(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
339
+ vuint16 shift_vec0 = reinterpret_cast<vuint16>(b.vec0());
340
+ vuint16 shift_vec1 = reinterpret_cast<vuint16>(b.vec1());
341
+ return Vectorized<int16_t>{vec_sl(a.vec0(), shift_vec0), vec_sl(a.vec1(), shift_vec1)};
342
+ }
343
+
344
+ template <>
345
+ Vectorized<int16_t> inline operator>>(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
346
+ vuint16 shift_vec0 = reinterpret_cast<vuint16>(b.vec0());
347
+ vuint16 shift_vec1 = reinterpret_cast<vuint16>(b.vec1()) ;
348
+ return Vectorized<int16_t>{vec_sr(a.vec0(), shift_vec0), vec_sr(a.vec1(), shift_vec1)};
349
+ }
350
+
351
+ template <>
352
+ Vectorized<int16_t> inline maximum(
353
+ const Vectorized<int16_t>& a,
354
+ const Vectorized<int16_t>& b) {
355
+ return a.maximum(b);
356
+ }
357
+
358
+ template <>
359
+ Vectorized<int16_t> inline minimum(
360
+ const Vectorized<int16_t>& a,
361
+ const Vectorized<int16_t>& b) {
362
+ return a.minimum(b);
363
+ }
364
+
365
+
366
+ } // namespace
367
+ } // namespace vec
368
+ } // namespace at
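The `blend<mask>` specializations above all follow one rule: lane i of the result is taken from b when bit i of mask is set and from a otherwise, which is why `set(a, b, count)` dispatches to `blend<(1 << count) - 1>` (masks 1, 3, 7, ..., 32767 in the switch). A scalar model of that rule, as a hypothetical helper rather than ATen code:

#include <array>
#include <cstddef>
#include <cstdint>

// Scalar model of blend<mask>: lane i comes from b when bit i of mask is set.
template <std::size_t N>
std::array<int16_t, N> blend_model(uint64_t mask,
                                   const std::array<int16_t, N>& a,
                                   const std::array<int16_t, N>& b) {
  std::array<int16_t, N> out{};
  for (std::size_t i = 0; i < N; ++i) {
    out[i] = ((mask >> i) & 1) ? b[i] : a[i];
  }
  return out;
}
// set(a, b, count) then behaves like blend_model((uint64_t{1} << count) - 1, a, b).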
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_int32_vsx.h ADDED
@@ -0,0 +1,298 @@
1
+ #pragma once
2
+
3
+ #include <ATen/cpu/vec/intrinsics.h>
4
+ #include <ATen/cpu/vec/vec_base.h>
5
+ #include <ATen/cpu/vec/vec256/vsx/vsx_helpers.h>
6
+ namespace at {
7
+ namespace vec {
8
+ // See Note [CPU_CAPABILITY namespace]
9
+ inline namespace CPU_CAPABILITY {
10
+
11
+ template <>
12
+ class Vectorized<int32_t> {
13
+ private:
14
+ union {
15
+ struct {
16
+ vint32 _vec0;
17
+ vint32 _vec1;
18
+ };
19
+ struct {
20
+ vbool32 _vecb0;
21
+ vbool32 _vecb1;
22
+ };
23
+
24
+ } __attribute__((__may_alias__));
25
+
26
+ public:
27
+ using value_type = int32_t;
28
+ using vec_internal_type = vint32;
29
+ using vec_internal_mask_type = vbool32;
30
+ using size_type = int;
31
+ static constexpr size_type size() {
32
+ return 8;
33
+ }
34
+ Vectorized() {}
35
+ C10_ALWAYS_INLINE Vectorized(vint32 v) : _vec0{v}, _vec1{v} {}
36
+ C10_ALWAYS_INLINE Vectorized(vbool32 vmask) : _vecb0{vmask}, _vecb1{vmask} {}
37
+ C10_ALWAYS_INLINE Vectorized(vint32 v1, vint32 v2) : _vec0{v1}, _vec1{v2} {}
38
+ C10_ALWAYS_INLINE Vectorized(vbool32 v1, vbool32 v2) : _vecb0{v1}, _vecb1{v2} {}
39
+ C10_ALWAYS_INLINE Vectorized(int32_t scalar)
40
+ : _vec0{vec_splats(scalar)}, _vec1{vec_splats(scalar)} {}
41
+ C10_ALWAYS_INLINE Vectorized(
42
+ int32_t scalar1,
43
+ int32_t scalar2,
44
+ int32_t scalar3,
45
+ int32_t scalar4,
46
+ int32_t scalar5,
47
+ int32_t scalar6,
48
+ int32_t scalar7,
49
+ int32_t scalar8)
50
+ : _vec0{vint32{scalar1, scalar2, scalar3, scalar4}},
51
+ _vec1{vint32{scalar5, scalar6, scalar7, scalar8}} {}
52
+ C10_ALWAYS_INLINE const vec_internal_type& vec0() const {
53
+ return _vec0;
54
+ }
55
+ C10_ALWAYS_INLINE const vec_internal_type& vec1() const {
56
+ return _vec1;
57
+ }
58
+
59
+ template <uint64_t mask>
60
+ static std::enable_if_t<mask == 0, Vectorized<int32_t>> C10_ALWAYS_INLINE
61
+ blend(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
62
+ return a;
63
+ }
64
+
65
+ template <uint64_t mask>
66
+ static std::enable_if_t<(mask & 255) == 255, Vectorized<int32_t>> C10_ALWAYS_INLINE
67
+ blend(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
68
+ return b;
69
+ }
70
+
71
+ template <uint64_t mask>
72
+ static std::enable_if_t<mask == 15, Vectorized<int32_t>> C10_ALWAYS_INLINE
73
+ blend(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
74
+ return {b._vec0, a._vec1};
75
+ }
76
+
77
+ template <uint64_t mask>
78
+ static std::enable_if_t<(mask > 0 && mask < 15), Vectorized<int32_t>>
79
+ C10_ALWAYS_INLINE blend(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
80
+ constexpr uint32_t g0 = (mask & 1) * 0xffffffff;
81
+ constexpr uint32_t g1 = ((mask & 2) >> 1) * 0xffffffff;
82
+ constexpr uint32_t g2 = ((mask & 4) >> 2) * 0xffffffff;
83
+ constexpr uint32_t g3 = ((mask & 8) >> 3) * 0xffffffff;
84
+ const vbool32 mask_1st = (vbool32){g0, g1, g2, g3};
85
+
86
+ return {(vint32)vec_sel(a._vec0, b._vec0, (vbool32)mask_1st), a._vec1};
87
+ }
88
+
89
+ template <uint64_t mask>
90
+ static std::enable_if_t<
91
+ (mask > 15 && (mask & 255) != 255 && ((mask & 15) == 15)),
92
+ Vectorized<int32_t>>
93
+ C10_ALWAYS_INLINE blend(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
94
+ constexpr uint32_t mask2 = (mask & 255) >> 4;
95
+ constexpr uint32_t g0_2 = (mask2 & 1) * 0xffffffff;
96
+ constexpr uint32_t g1_2 = ((mask2 & 2) >> 1) * 0xffffffff;
97
+ constexpr uint32_t g2_2 = ((mask2 & 4) >> 2) * 0xffffffff;
98
+ constexpr uint32_t g3_2 = ((mask2 & 8) >> 3) * 0xffffffff;
99
+
100
+ const vbool32 mask_2nd = (vbool32){g0_2, g1_2, g2_2, g3_2};
101
+ // generated masks
102
+ return {b._vec0, (vint32)vec_sel(a._vec1, b._vec1, (vbool32)mask_2nd)};
103
+ }
104
+
105
+ template <uint64_t mask>
106
+ static std::enable_if_t<
107
+ (mask > 15 && ((mask & 255) != 255) && ((mask & 15) == 0)),
108
+ Vectorized<int32_t>>
109
+ C10_ALWAYS_INLINE blend(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
110
+ constexpr uint32_t mask2 = (mask & 255) >> 4;
111
+ constexpr uint32_t g0_2 = (mask2 & 1) * 0xffffffff;
112
+ constexpr uint32_t g1_2 = ((mask2 & 2) >> 1) * 0xffffffff;
113
+ constexpr uint32_t g2_2 = ((mask2 & 4) >> 2) * 0xffffffff;
114
+ constexpr uint32_t g3_2 = ((mask2 & 8) >> 3) * 0xffffffff;
115
+
116
+ const vbool32 mask_2nd = (vbool32){g0_2, g1_2, g2_2, g3_2};
117
+ // generated masks
118
+ return {a, (vint32)vec_sel(a._vec1, b._vec1, (vbool32)mask_2nd)};
119
+ }
120
+
121
+ template <uint64_t mask>
122
+ static std::enable_if_t<
123
+ (mask > 15 && ((mask & 255) != 255) && ((mask & 15) != 0) &&
124
+ ((mask & 15) != 15)),
125
+ Vectorized<int32_t>>
126
+ C10_ALWAYS_INLINE blend(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
127
+ constexpr uint32_t g0 = (mask & 1) * 0xffffffff;
128
+ constexpr uint32_t g1 = ((mask & 2) >> 1) * 0xffffffff;
129
+ constexpr uint32_t g2 = ((mask & 4) >> 2) * 0xffffffff;
130
+ constexpr uint32_t g3 = ((mask & 8) >> 3) * 0xffffffff;
131
+ constexpr uint32_t mask2 = (mask & 255) >> 4;
132
+ constexpr uint32_t g0_2 = (mask2 & 1) * 0xffffffff;
133
+ constexpr uint32_t g1_2 = ((mask2 & 2) >> 1) * 0xffffffff;
134
+ constexpr uint32_t g2_2 = ((mask2 & 4) >> 2) * 0xffffffff;
135
+ constexpr uint32_t g3_2 = ((mask2 & 8) >> 3) * 0xffffffff;
136
+
137
+ const vbool32 mask_1st = (vbool32){g0, g1, g2, g3};
138
+ const vbool32 mask_2nd = (vbool32){g0_2, g1_2, g2_2, g3_2};
139
+ // generated masks
140
+ return {
141
+ (vint32)vec_sel(a._vec0, b._vec0, (vbool32)mask_1st),
142
+ (vint32)vec_sel(a._vec1, b._vec1, (vbool32)mask_2nd)};
143
+ }
144
+
145
+ static Vectorized<int32_t> C10_ALWAYS_INLINE blendv(
146
+ const Vectorized<int32_t>& a,
147
+ const Vectorized<int32_t>& b,
148
+ const Vectorized<int32_t>& mask) {
149
+ // the mask used here is returned by a comparison of vec256;
150
+ // assuming this we can use the same mask directly with vec_sel
151
+ // warning intel style mask will not work properly
152
+ return {
153
+ vec_sel(a._vec0, b._vec0, mask._vecb0),
154
+ vec_sel(a._vec1, b._vec1, mask._vecb1)};
155
+ }
156
+
157
+ template <typename step_t>
158
+ static Vectorized<int32_t> arange(int32_t base = 0, step_t step = static_cast<step_t>(1)) {
159
+ return Vectorized<int32_t>(
160
+ base,
161
+ base + step,
162
+ base + 2 * step,
163
+ base + 3 * step,
164
+ base + 4 * step,
165
+ base + 5 * step,
166
+ base + 6 * step,
167
+ base + 7 * step);
168
+ }
169
+ static Vectorized<int32_t> set(
170
+ const Vectorized<int32_t>& a,
171
+ const Vectorized<int32_t>& b,
172
+ size_t count = size()) {
173
+ switch (count) {
174
+ case 0:
175
+ return a;
176
+ case 1:
177
+ return blend<1>(a, b);
178
+ case 2:
179
+ return blend<3>(a, b);
180
+ case 3:
181
+ return blend<7>(a, b);
182
+ case 4:
183
+ return blend<15>(a, b);
184
+ case 5:
185
+ return blend<31>(a, b);
186
+ case 6:
187
+ return blend<63>(a, b);
188
+ case 7:
189
+ return blend<127>(a, b);
190
+ }
191
+
192
+ return b;
193
+ }
194
+ static Vectorized<value_type> C10_ALWAYS_INLINE
195
+ loadu(const void* ptr, int count = size()) {
196
+ if (count == size()) {
197
+ return {
198
+ vec_vsx_ld(offset0, reinterpret_cast<const value_type*>(ptr)),
199
+ vec_vsx_ld(offset16, reinterpret_cast<const value_type*>(ptr))};
200
+ }
201
+
202
+ __at_align__ value_type tmp_values[size()] = {};
203
+ std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(value_type));
204
+
205
+ return {vec_vsx_ld(offset0, tmp_values), vec_vsx_ld(offset16, tmp_values)};
206
+ }
207
+ void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const {
208
+ if (count == size()) {
209
+ vec_vsx_st(_vec0, offset0, reinterpret_cast<value_type*>(ptr));
210
+ vec_vsx_st(_vec1, offset16, reinterpret_cast<value_type*>(ptr));
211
+ } else if (count > 0) {
212
+ __at_align__ value_type tmp_values[size()];
213
+ vec_vsx_st(_vec0, offset0, tmp_values);
214
+ vec_vsx_st(_vec1, offset16, tmp_values);
215
+ std::memcpy(
216
+ ptr, tmp_values, std::min(count, size()) * sizeof(value_type));
217
+ }
218
+ }
219
+ const int32_t& operator[](int idx) const = delete;
220
+ int32_t& operator[](int idx) = delete;
221
+
222
+ Vectorized<int32_t> angle() const {
223
+ return blendv(
224
+ Vectorized<int32_t>(0), Vectorized<int32_t>(c10::pi<int32_t>), *this < Vectorized<int32_t>(0));
225
+ }
226
+ Vectorized<int32_t> real() const {
227
+ return *this;
228
+ }
229
+ Vectorized<int32_t> imag() const {
230
+ return Vectorized<int32_t>{0};
231
+ }
232
+ Vectorized<int32_t> conj() const {
233
+ return *this;
234
+ }
235
+
236
+ Vectorized<int32_t> C10_ALWAYS_INLINE abs() const {
237
+ return {vec_abs(_vec0), vec_abs(_vec1)};
238
+ }
239
+
240
+ Vectorized<int32_t> C10_ALWAYS_INLINE neg() const {
241
+ return {vec_neg(_vec0), vec_neg(_vec1)};
242
+ }
243
+
244
+ DEFINE_MEMBER_UNARY_OP(operator~, int32_t, vec_not)
245
+ DEFINE_MEMBER_OP(operator==, int32_t, vec_cmpeq)
246
+ DEFINE_MEMBER_OP(operator!=, int32_t, vec_cmpne)
247
+ DEFINE_MEMBER_OP(operator<, int32_t, vec_cmplt)
248
+ DEFINE_MEMBER_OP(operator<=, int32_t, vec_cmple)
249
+ DEFINE_MEMBER_OP(operator>, int32_t, vec_cmpgt)
250
+ DEFINE_MEMBER_OP(operator>=, int32_t, vec_cmpge)
251
+ DEFINE_MEMBER_OP_AND_ONE(eq, int32_t, vec_cmpeq)
252
+ DEFINE_MEMBER_OP_AND_ONE(ne, int32_t, vec_cmpne)
253
+ DEFINE_MEMBER_OP_AND_ONE(lt, int32_t, vec_cmplt)
254
+ DEFINE_MEMBER_OP_AND_ONE(le, int32_t, vec_cmple)
255
+ DEFINE_MEMBER_OP_AND_ONE(gt, int32_t, vec_cmpgt)
256
+ DEFINE_MEMBER_OP_AND_ONE(ge, int32_t, vec_cmpge)
257
+ DEFINE_MEMBER_OP(operator+, int32_t, vec_add)
258
+ DEFINE_MEMBER_OP(operator-, int32_t, vec_sub)
259
+ DEFINE_MEMBER_OP(operator*, int32_t, vec_mul)
260
+ DEFINE_MEMBER_EMULATE_BINARY_OP(operator/, int32_t, /)
261
+ DEFINE_MEMBER_OP(maximum, int32_t, vec_max)
262
+ DEFINE_MEMBER_OP(minimum, int32_t, vec_min)
263
+ DEFINE_MEMBER_OP(operator&, int32_t, vec_and)
264
+ DEFINE_MEMBER_OP(operator|, int32_t, vec_or)
265
+ DEFINE_MEMBER_OP(operator^, int32_t, vec_xor)
266
+ };
267
+
268
+ template <>
269
+ Vectorized<int32_t> inline operator<<(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
270
+ vuint32 shift_vec0 = reinterpret_cast<vuint32>(b.vec0());
271
+ vuint32 shift_vec1 = reinterpret_cast<vuint32>(b.vec1());
272
+ return Vectorized<int32_t>{vec_sl(a.vec0(), shift_vec0), vec_sl(a.vec1(), shift_vec1)};
273
+ }
274
+
275
+ template <>
276
+ Vectorized<int32_t> inline operator>>(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
277
+ vuint32 shift_vec0 = reinterpret_cast<vuint32>(b.vec0());
278
+ vuint32 shift_vec1 = reinterpret_cast<vuint32>(b.vec1());
279
+ return Vectorized<int32_t>{vec_sr(a.vec0(), shift_vec0), vec_sr(a.vec1(), shift_vec1)};
280
+ }
281
+
282
+ template <>
283
+ Vectorized<int32_t> inline maximum(
284
+ const Vectorized<int32_t>& a,
285
+ const Vectorized<int32_t>& b) {
286
+ return a.maximum(b);
287
+ }
288
+
289
+ template <>
290
+ Vectorized<int32_t> inline minimum(
291
+ const Vectorized<int32_t>& a,
292
+ const Vectorized<int32_t>& b) {
293
+ return a.minimum(b);
294
+ }
295
+
296
+ } // namespace
297
+ } // namespace vec
298
+ } // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_qint32_vsx.h ADDED
@@ -0,0 +1,245 @@
1
+ #pragma once
2
+
3
+ #include <ATen/cpu/vec/intrinsics.h>
4
+ #include <ATen/cpu/vec/vec_base.h>
5
+ #include <ATen/cpu/vec/vec256/vsx/vsx_helpers.h>
6
+ #include <c10/util/qint32.h>
7
+ #include <array>
8
+
9
+ // This file defines Vectorized<> for the quantized types.
10
+ //
11
+ //
12
+ // Currently, we simply use these classes as efficient converters between
13
+ // the quantized types and Vectorized<float>, usually in bandwidth-bound cases
14
+ // where doing the arithmetic in full-precision is acceptable (e.g.
15
+ // elementwise operators).
16
+ //
17
+ //
18
+ // Conversions are as follows:
19
+ // Vectorized<qint32> -> 1x Vectorized<float>
20
+ //
21
+ // The size of the returned float vector is specified by the special
22
+ // constexpr function float_num_vecs. The type of the value returned
23
+ // from dequantize (and expected as an argument to quantize) is
24
+ // specified by float_vec_return_type.
25
+ //
26
+ // When writing kernels with these vectors, it is expected that floating-
27
+ // point operations will be carried out in a loop over Vectorized<T>::float_num_vecs
28
+ // iterations.
29
+
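A minimal kernel sketch (editorial addition, not part of this commit) showing the pattern described above for Vectorized<c10::qint32>; it presupposes the definitions below, and the names src, dst, vscale, vzero_point, scale and zp are hypothetical caller-provided inputs:

inline void example_qint32_kernel(const c10::qint32* src, c10::qint32* dst,
                                  Vectorized<float> vscale, Vectorized<float> vzero_point,
                                  float scale, int32_t zp) {
  auto qv = Vectorized<c10::qint32>::loadu(src);
  auto fvecs = qv.dequantize(vscale, vzero_point);   // std::array<Vectorized<float>, 1>
  for (size_t i = 0; i < Vectorized<c10::qint32>::float_num_vecs(); ++i) {
    fvecs[i] = fvecs[i] * Vectorized<float>(2.0f);   // arbitrary float-domain work
  }
  Vectorized<c10::qint32>::quantize(fvecs, scale, zp, 1.0f / scale).store(dst);
}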
30
+ namespace at {
31
+ namespace vec {
32
+ inline namespace CPU_CAPABILITY {
33
+
34
+ template <>
35
+ struct Vectorized<c10::qint32> {
36
+ private:
37
+ union {
38
+ struct {
39
+ vint32 _vec0;
40
+ vint32 _vec1;
41
+ };
42
+ struct {
43
+ vbool32 _vecb0;
44
+ vbool32 _vecb1;
45
+ };
46
+
47
+ } __attribute__((__may_alias__));
48
+
49
+ public:
50
+ Vectorized() {}
51
+
52
+ using size_type = int;
53
+ static constexpr size_type size() {
54
+ return 8;
55
+ }
56
+
57
+ static constexpr size_t float_num_vecs() {
58
+ return 1;
59
+ }
60
+ static constexpr int int_num_vecs() {
61
+ return 1;
62
+ }
63
+ using float_vec_return_type = std::array<Vectorized<float>, 1>;
64
+ using int_vec_return_type = std::array<Vectorized<c10::qint32>, 1>;
65
+ using value_type = c10::qint32::underlying;
66
+ using vec_internal_type = vint32;
67
+ using vec_internal_mask_type = vbool32;
68
+ C10_ALWAYS_INLINE Vectorized(vint32 v) : _vec0{v}, _vec1{v} {}
69
+ C10_ALWAYS_INLINE Vectorized(vbool32 vmask) : _vecb0{vmask}, _vecb1{vmask} {}
70
+ C10_ALWAYS_INLINE Vectorized(vint32 v1, vint32 v2) : _vec0{v1}, _vec1{v2} {}
71
+ C10_ALWAYS_INLINE Vectorized(vbool32 v1, vbool32 v2) : _vecb0{v1}, _vecb1{v2} {}
72
+
73
+ Vectorized(const c10::qint32& val)
74
+ : _vec0(vec_splats(val.val_)), _vec1(vec_splats(val.val_)) {}
75
+
76
+ static Vectorized<c10::qint32> C10_ALWAYS_INLINE
77
+ loadu(const void* ptr, int count = size()) {
78
+ if (count == size()) {
79
+ return {
80
+ vec_vsx_ld(offset0, reinterpret_cast<const value_type*>(ptr)),
81
+ vec_vsx_ld(offset16, reinterpret_cast<const value_type*>(ptr))};
82
+ }
83
+
84
+ __at_align__ value_type tmp_values[size()] = {};
85
+ std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(value_type));
86
+
87
+ return {vec_vsx_ld(offset0, tmp_values), vec_vsx_ld(offset16, tmp_values)};
88
+ }
89
+ void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const {
90
+ if (count == size()) {
91
+ vec_vsx_st(_vec0, offset0, reinterpret_cast<value_type*>(ptr));
92
+ vec_vsx_st(_vec1, offset16, reinterpret_cast<value_type*>(ptr));
93
+ } else if (count > 0) {
94
+ __at_align__ value_type tmp_values[size()];
95
+ vec_vsx_st(_vec0, offset0, tmp_values);
96
+ vec_vsx_st(_vec1, offset16, tmp_values);
97
+ std::memcpy(
98
+ ptr, tmp_values, std::min(count, size()) * sizeof(value_type));
99
+ }
100
+ }
101
+
102
+ C10_ALWAYS_INLINE const vec_internal_type& vec0() const {
103
+ return _vec0;
104
+ }
105
+ C10_ALWAYS_INLINE const vec_internal_type& vec1() const {
106
+ return _vec1;
107
+ }
108
+
109
+ float_vec_return_type dequantize(
110
+ Vectorized<float> scale,
111
+ Vectorized<float> zero_point,
112
+ Vectorized<float> scale_zp_premul) const {
113
+ vfloat32 float_vals0 = vec_float(_vec0);
114
+ vfloat32 float_vals1 = vec_float(_vec1);
115
+ vfloat32 scale_vec0 = scale.vec0();
116
+ vfloat32 scale_vec1 = scale.vec1();
117
+ vfloat32 scale_zp_premul0 = scale_zp_premul.vec0();
118
+ vfloat32 scale_zp_premul1 = scale_zp_premul.vec1();
119
+ return {Vectorized<float>{
120
+ vec_madd(scale_vec0, float_vals0, scale_zp_premul0),
121
+ vec_madd(scale_vec1, float_vals1, scale_zp_premul1)}};
122
+ }
123
+
124
+ float_vec_return_type dequantize(
125
+ Vectorized<float> scale,
126
+ Vectorized<float> zero_point) const {
127
+ vfloat32 float_vals0 = vec_float(_vec0);
128
+ vfloat32 float_vals1 = vec_float(_vec1);
129
+ vfloat32 scale_vec0 = scale.vec0();
130
+ vfloat32 scale_vec1 = scale.vec1();
131
+ vfloat32 zero_point0 = zero_point.vec0();
132
+ vfloat32 zero_point1 = zero_point.vec1();
133
+ return {Vectorized<float>{
134
+ (float_vals0 - zero_point0) * scale_vec0,
135
+ (float_vals1 - zero_point1) * scale_vec1}};
136
+ }
137
+
138
+ static Vectorized<c10::qint32> quantize(
139
+ const float_vec_return_type& rhs,
140
+ float scale,
141
+ int32_t zero_point,
142
+ float inverse_scale) {
143
+ Vectorized<c10::qint32> retval;
144
+
145
+ const vint32 vmin = vec_splats(std::numeric_limits<value_type>::min());
146
+ const vint32 vmax = vec_splats(std::numeric_limits<value_type>::max());
147
+ vfloat32 inverse_scale_v = vec_splats(inverse_scale);
148
+ vfloat32 vec_zero_point = vec_splats((float)(zero_point));
149
+ Vectorized<float> vf0 = rhs[0];
150
+
151
+ vfloat32 vecf0 = vf0.vec0();
152
+ vfloat32 vecf1 = vf0.vec1();
153
+ vecf0 = vec_mul(vecf0, inverse_scale_v);
154
+ vecf1 = vec_mul(vecf1, inverse_scale_v);
155
+ vecf0 = vec_add(vec_rint(vecf0), vec_zero_point);
156
+ vecf1 = vec_add(vec_rint(vecf1), vec_zero_point);
157
+ vint32 veci0 = vec_signed(vecf0);
158
+ vint32 veci1 = vec_signed(vecf1);
159
+
160
+ veci0 = vec_max(veci0, vmin);
161
+ veci1 = vec_max(veci1, vmin);
162
+ veci0 = vec_min(veci0, vmax);
163
+ veci1 = vec_min(veci1, vmax);
164
+
165
+ return {veci0, veci1};
166
+ }
167
+
168
+ Vectorized<c10::qint32> relu(Vectorized<c10::qint32> zero_point) const {
169
+ return {vec_max(_vec0, zero_point._vec0), vec_max(_vec1, zero_point._vec1)};
170
+ }
171
+
172
+ Vectorized<c10::qint32> relu6(
173
+ Vectorized<c10::qint32> zero_point,
174
+ Vectorized<c10::qint32> q_six) const {
175
+ vint32 max0 = vec_max(_vec0, zero_point._vec0);
176
+ vint32 max1 = vec_max(_vec1, zero_point._vec1);
177
+ return {vec_min(max0, q_six._vec0), vec_min(max1, q_six._vec1)};
178
+ }
179
+
180
+ int_vec_return_type widening_subtract(Vectorized<c10::qint32> b) const {
181
+ return {*this - b};
182
+ }
183
+
184
+ static Vectorized<c10::qint32> requantize_from_int(
185
+ const int_vec_return_type& inp,
186
+ float multiplier,
187
+ int32_t zero_point) {
188
+ const vint32 vmin = vec_splats(std::numeric_limits<value_type>::min());
189
+ const vint32 vmax = vec_splats(std::numeric_limits<value_type>::max());
190
+ vfloat32 vec_mult = vec_splats(multiplier);
191
+ vint32 vec_zero_point = vec_splats(zero_point);
192
+ Vectorized<c10::qint32> vi = inp[0];
193
+ vfloat32 vecf0 = vec_float(vi.vec0());
194
+ vfloat32 vecf1 = vec_float(vi.vec1());
195
+
196
+ vecf0 = vec_mul(vecf0, vec_mult);
197
+ vecf1 = vec_mul(vecf1, vec_mult);
198
+
199
+ vecf0 = vec_rint(vecf0);
200
+ vecf1 = vec_rint(vecf1);
201
+
202
+ vint32 veci0 = vec_add(vec_signed(vecf0),vec_zero_point);
203
+ vint32 veci1 = vec_add(vec_signed(vecf1),vec_zero_point);
204
+
205
+ veci0 = vec_max(veci0, vmin);
206
+ veci1 = vec_max(veci1, vmin);
207
+ veci0 = vec_min(veci0, vmax);
208
+ veci1 = vec_min(veci1, vmax);
209
+
210
+ return {veci0, veci1};
211
+ }
212
+
213
+ DEFINE_MEMBER_OP(operator==, c10::qint32, vec_cmpeq)
214
+ DEFINE_MEMBER_OP(operator!=, c10::qint32, vec_cmpne)
215
+ DEFINE_MEMBER_OP(operator<, c10::qint32, vec_cmplt)
216
+ DEFINE_MEMBER_OP(operator<=, c10::qint32, vec_cmple)
217
+ DEFINE_MEMBER_OP(operator>, c10::qint32, vec_cmpgt)
218
+ DEFINE_MEMBER_OP(operator>=, c10::qint32, vec_cmpge)
219
+ DEFINE_MEMBER_OP(operator+, c10::qint32, vec_add)
220
+ DEFINE_MEMBER_OP(operator-, c10::qint32, vec_sub)
221
+ DEFINE_MEMBER_OP(operator*, c10::qint32, vec_mul)
222
+ DEFINE_MEMBER_EMULATE_BINARY_OP(operator/, c10::qint32, /)
223
+ DEFINE_MEMBER_OP(maximum, c10::qint32, vec_max)
224
+ DEFINE_MEMBER_OP(minimum, c10::qint32, vec_min)
225
+ DEFINE_MEMBER_OP(operator&, c10::qint32, vec_and)
226
+ DEFINE_MEMBER_OP(operator|, c10::qint32, vec_or)
227
+ DEFINE_MEMBER_OP(operator^, c10::qint32, vec_xor)
228
+ };
229
+
230
+ template <>
231
+ Vectorized<c10::qint32> inline maximum(
232
+ const Vectorized<c10::qint32>& a,
233
+ const Vectorized<c10::qint32>& b) {
234
+ return a.maximum(b);
235
+ }
236
+
237
+ template <>
238
+ Vectorized<c10::qint32> inline minimum(
239
+ const Vectorized<c10::qint32>& a,
240
+ const Vectorized<c10::qint32>& b) {
241
+ return a.minimum(b);
242
+ }
243
+ } // namespace
244
+ } // namespace vec
245
+ } // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_qint8_vsx.h ADDED
@@ -0,0 +1,396 @@
1
+ #pragma once
2
+
3
+ #include <ATen/cpu/vec/intrinsics.h>
4
+ #include <ATen/cpu/vec/vec_base.h>
5
+ #include <ATen/cpu/vec/vec256/vsx/vsx_helpers.h>
6
+ #include <c10/util/qint8.h>
7
+ #include <array>
8
+
9
+ // This file defines Vectorized<> for the quantized types.
10
+ //
11
+ //
12
+ // Currently, we simply use these classes as efficient converters between
13
+ // the quantized types and Vectorized<float>, usually in bandwidth-bound cases
14
+ // where doing the arithmetic in full-precision is acceptable (e.g.
15
+ // elementwise operators).
16
+ //
17
+ //
18
+ // Conversions are as follows:
19
+ // Vectorized<qint8> -> 4x Vectorized<float>
20
+ //
21
+ // The size of the returned float vector is specified by the special
22
+ // constexpr function float_num_vecs. The type of the value returned
23
+ // from dequantize (and expected as an argument to quantize) is
24
+ // specified by float_vec_return_type.
25
+ //
26
+ // When writing kernels with these vectors, it is expected that floating-
27
+ // point operations will be carried out in a loop over Vectorized<T>::float_num_vecs
28
+ // iterations.
29
+
30
+ namespace at {
31
+ namespace vec {
32
+ inline namespace CPU_CAPABILITY {
33
+
34
+ template <>
35
+ struct Vectorized<c10::qint8> {
36
+ private:
37
+ union {
38
+ struct {
39
+ vint8 _vec0;
40
+ vint8 _vec1;
41
+ };
42
+ struct {
43
+ vbool8 _vecb0;
44
+ vbool8 _vecb1;
45
+ };
46
+
47
+ } __attribute__((__may_alias__));
48
+
49
+ public:
50
+ Vectorized() {}
51
+ using size_type = int;
52
+ static constexpr size_type size() {
53
+ return 32;
54
+ }
55
+
56
+ static constexpr size_t float_num_vecs() {
57
+ return 4;
58
+ }
59
+ static constexpr int int_num_vecs() {
60
+ return 4;
61
+ }
62
+ using float_vec_return_type = std::array<Vectorized<float>, 4>;
63
+ using int_vec_return_type = std::array<Vectorized<c10::qint32>, 4>;
64
+ using value_type = typename c10::qint8::underlying;
65
+ using vec_internal_type = vint8;
66
+ using vec_internal_mask_type = vbool8;
67
+ // Broadcast constructor
68
+ C10_ALWAYS_INLINE Vectorized(const c10::qint8& val)
69
+ : _vec0{vec_splats(val.val_)}, _vec1{vec_splats(val.val_)} {}
70
+
71
+ C10_ALWAYS_INLINE Vectorized(const Vectorized<c10::qint8>& other)
72
+ : _vec0{other._vec0}, _vec1(other._vec1) {}
73
+
74
+ C10_ALWAYS_INLINE Vectorized(vint8 v) : _vec0{v}, _vec1{v} {}
75
+ C10_ALWAYS_INLINE Vectorized(vbool8 vmask) : _vecb0{vmask}, _vecb1{vmask} {}
76
+ C10_ALWAYS_INLINE Vectorized(vint8 v1, vint8 v2) : _vec0{v1}, _vec1{v2} {}
77
+ C10_ALWAYS_INLINE Vectorized(vbool8 v1, vbool8 v2) : _vecb0{v1}, _vecb1{v2} {}
78
+
79
+ C10_ALWAYS_INLINE const vec_internal_type& vec0() const {
80
+ return _vec0;
81
+ }
82
+ C10_ALWAYS_INLINE const vec_internal_type& vec1() const {
83
+ return _vec1;
84
+ }
85
+
86
+ static C10_ALWAYS_INLINE Vectorized<c10::qint8> loadu(
87
+ const void* ptr,
88
+ int count = size()) {
89
+ if (count == size()) {
90
+ return {
91
+ vec_vsx_ld(offset0, reinterpret_cast<const vint8*>(ptr)),
92
+ vec_vsx_ld(offset16, reinterpret_cast<const vint8*>(ptr))};
93
+ }
94
+ __at_align__ value_type tmp_values[size()];
95
+ std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(value_type));
96
+ return {vec_vsx_ld(offset0, tmp_values), vec_vsx_ld(offset16, tmp_values)};
97
+ }
98
+ void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const {
99
+ if (count == size()) {
100
+ vec_vsx_st(_vec0, offset0, reinterpret_cast<value_type*>(ptr));
101
+ vec_vsx_st(_vec1, offset16, reinterpret_cast<value_type*>(ptr));
102
+ } else if (count > 0) {
103
+ __at_align__ value_type tmp_values[size()];
104
+ vec_vsx_st(_vec0, offset0, tmp_values);
105
+ vec_vsx_st(_vec1, offset16, tmp_values);
106
+ std::memcpy(
107
+ ptr, tmp_values, std::min(count, size()) * sizeof(value_type));
108
+ }
109
+ }
110
+
111
+ public:
112
+ float_vec_return_type C10_ALWAYS_INLINE dequantize(
113
+ Vectorized<float> scale,
114
+ Vectorized<float> zero_point,
115
+ Vectorized<float> scale_zp_premul) const {
116
+ vint16 vecshi0 = vec_unpackh(_vec0);
117
+ vint16 vecshi1 = vec_unpackl(_vec0);
118
+
119
+ vint16 vecshi2 = vec_unpackh(_vec1);
120
+ vint16 vecshi3 = vec_unpackl(_vec1);
121
+
122
+ vint32 veci0 = vec_unpackh(vecshi0);
123
+ vint32 veci1 = vec_unpackl(vecshi0);
124
+
125
+ vint32 veci2 = vec_unpackh(vecshi1);
126
+ vint32 veci3 = vec_unpackl(vecshi1);
127
+
128
+ vint32 veci4 = vec_unpackh(vecshi2);
129
+ vint32 veci5 = vec_unpackl(vecshi2);
130
+
131
+ vint32 veci6 = vec_unpackh(vecshi3);
132
+ vint32 veci7 = vec_unpackl(vecshi3);
133
+
134
+ vfloat32 vecf0_0 = vec_float(veci0);
135
+ vfloat32 vecf1_0 = vec_float(veci1);
136
+
137
+ vfloat32 vecf0_1 = vec_float(veci2);
138
+ vfloat32 vecf1_1 = vec_float(veci3);
139
+
140
+ vfloat32 vecf0_2 = vec_float(veci4);
141
+ vfloat32 vecf1_2 = vec_float(veci5);
142
+
143
+ vfloat32 vecf0_3 = vec_float(veci6);
144
+ vfloat32 vecf1_3 = vec_float(veci7);
145
+ vfloat32 scale_vec0 = scale.vec0();
146
+ vfloat32 scale_vec1 = scale.vec1();
147
+ vfloat32 scale_zp_premul0 = scale_zp_premul.vec0();
148
+ vfloat32 scale_zp_premul1 = scale_zp_premul.vec1();
149
+ return {
150
+ Vectorized<float>{
151
+ vec_madd(scale_vec0, vecf0_0, scale_zp_premul0),
152
+ vec_madd(scale_vec1, vecf1_0, scale_zp_premul1)},
153
+ Vectorized<float>{
154
+ vec_madd(scale_vec0, vecf0_1, scale_zp_premul0),
155
+ vec_madd(scale_vec1, vecf1_1, scale_zp_premul1)},
156
+ Vectorized<float>{
157
+ vec_madd(scale_vec0, vecf0_2, scale_zp_premul0),
158
+ vec_madd(scale_vec1, vecf1_2, scale_zp_premul1)},
159
+ Vectorized<float>{
160
+ vec_madd(scale_vec0, vecf0_3, scale_zp_premul0),
161
+ vec_madd(scale_vec1, vecf1_3, scale_zp_premul1)}};
162
+ }
163
+
164
+ static Vectorized<c10::qint8> quantize(
165
+ const float_vec_return_type& rhs,
166
+ float scale,
167
+ int32_t zero_point,
168
+ float inverse_scale) {
169
+ // constexpr int32_t min_val = std::numeric_limits<value_type>::min();
170
+ // constexpr int32_t max_val = std::numeric_limits<value_type>::max();
171
+
172
+ vfloat32 inverse_scale_v = vec_splats(inverse_scale);
173
+ vfloat32 vec_zero_point = vec_splats((float)zero_point);
174
+ // vint32 vmin = vec_splats(min_val);
175
+ // vint32 vmax = vec_splats(max_val);
176
+
177
+ Vectorized<float> vf0 = rhs[0];
178
+ Vectorized<float> vf1 = rhs[1];
179
+ Vectorized<float> vf2 = rhs[2];
180
+ Vectorized<float> vf3 = rhs[3];
181
+ vfloat32 vecf0 = vf0.vec0();
182
+ vfloat32 vecf1 = vf0.vec1();
183
+ vfloat32 vecf2 = vf1.vec0();
184
+ vfloat32 vecf3 = vf1.vec1();
185
+
186
+ vfloat32 vecf4 = vf2.vec0();
187
+ vfloat32 vecf5 = vf2.vec1();
188
+ vfloat32 vecf6 = vf3.vec0();
189
+ vfloat32 vecf7 = vf3.vec1();
190
+
191
+ vecf0 = vec_mul(vecf0, inverse_scale_v);
192
+ vecf1 = vec_mul(vecf1, inverse_scale_v);
193
+ vecf2 = vec_mul(vecf2, inverse_scale_v);
194
+ vecf3 = vec_mul(vecf3, inverse_scale_v);
195
+
196
+ vecf4 = vec_mul(vecf4, inverse_scale_v);
197
+ vecf5 = vec_mul(vecf5, inverse_scale_v);
198
+ vecf6 = vec_mul(vecf6, inverse_scale_v);
199
+ vecf7 = vec_mul(vecf7, inverse_scale_v);
200
+
201
+ vecf0 = vec_add(vec_rint(vecf0), vec_zero_point);
202
+ vecf1 = vec_add(vec_rint(vecf1), vec_zero_point);
203
+ vecf2 = vec_add(vec_rint(vecf2), vec_zero_point);
204
+ vecf3 = vec_add(vec_rint(vecf3), vec_zero_point);
205
+
206
+ vecf4 = vec_add(vec_rint(vecf4), vec_zero_point);
207
+ vecf5 = vec_add(vec_rint(vecf5), vec_zero_point);
208
+ vecf6 = vec_add(vec_rint(vecf6), vec_zero_point);
209
+ vecf7 = vec_add(vec_rint(vecf7), vec_zero_point);
210
+
211
+ vint32 veci0 = vec_signed(vecf0);
212
+ vint32 veci1 = vec_signed(vecf1);
213
+ vint32 veci2 = vec_signed(vecf2);
214
+ vint32 veci3 = vec_signed(vecf3);
215
+
216
+ vint32 veci4 = vec_signed(vecf4);
217
+ vint32 veci5 = vec_signed(vecf5);
218
+ vint32 veci6 = vec_signed(vecf6);
219
+ vint32 veci7 = vec_signed(vecf7);
220
+
221
+ // veci0 = vec_min(vmax, vec_max( vmin, vecf0)) ;
222
+ // veci1 = vec_min(vmax, vec_max( vmin, vecf1)) ;
223
+ // veci2 = vec_min(vmax, vec_max( vmin, vecf2)) ;
224
+ // veci3 = vec_min(vmax, vec_max( vmin, vecf3)) ;
225
+
226
+ // veci4 = vec_min(vmax, vec_max( vmin, vecf4)) ;
227
+ // veci5 = vec_min(vmax, vec_max( vmin, vecf5)) ;
228
+ // veci6 = vec_min(vmax, vec_max( vmin, vecf6)) ;
229
+ // veci7 = vec_min(vmax, vec_max( vmin, vecf7)) ;
230
+ // vec_packs already saturates (clamps), so the explicit min/max above is unnecessary
231
+ vint16 vecshi0 = vec_packs(veci0, veci1);
232
+ vint16 vecshi1 = vec_packs(veci2, veci3);
233
+ vint16 vecshi2 = vec_packs(veci4, veci5);
234
+ vint16 vecshi3 = vec_packs(veci6, veci7);
235
+
236
+ vint8 vec0 = vec_packs(vecshi0, vecshi1);
237
+ vint8 vec1 = vec_packs(vecshi2, vecshi3);
238
+
239
+ return {vec0, vec1};
240
+ }
241
+
242
+ Vectorized<c10::qint8> C10_ALWAYS_INLINE relu(Vectorized<c10::qint8> zero_point) const {
243
+ return {vec_max(_vec0, zero_point._vec0), vec_max(_vec1, zero_point._vec1)};
244
+ }
245
+
246
+ Vectorized<c10::qint8> C10_ALWAYS_INLINE
247
+ relu6(Vectorized<c10::qint8> zero_point, Vectorized<c10::qint8> q_six) const {
248
+ vint8 max0 = vec_max(_vec0, zero_point._vec0);
249
+ vint8 max1 = vec_max(_vec1, zero_point._vec1);
250
+ return {vec_min(max0, q_six._vec0), vec_min(max1, q_six._vec1)};
251
+ }
252
+
253
+ int_vec_return_type widening_subtract(Vectorized<c10::qint8> b) const {
254
+ vint16 vecshi0 = vec_unpackh(_vec0);
255
+ vint16 vecBshi0 = vec_unpackh(b._vec0);
256
+ vint16 vecshi1 = vec_unpackl(_vec0);
257
+ vint16 vecBshi1 = vec_unpackl(b._vec0);
258
+
259
+ vint16 vecshi2 = vec_unpackh(_vec1);
260
+ vint16 vecBshi2 = vec_unpackh(b._vec1);
261
+ vint16 vecshi3 = vec_unpackl(_vec1);
262
+ vint16 vecBshi3 = vec_unpackl(b._vec1);
263
+
264
+ vint32 veci0 = vec_unpackh(vecshi0);
265
+ vint32 vecBi0 = vec_unpackh(vecBshi0);
266
+ vint32 veci1 = vec_unpackl(vecshi0);
267
+ vint32 vecBi1 = vec_unpackl(vecBshi0);
268
+
269
+ vint32 veci2 = vec_unpackh(vecshi1);
270
+ vint32 vecBi2 = vec_unpackh(vecBshi1);
271
+ vint32 veci3 = vec_unpackl(vecshi1);
272
+ vint32 vecBi3 = vec_unpackl(vecBshi1);
273
+
274
+ vint32 veci4 = vec_unpackh(vecshi2);
275
+ vint32 vecBi4 = vec_unpackh(vecBshi2);
276
+ vint32 veci5 = vec_unpackl(vecshi2);
277
+ vint32 vecBi5 = vec_unpackl(vecBshi2);
278
+
279
+ vint32 veci6 = vec_unpackh(vecshi3);
280
+ vint32 vecBi6 = vec_unpackh(vecBshi3);
281
+ vint32 veci7 = vec_unpackl(vecshi3);
282
+ vint32 vecBi7 = vec_unpackl(vecBshi3);
283
+
284
+ return {
285
+ Vectorized<c10::qint32>(veci0 - vecBi0, veci1 - vecBi1),
286
+ Vectorized<c10::qint32>(veci2 - vecBi2, veci3 - vecBi3),
287
+ Vectorized<c10::qint32>(veci4 - vecBi4, veci5 - vecBi5),
288
+ Vectorized<c10::qint32>(veci6 - vecBi6, veci7 - vecBi7)};
289
+ }
290
+
291
+ static Vectorized<c10::qint8> requantize_from_int(
292
+ const int_vec_return_type& inp,
293
+ float multiplier,
294
+ int32_t zero_point) {
295
+ vfloat32 vec_multiplier = vec_splats(multiplier);
296
+ vint32 vec_zero_point = vec_splats(zero_point);
297
+
298
+ Vectorized<c10::qint32> vi0 = inp[0];
299
+ Vectorized<c10::qint32> vi1 = inp[1];
300
+ Vectorized<c10::qint32> vi2 = inp[2];
301
+ Vectorized<c10::qint32> vi3 = inp[3];
302
+
303
+ vfloat32 vecf0 = vec_float(vi0.vec0());
304
+ vfloat32 vecf1 = vec_float(vi0.vec1());
305
+ vfloat32 vecf2 = vec_float(vi1.vec0());
306
+ vfloat32 vecf3 = vec_float(vi1.vec1());
307
+
308
+ vfloat32 vecf4 = vec_float(vi2.vec0());
309
+ vfloat32 vecf5 = vec_float(vi2.vec1());
310
+ vfloat32 vecf6 = vec_float(vi3.vec0());
311
+ vfloat32 vecf7 = vec_float(vi3.vec1());
312
+
313
+ vecf0 = vec_mul(vecf0, vec_multiplier);
314
+ vecf1 = vec_mul(vecf1, vec_multiplier);
315
+ vecf2 = vec_mul(vecf2, vec_multiplier);
316
+ vecf3 = vec_mul(vecf3, vec_multiplier);
317
+
318
+ vecf4 = vec_mul(vecf4, vec_multiplier);
319
+ vecf5 = vec_mul(vecf5, vec_multiplier);
320
+ vecf6 = vec_mul(vecf6, vec_multiplier);
321
+ vecf7 = vec_mul(vecf7, vec_multiplier);
322
+
323
+ vecf0 = vec_rint(vecf0);
324
+ vecf1 = vec_rint(vecf1);
325
+ vecf2 = vec_rint(vecf2);
326
+ vecf3 = vec_rint(vecf3);
327
+
328
+ vecf4 = vec_rint(vecf4);
329
+ vecf5 = vec_rint(vecf5);
330
+ vecf6 = vec_rint(vecf6);
331
+ vecf7 = vec_rint(vecf7);
332
+
333
+ vint32 veci0 = vec_signed(vecf0);
334
+ vint32 veci1 = vec_signed(vecf1);
335
+ vint32 veci2 = vec_signed(vecf2);
336
+ vint32 veci3 = vec_signed(vecf3);
337
+
338
+ vint32 veci4 = vec_signed(vecf4);
339
+ vint32 veci5 = vec_signed(vecf5);
340
+ vint32 veci6 = vec_signed(vecf6);
341
+ vint32 veci7 = vec_signed(vecf7);
342
+
343
+ veci0 = vec_add(veci0, vec_zero_point);
344
+ veci1 = vec_add(veci1, vec_zero_point);
345
+ veci2 = vec_add(veci2, vec_zero_point);
346
+ veci3 = vec_add(veci3, vec_zero_point);
347
+
348
+ veci4 = vec_add(veci4, vec_zero_point);
349
+ veci5 = vec_add(veci5, vec_zero_point);
350
+ veci6 = vec_add(veci6, vec_zero_point);
351
+ veci7 = vec_add(veci7, vec_zero_point);
352
+
353
+ vint16 vecshi0 = vec_packs(veci0, veci1);
354
+ vint16 vecshi1 = vec_packs(veci2, veci3);
355
+ vint16 vecshi2 = vec_packs(veci4, veci5);
356
+ vint16 vecshi3 = vec_packs(veci6, veci7);
357
+
358
+ vint8 vec0 = vec_packs(vecshi0, vecshi1);
359
+ vint8 vec1 = vec_packs(vecshi2, vecshi3);
360
+
361
+ return {vec0, vec1};
362
+ }
363
+
364
+ DEFINE_MEMBER_OP(operator==, c10::qint8, vec_cmpeq)
365
+ DEFINE_MEMBER_OP(operator!=, c10::qint8, vec_cmpne)
366
+ DEFINE_MEMBER_OP(operator<, c10::qint8, vec_cmplt)
367
+ DEFINE_MEMBER_OP(operator<=, c10::qint8, vec_cmple)
368
+ DEFINE_MEMBER_OP(operator>, c10::qint8, vec_cmpgt)
369
+ DEFINE_MEMBER_OP(operator>=, c10::qint8, vec_cmpge)
370
+ DEFINE_MEMBER_OP(operator+, c10::qint8, vec_add)
371
+ DEFINE_MEMBER_OP(operator-, c10::qint8, vec_sub)
372
+ DEFINE_MEMBER_OP(operator*, c10::qint8, vec_mul)
373
+ DEFINE_MEMBER_EMULATE_BINARY_OP(operator/, c10::qint8, /)
374
+ DEFINE_MEMBER_OP(maximum, c10::qint8, vec_max)
375
+ DEFINE_MEMBER_OP(minimum, c10::qint8, vec_min)
376
+ DEFINE_MEMBER_OP(operator&, c10::qint8, vec_and)
377
+ DEFINE_MEMBER_OP(operator|, c10::qint8, vec_or)
378
+ DEFINE_MEMBER_OP(operator^, c10::qint8, vec_xor)
379
+ };
380
+
381
+ template <>
382
+ Vectorized<c10::qint8> inline maximum(
383
+ const Vectorized<c10::qint8>& a,
384
+ const Vectorized<c10::qint8>& b) {
385
+ return a.maximum(b);
386
+ }
387
+
388
+ template <>
389
+ Vectorized<c10::qint8> inline minimum(
390
+ const Vectorized<c10::qint8>& a,
391
+ const Vectorized<c10::qint8>& b) {
392
+ return a.minimum(b);
393
+ }
394
+ } // namespace
395
+ } // namespace vec
396
+ } // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_quint8_vsx.h ADDED
@@ -0,0 +1,407 @@
1
+ #pragma once
2
+
3
+ #include <ATen/cpu/vec/intrinsics.h>
4
+ #include <ATen/cpu/vec/vec_base.h>
5
+ #include <ATen/cpu/vec/vec256/vsx/vsx_helpers.h>
6
+
7
+ #include <c10/util/irange.h>
8
+ #include <c10/util/quint8.h>
9
+ #include <array>
10
+
11
+ // This file defines Vectorized<> for the quantized types.
12
+ //
13
+ //
14
+ // Currently, we simply use these classes as efficient converters between
15
+ // the quantized types and Vectorized<float>, usually in bandwidth-bound cases
16
+ // where doing the arithmetic in full-precision is acceptable (e.g.
17
+ // elementwise operators).
18
+ //
19
+ //
20
+ // Conversions are as follows:
21
+ // Vectorized<quint8> -> 4x Vectorized<float>
22
+ //
23
+ // The size of the returned float vector is specified by the special
24
+ // constexpr function float_num_vecs. The type of the value returned
25
+ // from dequantize (and expected as an argument to quantize) is
26
+ // specified by float_vec_return_type.
27
+ //
28
+ // When writing kernels with these vectors, it is expected that floating-
29
+ // point operations will be carried out in a loop over Vectorized<T>::float_num_vecs
30
+ // iterations.
31
+
32
+ namespace at {
33
+ namespace vec {
34
+ inline namespace CPU_CAPABILITY {
35
+
36
+ const vint16 mask_unsigned = vec_splats((short int)0xFF);
37
+ template <>
38
+ struct Vectorized<c10::quint8> {
39
+ private:
40
+ union {
41
+ struct {
42
+ vuint8 _vec0;
43
+ vuint8 _vec1;
44
+ };
45
+ struct {
46
+ vbool8 _vecb0;
47
+ vbool8 _vecb1;
48
+ };
49
+
50
+ } __attribute__((__may_alias__));
51
+
52
+ public:
53
+ Vectorized() {}
54
+ using size_type = int;
55
+ static constexpr size_type size() {
56
+ return 32;
57
+ }
58
+
59
+ static constexpr size_t float_num_vecs() {
60
+ return 4;
61
+ }
62
+ static constexpr int int_num_vecs() {
63
+ return 4;
64
+ }
65
+ using float_vec_return_type = std::array<Vectorized<float>, 4>;
66
+ using int_vec_return_type = std::array<Vectorized<c10::qint32>, 4>;
67
+ using value_type = typename c10::quint8::underlying;
68
+ using vec_internal_type = vuint8;
69
+ using vec_internal_mask_type = vbool8;
70
+ // Broadcast constructor
71
+ C10_ALWAYS_INLINE Vectorized(const c10::quint8& val)
72
+ : _vec0(vec_splats(val.val_)), _vec1(vec_splats(val.val_)) {}
73
+
74
+ C10_ALWAYS_INLINE Vectorized(const Vectorized<c10::quint8>& other)
75
+ : _vec0{other._vec0}, _vec1(other._vec1) {}
76
+
77
+ C10_ALWAYS_INLINE Vectorized(vuint8 v) : _vec0{v}, _vec1{v} {}
78
+ C10_ALWAYS_INLINE Vectorized(vbool8 vmask) : _vecb0{vmask}, _vecb1{vmask} {}
79
+ C10_ALWAYS_INLINE Vectorized(vuint8 v1, vuint8 v2) : _vec0{v1}, _vec1{v2} {}
80
+ C10_ALWAYS_INLINE Vectorized(vbool8 v1, vbool8 v2) : _vecb0{v1}, _vecb1{v2} {}
81
+
82
+ C10_ALWAYS_INLINE const vec_internal_type& vec0() const {
83
+ return _vec0;
84
+ }
85
+ C10_ALWAYS_INLINE const vec_internal_type& vec1() const {
86
+ return _vec1;
87
+ }
88
+
89
+ static C10_ALWAYS_INLINE Vectorized<c10::quint8> loadu(
90
+ const void* ptr,
91
+ int count = size()) {
92
+ if (count == size()) {
93
+ return {
94
+ vec_vsx_ld(offset0, reinterpret_cast<const value_type*>(ptr)),
95
+ vec_vsx_ld(offset16, reinterpret_cast<const value_type*>(ptr))};
96
+ }
97
+ __at_align__ value_type tmp_values[size()];
98
+ std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(value_type));
99
+ return {vec_vsx_ld(offset0, tmp_values), vec_vsx_ld(offset16, tmp_values)};
100
+ }
101
+ void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const {
102
+ if (count == size()) {
103
+ vec_vsx_st(_vec0, offset0, reinterpret_cast<value_type*>(ptr));
104
+ vec_vsx_st(_vec1, offset16, reinterpret_cast<value_type*>(ptr));
105
+ } else if (count > 0) {
106
+ __at_align__ value_type tmp_values[size()];
107
+ vec_vsx_st(_vec0, offset0, tmp_values);
108
+ vec_vsx_st(_vec1, offset16, tmp_values);
109
+ std::memcpy(
110
+ ptr, tmp_values, std::min(count, size()) * sizeof(value_type));
111
+ }
112
+ }
113
+
114
+ public:
115
+ float_vec_return_type C10_ALWAYS_INLINE dequantize(
116
+ Vectorized<float> scale,
117
+ Vectorized<float> zero_point,
118
+ Vectorized<float> scale_zp_premul) const {
119
+ // unpacking unsigned as signed
120
+ vint16 vecshi0 = vec_unpackh((vint8)_vec0);
121
+ vint16 vecshi1 = vec_unpackl((vint8)_vec0);
122
+
123
+ vint16 vecshi2 = vec_unpackh((vint8)_vec1);
124
+ vint16 vecshi3 = vec_unpackl((vint8)_vec1);
125
+
126
+ // signed -> unsigned
127
+ vecshi0 = vec_and(vecshi0, mask_unsigned);
128
+ vecshi1 = vec_and(vecshi1, mask_unsigned);
129
+
130
+ vecshi2 = vec_and(vecshi2, mask_unsigned);
131
+ vecshi3 = vec_and(vecshi3, mask_unsigned);
132
+
133
+ vint32 veci0 = vec_unpackh(vecshi0);
134
+ vint32 veci1 = vec_unpackl(vecshi0);
135
+
136
+ vint32 veci2 = vec_unpackh(vecshi1);
137
+ vint32 veci3 = vec_unpackl(vecshi1);
138
+
139
+ vint32 veci4 = vec_unpackh(vecshi2);
140
+ vint32 veci5 = vec_unpackl(vecshi2);
141
+
142
+ vint32 veci6 = vec_unpackh(vecshi3);
143
+ vint32 veci7 = vec_unpackl(vecshi3);
144
+
145
+ vfloat32 vecf0_0 = vec_float(veci0);
146
+ vfloat32 vecf1_0 = vec_float(veci1);
147
+
148
+ vfloat32 vecf0_1 = vec_float(veci2);
149
+ vfloat32 vecf1_1 = vec_float(veci3);
150
+
151
+ vfloat32 vecf0_2 = vec_float(veci4);
152
+ vfloat32 vecf1_2 = vec_float(veci5);
153
+
154
+ vfloat32 vecf0_3 = vec_float(veci6);
155
+ vfloat32 vecf1_3 = vec_float(veci7);
156
+ vfloat32 scale_vec0 = scale.vec0();
157
+ vfloat32 scale_vec1 = scale.vec1();
158
+ vfloat32 scale_zp_premul0 = scale_zp_premul.vec0();
159
+ vfloat32 scale_zp_premul1 = scale_zp_premul.vec1();
160
+ return {
161
+ Vectorized<float>{
162
+ vec_madd(scale_vec0, vecf0_0, scale_zp_premul0),
163
+ vec_madd(scale_vec1, vecf1_0, scale_zp_premul1)},
164
+ Vectorized<float>{
165
+ vec_madd(scale_vec0, vecf0_1, scale_zp_premul0),
166
+ vec_madd(scale_vec1, vecf1_1, scale_zp_premul1)},
167
+ Vectorized<float>{
168
+ vec_madd(scale_vec0, vecf0_2, scale_zp_premul0),
169
+ vec_madd(scale_vec1, vecf1_2, scale_zp_premul1)},
170
+ Vectorized<float>{
171
+ vec_madd(scale_vec0, vecf0_3, scale_zp_premul0),
172
+ vec_madd(scale_vec1, vecf1_3, scale_zp_premul1)}};
173
+ }
174
+
175
+ static Vectorized<c10::quint8> quantize(
176
+ const float_vec_return_type& rhs,
177
+ float scale,
178
+ int32_t zero_point,
179
+ float inverse_scale) {
180
+ // constexpr int32_t min_val = std::numeric_limits<value_type>::min();
181
+ // constexpr int32_t max_val = std::numeric_limits<value_type>::max();
182
+
183
+ vfloat32 vec_inverse = vec_splats(inverse_scale);
184
+ vfloat32 vec_zero_point = vec_splats((float)zero_point);
185
+ // vuint32 vmin = vec_splats(min_val);
186
+ // vuint32 vmax = vec_splats(max_val);
187
+ Vectorized<float> vf0 = rhs[0];
188
+ Vectorized<float> vf1 = rhs[1];
189
+ Vectorized<float> vf2 = rhs[2];
190
+ Vectorized<float> vf3 = rhs[3];
191
+ vfloat32 vecf0 = vf0.vec0();
192
+ vfloat32 vecf1 = vf0.vec1();
193
+ vfloat32 vecf2 = vf1.vec0();
194
+ vfloat32 vecf3 = vf1.vec1();
195
+
196
+ vfloat32 vecf4 = vf2.vec0();
197
+ vfloat32 vecf5 = vf2.vec1();
198
+ vfloat32 vecf6 = vf3.vec0();
199
+ vfloat32 vecf7 = vf3.vec1();
200
+
201
+ vecf0 = vec_mul(vecf0, vec_inverse);
202
+ vecf1 = vec_mul(vecf1, vec_inverse);
203
+ vecf2 = vec_mul(vecf2, vec_inverse);
204
+ vecf3 = vec_mul(vecf3, vec_inverse);
205
+
206
+ vecf4 = vec_mul(vecf4, vec_inverse);
207
+ vecf5 = vec_mul(vecf5, vec_inverse);
208
+ vecf6 = vec_mul(vecf6, vec_inverse);
209
+ vecf7 = vec_mul(vecf7, vec_inverse);
210
+
211
+ vecf0 = vec_add(vec_rint(vecf0), vec_zero_point);
212
+ vecf1 = vec_add(vec_rint(vecf1), vec_zero_point);
213
+ vecf2 = vec_add(vec_rint(vecf2), vec_zero_point);
214
+ vecf3 = vec_add(vec_rint(vecf3), vec_zero_point);
215
+
216
+ vecf4 = vec_add(vec_rint(vecf4), vec_zero_point);
217
+ vecf5 = vec_add(vec_rint(vecf5), vec_zero_point);
218
+ vecf6 = vec_add(vec_rint(vecf6), vec_zero_point);
219
+ vecf7 = vec_add(vec_rint(vecf7), vec_zero_point);
220
+
221
+ vint32 veci0 = vec_signed(vecf0);
222
+ vint32 veci1 = vec_signed(vecf1);
223
+ vint32 veci2 = vec_signed(vecf2);
224
+ vint32 veci3 = vec_signed(vecf3);
225
+
226
+ vint32 veci4 = vec_signed(vecf4);
227
+ vint32 veci5 = vec_signed(vecf5);
228
+ vint32 veci6 = vec_signed(vecf6);
229
+ vint32 veci7 = vec_signed(vecf7);
230
+
231
+ vint16 vecshi0 = vec_packs(veci0, veci1);
232
+ vint16 vecshi1 = vec_packs(veci2, veci3);
233
+ vint16 vecshi2 = vec_packs(veci4, veci5);
234
+ vint16 vecshi3 = vec_packs(veci6, veci7);
235
+
236
+ vuint8 vec0 = vec_packsu(vecshi0, vecshi1);
237
+ vuint8 vec1 = vec_packsu(vecshi2, vecshi3);
238
+
239
+ return {vec0, vec1};
240
+ }
241
+
242
+ Vectorized<c10::quint8> C10_ALWAYS_INLINE relu(Vectorized<c10::quint8> zero_point) const {
243
+ return {vec_max(_vec0, zero_point._vec0), vec_max(_vec1, zero_point._vec1)};
244
+ }
245
+
246
+ Vectorized<c10::quint8> C10_ALWAYS_INLINE
247
+ relu6(Vectorized<c10::quint8> zero_point, Vectorized<c10::quint8> q_six) const {
248
+ vuint8 max0 = vec_max(_vec0, zero_point._vec0);
249
+ vuint8 max1 = vec_max(_vec1, zero_point._vec1);
250
+ return {vec_min(max0, q_six._vec0), vec_min(max1, q_six._vec1)};
251
+ }
252
+
253
+ int_vec_return_type widening_subtract(Vectorized<c10::quint8> b) const {
254
+ vint16 vecshi0 = vec_unpackh((vint8)_vec0);
255
+ vint16 vecBshi0 = vec_unpackh((vint8)b._vec0);
256
+ vint16 vecshi1 = vec_unpackl((vint8)_vec0);
257
+ vint16 vecBshi1 = vec_unpackl((vint8)b._vec0);
258
+
259
+ vint16 vecshi2 = vec_unpackh((vint8)_vec1);
260
+ vint16 vecBshi2 = vec_unpackh((vint8)b._vec1);
261
+ vint16 vecshi3 = vec_unpackl((vint8)_vec1);
262
+ vint16 vecBshi3 = vec_unpackl((vint8)b._vec1);
263
+
264
+ vecshi0 = vec_and(vecshi0, mask_unsigned);
265
+ vecBshi0 = vec_and(vecBshi0, mask_unsigned);
266
+ vecshi1 = vec_and(vecshi1, mask_unsigned);
267
+ vecBshi1 = vec_and(vecBshi1, mask_unsigned);
268
+
269
+ vecshi2 = vec_and(vecshi2, mask_unsigned);
270
+ vecBshi2 = vec_and(vecBshi2, mask_unsigned);
271
+ vecshi3 = vec_and(vecshi3, mask_unsigned);
272
+ vecBshi3 = vec_and(vecBshi3, mask_unsigned);
273
+
274
+ vint32 veci0 = vec_unpackh(vecshi0);
275
+ vint32 vecBi0 = vec_unpackh(vecBshi0);
276
+ vint32 veci1 = vec_unpackl(vecshi0);
277
+ vint32 vecBi1 = vec_unpackl(vecBshi0);
278
+
279
+ vint32 veci2 = vec_unpackh(vecshi1);
280
+ vint32 vecBi2 = vec_unpackh(vecBshi1);
281
+ vint32 veci3 = vec_unpackl(vecshi1);
282
+ vint32 vecBi3 = vec_unpackl(vecBshi1);
283
+
284
+ vint32 veci4 = vec_unpackh(vecshi2);
285
+ vint32 vecBi4 = vec_unpackh(vecBshi2);
286
+ vint32 veci5 = vec_unpackl(vecshi2);
287
+ vint32 vecBi5 = vec_unpackl(vecBshi2);
288
+
289
+ vint32 veci6 = vec_unpackh(vecshi3);
290
+ vint32 vecBi6 = vec_unpackh(vecBshi3);
291
+ vint32 veci7 = vec_unpackl(vecshi3);
292
+ vint32 vecBi7 = vec_unpackl(vecBshi3);
293
+
294
+ return {
295
+ Vectorized<c10::qint32>(veci0 - vecBi0, veci1 - vecBi1),
296
+ Vectorized<c10::qint32>(veci2 - vecBi2, veci3 - vecBi3),
297
+ Vectorized<c10::qint32>(veci4 - vecBi4, veci5 - vecBi5),
298
+ Vectorized<c10::qint32>(veci6 - vecBi6, veci7 - vecBi7)};
299
+ }
300
+
301
+ static Vectorized<c10::quint8> requantize_from_int(
302
+ const int_vec_return_type& inp,
303
+ float multiplier,
304
+ int32_t zero_point) {
305
+ vfloat32 vec_multiplier = vec_splats(multiplier);
306
+ vint32 vec_zero_point = vec_splats(zero_point);
307
+
308
+ Vectorized<c10::qint32> vi0 = inp[0];
309
+ Vectorized<c10::qint32> vi1 = inp[1];
310
+ Vectorized<c10::qint32> vi2 = inp[2];
311
+ Vectorized<c10::qint32> vi3 = inp[3];
312
+
313
+ vfloat32 vecf0 = vec_float(vi0.vec0());
314
+ vfloat32 vecf1 = vec_float(vi0.vec1());
315
+ vfloat32 vecf2 = vec_float(vi1.vec0());
316
+ vfloat32 vecf3 = vec_float(vi1.vec1());
317
+
318
+ vfloat32 vecf4 = vec_float(vi2.vec0());
319
+ vfloat32 vecf5 = vec_float(vi2.vec1());
320
+ vfloat32 vecf6 = vec_float(vi3.vec0());
321
+ vfloat32 vecf7 = vec_float(vi3.vec1());
322
+
323
+ vecf0 = vec_mul(vecf0, vec_multiplier);
324
+ vecf1 = vec_mul(vecf1, vec_multiplier);
325
+ vecf2 = vec_mul(vecf2, vec_multiplier);
326
+ vecf3 = vec_mul(vecf3, vec_multiplier);
327
+
328
+ vecf4 = vec_mul(vecf4, vec_multiplier);
329
+ vecf5 = vec_mul(vecf5, vec_multiplier);
330
+ vecf6 = vec_mul(vecf6, vec_multiplier);
331
+ vecf7 = vec_mul(vecf7, vec_multiplier);
332
+
333
+ vecf0 = vec_rint(vecf0);
334
+ vecf1 = vec_rint(vecf1);
335
+ vecf2 = vec_rint(vecf2);
336
+ vecf3 = vec_rint(vecf3);
337
+
338
+ vecf4 = vec_rint(vecf4);
339
+ vecf5 = vec_rint(vecf5);
340
+ vecf6 = vec_rint(vecf6);
341
+ vecf7 = vec_rint(vecf7);
342
+
343
+ vint32 veci0 = vec_signed(vecf0);
344
+ vint32 veci1 = vec_signed(vecf1);
345
+ vint32 veci2 = vec_signed(vecf2);
346
+ vint32 veci3 = vec_signed(vecf3);
347
+
348
+ vint32 veci4 = vec_signed(vecf4);
349
+ vint32 veci5 = vec_signed(vecf5);
350
+ vint32 veci6 = vec_signed(vecf6);
351
+ vint32 veci7 = vec_signed(vecf7);
352
+
353
+ veci0 = vec_add(veci0, vec_zero_point);
354
+ veci1 = vec_add(veci1, vec_zero_point);
355
+ veci2 = vec_add(veci2, vec_zero_point);
356
+ veci3 = vec_add(veci3, vec_zero_point);
357
+
358
+ veci4 = vec_add(veci4, vec_zero_point);
359
+ veci5 = vec_add(veci5, vec_zero_point);
360
+ veci6 = vec_add(veci6, vec_zero_point);
361
+ veci7 = vec_add(veci7, vec_zero_point);
362
+
363
+ vint16 vecshi0 = vec_packs(veci0, veci1);
364
+ vint16 vecshi1 = vec_packs(veci2, veci3);
365
+ vint16 vecshi2 = vec_packs(veci4, veci5);
366
+ vint16 vecshi3 = vec_packs(veci6, veci7);
367
+
368
+ vuint8 vec0 = vec_packsu(vecshi0, vecshi1);
369
+ vuint8 vec1 = vec_packsu(vecshi2, vecshi3);
370
+
371
+ return {vec0, vec1};
372
+ }
373
+
374
+ DEFINE_MEMBER_OP(operator==, c10::quint8, vec_cmpeq)
375
+ DEFINE_MEMBER_OP(operator!=, c10::quint8, vec_cmpne)
376
+ DEFINE_MEMBER_OP(operator<, c10::quint8, vec_cmplt)
377
+ DEFINE_MEMBER_OP(operator<=, c10::quint8, vec_cmple)
378
+ DEFINE_MEMBER_OP(operator>, c10::quint8, vec_cmpgt)
379
+ DEFINE_MEMBER_OP(operator>=, c10::quint8, vec_cmpge)
380
+ DEFINE_MEMBER_OP(operator+, c10::quint8, vec_add)
381
+ DEFINE_MEMBER_OP(operator-, c10::quint8, vec_sub)
382
+ DEFINE_MEMBER_OP(operator*, c10::quint8, vec_mul)
383
+ DEFINE_MEMBER_EMULATE_BINARY_OP(operator/, c10::quint8, /)
384
+ DEFINE_MEMBER_OP(maximum, c10::quint8, vec_max)
385
+ DEFINE_MEMBER_OP(minimum, c10::quint8, vec_min)
386
+ DEFINE_MEMBER_OP(operator&, c10::quint8, vec_and)
387
+ DEFINE_MEMBER_OP(operator|, c10::quint8, vec_or)
388
+ DEFINE_MEMBER_OP(operator^, c10::quint8, vec_xor)
389
+ };
390
+
391
+ template <>
392
+ Vectorized<c10::quint8> inline maximum(
393
+ const Vectorized<c10::quint8>& a,
394
+ const Vectorized<c10::quint8>& b) {
395
+ return a.maximum(b);
396
+ }
397
+
398
+ template <>
399
+ Vectorized<c10::quint8> inline minimum(
400
+ const Vectorized<c10::quint8>& a,
401
+ const Vectorized<c10::quint8>& b) {
402
+ return a.minimum(b);
403
+ }
404
+
405
+ } // namespace
406
+ } // namespace vec
407
+ } // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vsx_helpers.h ADDED
@@ -0,0 +1,473 @@
1
+ #pragma once
2
+ #include <cstdint>
3
+ #include <c10/macros/Macros.h>
4
+ #include <ATen/cpu/vec/intrinsics.h>
5
+
6
+ #if defined(__clang__)
7
+ typedef __vector __bool char vbool8;
8
+ typedef __vector __bool short vbool16;
9
+ typedef __vector __bool int vbool32;
10
+ typedef __vector __bool long long vbool64;
11
+ using vint8 = __attribute__((vector_size(16))) signed char;
12
+ using vint16 = __attribute__((vector_size(16))) signed short;
13
+ using vint32 = __attribute__((vector_size(16))) signed int;
14
+ using vint64 = __attribute__((vector_size(16))) signed long long;
15
+ using vuint8 = __attribute__((vector_size(16))) unsigned char;
16
+ using vuint16 = __attribute__((vector_size(16))) unsigned short;
17
+ using vuint32 = __attribute__((vector_size(16))) unsigned int;
18
+ using vuint64 = __attribute__((vector_size(16))) unsigned long long;
19
+ using vfloat32 = __attribute__((vector_size(16))) float;
20
+ using vfloat64 = __attribute__((vector_size(16))) double;
21
+ #else
22
+ using vbool8 = __attribute__((altivec(vector__))) __attribute__((altivec(bool__))) char;
23
+ using vbool16 = __attribute__((altivec(vector__))) __attribute__((altivec(bool__))) short;
24
+ using vbool32 = __attribute__((altivec(vector__))) __attribute__((altivec(bool__))) int;
25
+ using vbool64 = __attribute__((altivec(vector__))) __attribute__((altivec(bool__))) long long;
26
+ using vint8 = __attribute__((altivec(vector__))) signed char;
27
+ using vint16 = __attribute__((altivec(vector__))) signed short;
28
+ using vint32 = __attribute__((altivec(vector__))) signed int;
29
+ using vint64 = __attribute__((altivec(vector__))) signed long long;
30
+ using vuint8 = __attribute__((altivec(vector__))) unsigned char;
31
+ using vuint16 = __attribute__((altivec(vector__))) unsigned short;
32
+ using vuint32 = __attribute__((altivec(vector__))) unsigned int;
33
+ using vuint64 = __attribute__((altivec(vector__))) unsigned long long;
34
+ using vfloat32 = __attribute__((altivec(vector__))) float;
35
+ using vfloat64 = __attribute__((altivec(vector__))) double;
36
+ #endif
37
+
38
+ #if !defined(vec_float)
39
+ C10_ALWAYS_INLINE vfloat32 vec_float(const vint32& vec_in) {
40
+ vfloat32 vec_out;
41
+ __asm__("xvcvsxwsp %x0,%x1" : "=wf"(vec_out) : "wa"(vec_in));
42
+ return vec_out;
43
+ }
44
+ #endif
45
+
46
+ #if !defined(vec_signed)
47
+ C10_ALWAYS_INLINE vint32 vec_signed(const vfloat32& vec_in) {
48
+ vint32 vec_out;
49
+ __asm__("xvcvspsxws %x0,%x1" : "=wa"(vec_out) : "wf"(vec_in));
50
+ return vec_out;
51
+ }
52
+
53
+ C10_ALWAYS_INLINE vint64 vec_signed(const vfloat64& vec_in) {
54
+ vint64 vec_out;
55
+ __asm__("xvcvdpsxds %x0,%x1" : "=wa"(vec_out) : "wd"(vec_in));
56
+ return vec_out;
57
+ }
58
+ #endif
59
+
60
+ #if !defined(vec_neg)
61
+ C10_ALWAYS_INLINE vfloat32 vec_neg(const vfloat32& vec_in) {
62
+ vfloat32 vec_out;
63
+ __asm__("xvnegsp %x0,%x1" : "=wf"(vec_out) : "wf"(vec_in));
64
+ return vec_out;
65
+ }
66
+
67
+ C10_ALWAYS_INLINE vfloat64 vec_neg(const vfloat64& vec_in) {
68
+ vfloat64 vec_out;
69
+ __asm__("xvnegdp %x0,%x1" : "=wd"(vec_out) : "wd"(vec_in));
70
+ return vec_out;
71
+ }
72
+
73
+ C10_ALWAYS_INLINE vint16 vec_neg(const vint16& vec_in) {
74
+ vint16 vint0 = {0, 0, 0, 0, 0, 0, 0, 0};
75
+ return vec_vsubuhm(vint0, vec_in);
76
+ }
77
+
78
+ C10_ALWAYS_INLINE vint32 vec_neg(const vint32& vec_in) {
79
+ vint32 vint0 = {0, 0, 0, 0};
80
+ return vec_vsubuwm(vint0, vec_in);
81
+ }
82
+
83
+ C10_ALWAYS_INLINE vint64 vec_neg(const vint64& vec_in) {
84
+ return -vec_in;
85
+ }
86
+ #endif
87
+
88
+ #if !defined(vec_sldw)
89
+ template <unsigned int C>
90
+ C10_ALWAYS_INLINE vfloat32
91
+ vec_sldw_aux(const vfloat32& vec_in0, const vfloat32& vec_in1) {
92
+ vfloat32 vec_out;
93
+ __asm("xxsldwi %x0, %x1, %x2, %3 "
94
+ : "=wa"(vec_out)
95
+ : "wa"(vec_in0), "wa"(vec_in1), "I"(C));
96
+ return vec_out;
97
+ }
98
+
99
+ #define vec_sldw(a, b, c) vec_sldw_aux<c>(a, b)
100
+ #endif
101
+
102
+ #define vec_not(a) vec_nor(a, a)
103
+ #if defined(__clang__) && !defined(vec_splats)
104
+ C10_ALWAYS_INLINE vint64 vec_splats(const int64_t& a) {
105
+ return vec_splats(a);
106
+ }
107
+ #endif
108
+ // Vectorized min/max which return a if any operand is nan
109
+ template <class T>
110
+ C10_ALWAYS_INLINE T vec_min_nan(const T& a, const T& b) {
111
+ return vec_min(a, b);
112
+ }
113
+ template <class T>
114
+ C10_ALWAYS_INLINE T vec_max_nan(const T& a, const T& b) {
115
+ return vec_max(a, b);
116
+ }
117
+
118
+ // Specializations for float/double taken from Eigen
119
+ template<>
120
+ C10_ALWAYS_INLINE vfloat32 vec_min_nan<vfloat32>(const vfloat32& a, const vfloat32& b)
121
+ {
122
+ // NOTE: about 10% slower than vec_min, but consistent with std::min and SSE regarding NaN
123
+ vfloat32 ret;
124
+ __asm__ ("xvcmpgesp %x0,%x1,%x2\n\txxsel %x0,%x1,%x2,%x0" : "=&wa" (ret) : "wa" (a), "wa" (b));
125
+ return ret;
126
+ }
127
+ // Specializations for float/double taken from Eigen
128
+ template<>
129
+ C10_ALWAYS_INLINE vfloat32 vec_max_nan<vfloat32>(const vfloat32& a, const vfloat32& b)
130
+ {
131
+ // NOTE: about 10% slower than vec_max, but consistent with std::min and SSE regarding NaN
132
+ vfloat32 ret;
133
+ __asm__ ("xvcmpgtsp %x0,%x2,%x1\n\txxsel %x0,%x1,%x2,%x0" : "=&wa" (ret) : "wa" (a), "wa" (b));
134
+ return ret;
135
+ }
136
+
137
+ template<>
138
+ C10_ALWAYS_INLINE vfloat64 vec_min_nan<vfloat64>(const vfloat64& a, const vfloat64& b)
139
+ {
140
+ // NOTE: about 10% slower than vec_min, but consistent with std::min and SSE regarding NaN
141
+ vfloat64 ret;
142
+ __asm__ ("xvcmpgedp %x0,%x1,%x2\n\txxsel %x0,%x1,%x2,%x0" : "=&wa" (ret) : "wa" (a), "wa" (b));
143
+ return ret;
144
+ }
145
+ template<>
146
+ C10_ALWAYS_INLINE vfloat64 vec_max_nan<vfloat64>(const vfloat64& a, const vfloat64& b)
147
+ {
148
+ // NOTE: about 10% slower than vec_max, but consistent with std::max and SSE regarding NaN
149
+ vfloat64 ret;
150
+ __asm__ ("xvcmpgtdp %x0,%x2,%x1\n\txxsel %x0,%x1,%x2,%x0" : "=&wa" (ret) : "wa" (a), "wa" (b));
151
+ return ret;
152
+ }
153
+
154
+ // Vectorized min/max which return NaN if either operand is NaN
155
+ #define C10_VSX_VEC_NAN_PROPAG(name, type, btype, func) \
156
+ C10_ALWAYS_INLINE type name(const type& a, const type& b) { \
157
+ type tmp = func(a, b); \
158
+ btype nan_a = vec_cmpne(a, a); \
159
+ btype nan_b = vec_cmpne(b, b); \
160
+ tmp = vec_sel(tmp, a, nan_a); \
161
+ return vec_sel(tmp, b, nan_b); \
162
+ }
163
+
164
+ C10_VSX_VEC_NAN_PROPAG(vec_min_nan2, vfloat32, vbool32, vec_min)
165
+ C10_VSX_VEC_NAN_PROPAG(vec_max_nan2, vfloat32, vbool32, vec_max)
166
+ C10_VSX_VEC_NAN_PROPAG(vec_min_nan2, vfloat64, vbool64, vec_min)
167
+ C10_VSX_VEC_NAN_PROPAG(vec_max_nan2, vfloat64, vbool64, vec_max)
168
+
169
+ #undef C10_VSX_VEC_NAN_PROPAG
170
+
171
+ #define DEFINE_MEMBER_UNARY_OP(op, op_type, func) \
172
+ Vectorized<op_type> C10_ALWAYS_INLINE op() const { \
173
+ return Vectorized<op_type>{func(_vec0), func(_vec1)}; \
174
+ }
175
+
176
+ #define DEFINE_MEMBER_OP(op, op_type, func) \
177
+ Vectorized<op_type> C10_ALWAYS_INLINE op(const Vectorized<op_type>& other) const { \
178
+ return Vectorized<op_type>{ \
179
+ func(_vec0, other._vec0), func(_vec1, other._vec1)}; \
180
+ }
181
+
182
+ #define DEFINE_MEMBER_BITWISE_OP(op, op_type, func) \
183
+ Vectorized<op_type> C10_ALWAYS_INLINE op(const Vectorized<op_type>& other) const { \
184
+ return Vectorized<op_type>{ \
185
+ func(_vecb0, other._vecb0), func(_vecb1, other._vecb1)}; \
186
+ }
187
+
188
+ #define DEFINE_MEMBER_TERNARY_OP(op, op_type, func) \
189
+ Vectorized<op_type> C10_ALWAYS_INLINE op( \
190
+ const Vectorized<op_type>& b, const Vectorized<op_type>& c) const { \
191
+ return Vectorized<op_type>{ \
192
+ func(_vec0, b._vec0, c._vec0), func(_vec1, b._vec1, c._vec1)}; \
193
+ }
194
+
195
+ #define DEFINE_MEMBER_EMULATE_BINARY_OP(op, op_type, binary_op) \
196
+ Vectorized<op_type> C10_ALWAYS_INLINE op(const Vectorized<op_type>& b) const { \
197
+ Vectorized<op_type>::vec_internal_type ret_0; \
198
+ Vectorized<op_type>::vec_internal_type ret_1; \
199
+ for (int i = 0; i < Vectorized<op_type>::size() / 2; i++) { \
200
+ ret_0[i] = _vec0[i] binary_op b._vec0[i]; \
201
+ ret_1[i] = _vec1[i] binary_op b._vec1[i]; \
202
+ } \
203
+ return Vectorized<op_type>{ret_0, ret_1}; \
204
+ }
205
+
206
+
207
+ #define DEFINE_MEMBER_OP_AND_ONE(op, op_type, func) \
208
+ Vectorized<op_type> C10_ALWAYS_INLINE op(const Vectorized<op_type>& other) const { \
209
+ using vvtype = Vectorized<op_type>::vec_internal_type; \
210
+ const vvtype v_one = vec_splats(static_cast<op_type>(1.0)); \
211
+ vvtype ret0 = (vvtype)func(_vec0, other._vec0); \
212
+ vvtype ret1 = (vvtype)func(_vec1, other._vec1); \
213
+ return Vectorized<op_type>{vec_and(ret0, v_one), vec_and(ret1, v_one)}; \
214
+ }
215
+
216
+ #define DEFINE_CLAMP_FUNCS(operand_type) \
217
+ template <> \
218
+ Vectorized<operand_type> C10_ALWAYS_INLINE clamp( \
219
+ const Vectorized<operand_type>& a, \
220
+ const Vectorized<operand_type>& min, \
221
+ const Vectorized<operand_type>& max) { \
222
+ return Vectorized<operand_type>{ \
223
+ vec_min_nan(vec_max_nan(a.vec0(), min.vec0()), max.vec0()), \
224
+ vec_min_nan(vec_max_nan(a.vec1(), min.vec1()), max.vec1())}; \
225
+ } \
226
+ template <> \
227
+ Vectorized<operand_type> C10_ALWAYS_INLINE clamp_min( \
228
+ const Vectorized<operand_type>& a, const Vectorized<operand_type>& min) { \
229
+ return Vectorized<operand_type>{ \
230
+ vec_max_nan(a.vec0(), min.vec0()), \
231
+ vec_max_nan(a.vec1(), min.vec1())}; \
232
+ } \
233
+ template <> \
234
+ Vectorized<operand_type> C10_ALWAYS_INLINE clamp_max( \
235
+ const Vectorized<operand_type>& a, const Vectorized<operand_type>& max) { \
236
+ return Vectorized<operand_type>{ \
237
+ vec_min_nan(a.vec0(), max.vec0()), \
238
+ vec_min_nan(a.vec1(), max.vec1())}; \
239
+ }
240
+
241
+ #define DEFINE_REINTERPRET_CAST_FUNCS( \
242
+ first_type, cast_type, cast_inner_vector_type) \
243
+ template <> \
244
+ C10_ALWAYS_INLINE Vectorized<cast_type> cast<cast_type, first_type>( \
245
+ const Vectorized<first_type>& src) { \
246
+ return Vectorized<cast_type>{(cast_inner_vector_type)src.vec0(), \
247
+ (cast_inner_vector_type)src.vec1()}; \
248
+ }
249
+
250
+ #define DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(first_type) \
251
+ DEFINE_REINTERPRET_CAST_FUNCS(first_type, double, vfloat64) \
252
+ DEFINE_REINTERPRET_CAST_FUNCS(first_type, float, vfloat32) \
253
+ DEFINE_REINTERPRET_CAST_FUNCS(first_type, int64_t, vint64) \
254
+ DEFINE_REINTERPRET_CAST_FUNCS(first_type, int32_t, vint32) \
255
+ DEFINE_REINTERPRET_CAST_FUNCS(first_type, int16_t, vint16)
256
+
257
+ // it can be used to emulate blend faster
258
+ constexpr int blendChoice(uint32_t mask, uint32_t half1 = 0xF, uint32_t half2 = 0xF0) {
259
+ uint32_t none = 0;
260
+ uint32_t both = half1 | half2;
261
+ // clamp it between 0 and both
262
+ mask = mask & both;
263
+ // return (a._vec0, a._vec1)
264
+ if (mask == none) return 0;
265
+ // return (b._vec0,b._vec1)
266
+ else if (mask == both)
267
+ return 1;
268
+ // return (b._vec0,a._vec1)
269
+ else if (mask == half1)
270
+ return 2;
271
+ // return (a._vec0,b._vec1)
272
+ else if (mask == half2)
273
+ return 3;
274
+ // return (*_vec0,a._vec1)
275
+ else if (mask > 0 && mask < half1)
276
+ return 4;
277
+ // return (*_vec0,b._vec1)
278
+ else if ((mask & half2) == half2)
279
+ return 5;
280
+ // return (a._vec0,*_vec1)
281
+ else if ((mask & half1) == 0 && mask > half1)
282
+ return 6;
283
+ // return (b._vec0,*_vec1)
284
+ else if ((mask & half1) == half1 && mask > half1)
285
+ return 7;
286
+ // return (*_vec0,*_vec1)
287
+ return 8;
288
+ }
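A hedged illustration (not part of the header): because blendChoice is constexpr, its branch numbering can be checked at compile time. For the 8-lane float layout (two 4-lane halves) with the default half masks, a few representative cases are:

static_assert(blendChoice(0x00) == 0, "keep a._vec0 and a._vec1");
static_assert(blendChoice(0xFF) == 1, "take b._vec0 and b._vec1");
static_assert(blendChoice(0x0F) == 2, "take b._vec0, keep a._vec1");
static_assert(blendChoice(0xF0) == 3, "keep a._vec0, take b._vec1");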
289
+
290
+ // helper to pick a faster code path when emulating blend
291
+ constexpr int blendChoiceDbl(uint32_t mask) {
292
+ // clamp it between 0 and 0xF
293
+ return blendChoice(mask, 0x3, 0xC);
294
+ }
295
+
296
+ constexpr vbool32 VsxMask1(uint32_t mask) {
297
+ uint32_t g0 = (mask & 1) * 0xffffffff;
298
+ uint32_t g1 = ((mask & 2) >> 1) * 0xffffffff;
299
+ uint32_t g2 = ((mask & 4) >> 2) * 0xffffffff;
300
+ uint32_t g3 = ((mask & 8) >> 3) * 0xffffffff;
301
+ return (vbool32){g0, g1, g2, g3};
302
+ }
303
+
304
+ constexpr vbool32 VsxMask2(uint32_t mask) {
305
+ uint32_t mask2 = (mask & 0xFF) >> 4;
306
+ return VsxMask1(mask2);
307
+ }
308
+
309
+ constexpr vbool64 VsxDblMask1(uint32_t mask) {
310
+ uint64_t g0 = (mask & 1) * 0xffffffffffffffff;
311
+ uint64_t g1 = ((mask & 2) >> 1) * 0xffffffffffffffff;
312
+ return (vbool64){g0, g1};
313
+ }
314
+
315
+ constexpr vbool64 VsxDblMask2(uint32_t mask) {
316
+ uint32_t mask2 = (mask & 0xF) >> 2;
317
+ return VsxDblMask1(mask2);
318
+ }
319
+
320
+ constexpr int maskForComplex(uint32_t mask) {
321
+ mask = mask & 0xF;
322
+ int complex_mask = 0;
323
+ if (mask & 1) complex_mask |= 3;
324
+ if (mask & 2) complex_mask |= (3 << 2);
325
+ if (mask & 4) complex_mask |= (3 << 4);
326
+ if (mask & 8) complex_mask |= (3 << 6);
327
+ return complex_mask;
328
+ }
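A hedged illustration (not part of the header): maskForComplex widens each complex-element bit into two adjacent lane bits, one for the real and one for the imaginary part, so for example:

static_assert(maskForComplex(0x1) == 0x03, "element 0 -> lanes 0 and 1");
static_assert(maskForComplex(0x5) == 0x33, "elements 0 and 2 -> lanes 0,1,4,5");
static_assert(maskForComplex(0xF) == 0xFF, "all four complex elements selected");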
329
+
330
+ constexpr int maskForComplexDbl(uint32_t mask) {
331
+ mask = mask & 0x3;
332
+ int complex_mask = 0;
333
+ if (mask & 1) complex_mask |= 3;
334
+ if (mask & 2) complex_mask |= (3 << 2);
335
+ return complex_mask;
336
+ }
337
+
338
+ constexpr int blendChoiceComplex(uint32_t mask) {
339
+ return blendChoice(maskForComplex(mask));
340
+ }
341
+
342
+ constexpr int blendChoiceComplexDbl(uint32_t mask) {
343
+ return blendChoiceDbl(maskForComplexDbl(mask));
344
+ }
345
+
346
+ constexpr vbool32 VsxComplexMask1(uint32_t mask) {
347
+ return VsxMask1(maskForComplex(mask));
348
+ }
349
+
350
+ constexpr vbool32 VsxComplexMask2(uint32_t mask) {
351
+ uint32_t mask2 = (mask & 0xF) >> 2;
352
+ return VsxMask1(maskForComplex(mask2));
353
+ }
354
+
355
+ constexpr vbool64 VsxComplexDblMask1(uint32_t mask) { return VsxDblMask1(mask); }
356
+
357
+ constexpr vbool64 VsxComplexDblMask2(uint32_t mask) {
358
+ uint32_t mask2 = (mask & 0xF) >> 2;
359
+ return VsxDblMask1(mask2);
360
+ }
361
+
362
+ // constants
363
+ namespace at {
364
+ namespace vec {
365
+ // See Note [CPU_CAPABILITY namespace]
366
+ inline namespace CPU_CAPABILITY {
367
+ //
368
+ constexpr int offset0 = 0;
369
+ constexpr int offset16 = 16;
370
+
371
+ // Constants
372
+ const vuint8 mask_zero_bits = vuint8{128, 128, 128, 128, 128, 128, 128, 128,
373
+ 128, 128, 128, 128, 96, 64, 32, 0};
374
+
375
+ const vuint8 swap_mask =
376
+ vuint8{4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11};
377
+
378
+ const vint32 v0x7f = vec_splats(0x7f);
379
+ const vint32 vi_0 = vec_splats((int)(0));
380
+ const vint32 vi_1 = vec_splats((int)1);
381
+ const vint32 vi_2 = vec_splats((int)2);
382
+ const vint32 vi_4 = vec_splats((int)4);
383
+ const vint32 vi_inv1 = vec_splats((int)~1);
384
+ const vuint32 vu_29 = vec_splats(29u);
385
+ const vuint32 vu_23 = vec_splats(23u);
386
+
387
+ const vbool32 inv_mant_mask = (vbool32)vec_splats((unsigned int)~0xff800000);
388
+ const vbool32 sign_mask = (vbool32)vec_splats((int)0x80000000);
389
+ const vbool32 real_mask = vbool32{0xFFFFFFFF, 0x0, 0xFFFFFFFF, 0x0};
390
+ const vbool32 imag_mask = vbool32{0x0, 0xFFFFFFFF, 0x0, 0xFFFFFFFF};
391
+ const vbool32 isign_mask = vbool32{0x0, 0x80000000, 0x0, 0x80000000};
392
+ const vbool32 rsign_mask = vbool32{0x80000000, 0x0, 0x80000000, 0x0};
393
+
394
+ const vbool64 vd_imag_mask = vbool64{0x0, 0xFFFFFFFFFFFFFFFF};
395
+ const vbool64 vd_real_mask = vbool64{0xFFFFFFFFFFFFFFFF, 0x0};
396
+ const vbool64 vd_isign_mask = vbool64{0x0, 0x8000000000000000};
397
+ const vbool64 vd_rsign_mask = vbool64{0x8000000000000000, 0x0};
398
+
399
+ const vfloat32 zero = vec_splats(0.f);
400
+ const vfloat32 half = vec_splats(0.5f);
401
+ const vfloat32 one = vec_splats(1.f);
402
+ const vfloat32 two = vec_splats(2.0f);
403
+ const vfloat32 _4div_pi = vec_splats(1.27323954473516f);
404
+ const vfloat32 v_inf = (vfloat32)vec_splats(0x7f800000u);
405
+ const vfloat32 v_minus_inf = vfloat32{ 0xff800000u, 0xff800000u, 0xff800000u, 0xff800000u };
406
+ const vfloat32 v_nan = (vfloat32)vec_splats(0x7fffffff);
407
+ const vfloat32 log10e_inv = vec_splats(0.43429448190325176f);
408
+ const vfloat32 log2e_inv = vec_splats(1.4426950408889634f);
409
+ const vfloat32 log2eB_inv = vec_splats(1.442695036924675f);
410
+ const vfloat32 cephes_SQRTHF = vec_splats(0.707106781186547524f);
411
+ const vfloat32 coscof_p0 = vec_splats(2.443315711809948E-005f);
412
+ const vfloat32 coscof_p1 = vec_splats(-1.388731625493765E-003f);
413
+ const vfloat32 coscof_p2 = vec_splats(4.166664568298827E-002f);
414
+ const vfloat32 exp_hi = vec_splats(104.f);
415
+ const vfloat32 exp_lo = vec_splats(-104.f);
416
+ const vfloat32 exp_p0 = vec_splats(0.000198527617612853646278381f);
417
+ const vfloat32 exp_p1 = vec_splats((0.00139304355252534151077271f));
418
+ const vfloat32 exp_p2 = vec_splats(0.00833336077630519866943359f);
419
+ const vfloat32 exp_p3 = vec_splats(0.0416664853692054748535156f);
420
+ const vfloat32 exp_p4 = vec_splats(0.166666671633720397949219f);
421
+ const vfloat32 exp_p5 = vec_splats(0.5f);
422
+ const vfloat32 log_p0 = vec_splats(7.0376836292E-2f);
423
+ const vfloat32 log_p1 = vec_splats(-1.1514610310E-1f);
424
+ const vfloat32 log_p2 = vec_splats(1.1676998740E-1f);
425
+ const vfloat32 log_p3 = vec_splats(-1.2420140846E-1f);
426
+ const vfloat32 log_p4 = vec_splats(+1.4249322787E-1f);
427
+ const vfloat32 log_p5 = vec_splats(-1.6668057665E-1f);
428
+ const vfloat32 log_p6 = vec_splats(+2.0000714765E-1f);
429
+ const vfloat32 log_p7 = vec_splats(-2.4999993993E-1f);
430
+ const vfloat32 log_p8 = vec_splats(+3.3333331174E-1f);
431
+ const vfloat32 log_q1 = vec_splats(-2.12194440e-4f);
432
+ const vfloat32 log_q2 = vec_splats(0.693359375f);
433
+ const vfloat32 max_logf = vec_splats(88.02969187150841f);
434
+ const vfloat32 max_numf = vec_splats(1.7014117331926442990585209174225846272e38f);
435
+ const vfloat32 min_inf = (vfloat32)vec_splats(0xff800000u);
436
+ const vfloat32 min_norm_pos = (vfloat32)vec_splats(0x0800000u);
437
+ const vfloat32 minus_cephes_dp1 = vec_splats(-0.78515625f);
438
+ const vfloat32 minus_cephes_dp2 = vec_splats(-2.4187564849853515625e-4f);
439
+ const vfloat32 minus_cephes_dp3 = vec_splats(-3.77489497744594108e-8f);
440
+ const vfloat32 negln2f_hi = vec_splats(-0.693145751953125f);
441
+ const vfloat32 negln2f_lo = vec_splats(-1.428606765330187045e-06f);
442
+ const vfloat32 p0 = vec_splats(2.03721912945E-4f);
443
+ const vfloat32 p1 = vec_splats(8.33028376239E-3f);
444
+ const vfloat32 p2 = vec_splats(1.66667160211E-1f);
445
+ const vfloat32 sincof_p0 = vec_splats(-1.9515295891E-4f);
446
+ const vfloat32 sincof_p1 = vec_splats(8.3321608736E-3f);
447
+ const vfloat32 sincof_p2 = vec_splats(-1.6666654611E-1f);
448
+ const vfloat32 tanh_0p625 = vec_splats(0.625f);
449
+ const vfloat32 tanh_half_max = vec_splats(44.014845935754205f);
450
+ const vfloat32 tanh_p0 = vec_splats(-5.70498872745E-3f);
451
+ const vfloat32 tanh_p1 = vec_splats(2.06390887954E-2f);
452
+ const vfloat32 tanh_p2 = vec_splats(-5.37397155531E-2f);
453
+ const vfloat32 tanh_p3 = vec_splats(1.33314422036E-1f);
454
+ const vfloat32 tanh_p4 = vec_splats(-3.33332819422E-1f);
455
+ const vfloat32 vcheck = vec_splats((float)(1LL << 24));
456
+ const vfloat32 imag_one = vfloat32{0.f, 1.f, 0.f, 1.f};
457
+ const vfloat32 imag_half = vfloat32{0.f, 0.5f, 0.f, 0.5f};
458
+ const vfloat32 sqrt2_2 = vfloat32{0.70710676908493042f, 0.70710676908493042,
459
+ 0.70710676908493042, 0.70710676908493042};
460
+ const vfloat32 pi_2 = vfloat32{M_PI / 2, 0.0, M_PI / 2, 0.0};
461
+ const vfloat32 vf_89 = vfloat32{89.f, 89.f, 89.f, 89.f};
462
+ const vfloat64 vd_one = vec_splats(1.0);
463
+ const vfloat64 vd_zero = vec_splats(0.0);
464
+ const vfloat64 vd_log10e_inv = vec_splats(0.43429448190325176);
465
+ const vfloat64 vd_log2e_inv = vec_splats(1.4426950408889634);
466
+ const vfloat64 vd_imag_one = vfloat64{0.0, 1.0};
467
+ const vfloat64 vd_imag_half = vfloat64{0.0, 0.5};
468
+ const vfloat64 vd_sqrt2_2 = vfloat64{0.70710678118654757, 0.70710678118654757};
469
+ const vfloat64 vd_pi_2 = vfloat64{M_PI / 2.0, 0.0};
470
+
471
+ } // namespace
472
+ } // namespace vec
473
+ } // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec_base.h ADDED
@@ -0,0 +1,1077 @@
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+ //
6
+ // Note [Do not compile initializers with AVX]
7
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
8
+ // If you define a static initializer in this file, the initialization will use
9
+ // AVX instructions because these object files are compiled with AVX enabled.
10
+ // We need to avoid non-trivial global data in these architecture specific files
11
+ // because there's no way to guard the global initializers with CPU capability
12
+ // detection.
13
+ //
14
+ // See https://github.com/pytorch/pytorch/issues/37577 for an instance
15
+ // of this bug in the past.
16
+
17
+ #include <array>
18
+ #include <algorithm>
19
+ #include <cassert>
20
+ #include <cstring>
21
+ #include <functional>
22
+ #include <cmath>
23
+ #include <type_traits>
24
+ #include <climits>
25
+
26
+ #include <ATen/cpu/vec/intrinsics.h>
27
+ #include <ATen/native/Math.h>
28
+ #include <ATen/NumericUtils.h>
29
+ #include <c10/util/C++17.h>
30
+ #include <c10/util/Half.h>
31
+ #include <c10/util/BFloat16.h>
32
+ #include <c10/util/BFloat16-math.h>
33
+ #include <c10/util/copysign.h>
34
+ #include <c10/util/math_compat.h>
35
+ #include <ATen/native/cpu/zmath.h>
36
+ #include <c10/util/TypeCast.h>
37
+ #include <c10/macros/Macros.h>
38
+ #include <c10/util/irange.h>
39
+ #include <c10/util/Load.h>
40
+
41
+ // These macros helped us unify vec_base.h
42
+ #ifdef CPU_CAPABILITY_AVX512
43
+ #if defined(__GNUC__)
44
+ #define __at_align__ __attribute__((aligned(64)))
45
+ #elif defined(_WIN32)
46
+ #define __at_align__ __declspec(align(64))
47
+ #else
48
+ #define __at_align__
49
+ #endif
50
+ #define VECTOR_WIDTH 64
51
+ #define int_vector __m512i
52
+ #else // CPU_CAPABILITY_AVX512
53
+ #if defined(__GNUC__)
54
+ #define __at_align__ __attribute__((aligned(32)))
55
+ #elif defined(_WIN32)
56
+ #define __at_align__ __declspec(align(32))
57
+ #else
58
+ #define __at_align__
59
+ #endif
60
+ #define VECTOR_WIDTH 32
61
+ #define int_vector __m256i
62
+ #endif // CPU_CAPABILITY_AVX512
63
+
64
+ namespace at::vec {
65
+ // See Note [CPU_CAPABILITY namespace]
66
+ inline namespace CPU_CAPABILITY {
67
+ // at::Half and at::BFloat16 should be treated as floating point
68
+ template <typename T>
69
+ struct is_floating_point:
70
+ std::integral_constant<bool,
71
+ std::is_floating_point<T>::value ||
72
+ std::is_same<T, at::Half>::value ||
73
+ std::is_same<T, at::BFloat16>::value> {
74
+ };
75
+
76
+ template<typename T>
77
+ constexpr bool is_floating_point_v = is_floating_point<T>::value;
78
+
79
+ template <typename T>
80
+ struct is_reduced_floating_point:
81
+ std::integral_constant<bool,
82
+ std::is_same<T, at::Half>::value ||
83
+ std::is_same<T, at::BFloat16>::value> {
84
+ };
85
+
86
+ template <typename T>
87
+ constexpr bool is_reduced_floating_point_v = is_reduced_floating_point<T>::value;
88
+
89
+ template<size_t n> struct int_of_size;
90
+
91
+ #define DEFINE_INT_OF_SIZE(int_t) \
92
+ template<> struct int_of_size<sizeof(int_t)> { using type = int_t; }
93
+
94
+ DEFINE_INT_OF_SIZE(int64_t);
95
+ DEFINE_INT_OF_SIZE(int32_t);
96
+ DEFINE_INT_OF_SIZE(int16_t);
97
+ DEFINE_INT_OF_SIZE(int8_t);
98
+
99
+ #undef DEFINE_INT_OF_SIZE
100
+
101
+ template <typename T>
102
+ using int_same_size_t = typename int_of_size<sizeof(T)>::type;
103
+
104
+ // NOTE: If you specialize on a type, you must define all operations!
105
+
106
+ // emulates Vectorized types
107
+ #if defined(__s390x__)
108
+ template <class T, class TEMP=void>
109
+ #else
110
+ template <class T>
111
+ #endif
112
+ struct Vectorized {
113
+ private:
114
+ __at_align__ T values[VECTOR_WIDTH / sizeof(T)];
115
+ public:
116
+ using value_type = T;
117
+ using size_type = int;
118
+ // Note [constexpr static function to avoid odr-usage compiler bug]
119
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
120
+ // Why, you might ask, is size defined to be a static constexpr function,
121
+ // rather than a more ordinary 'static constexpr int size;' variable?
122
+ // The problem lies within ODR rules for static constexpr members versus
123
+ // static constexpr functions. First, recall that this class (along with all
124
+ // of its derivations) lives in an anonymous namespace: they are intended to be
125
+ // *completely* inlined at their use-sites, because we need to compile it
126
+ // multiple times for different instruction sets.
127
+ //
128
+ // Because of this constraint, we CANNOT provide a single definition for
129
+ // any static members in this class; since we want to compile the class
130
+ // multiple times, there wouldn't actually be any good place to put the
131
+ // definition. Now here is the problem: if we ODR-use a static constexpr
132
+ // member, we are *obligated* to provide a definition. Without the
133
+ // definition, you get a compile error like:
134
+ //
135
+ // relocation R_X86_64_PC32 against undefined symbol
136
+ // `_ZN2at6vec25612_GLOBAL__N_16VectorizedIdE4sizeE' can not be used when making
137
+ // a shared object; recompile with -fPIC
138
+ //
139
+ // If this were C++17, we could replace a static constexpr variable with
140
+ // an inline variable which doesn't require one definition. But we are not
141
+ // C++17. So the next best thing is to replace the member with a static
142
+ // constexpr (and therefore inline) function, which does not require ODR
143
+ // either.
144
+ //
145
+ // Also, technically according to the C++ standard, we don't have to define
146
+ // a constexpr variable if we never odr-use it. But it seems that some
147
+ // versions GCC/Clang have buggy determinations on whether or not an
148
+ // identifier is odr-used or not, and in any case it's hard to tell if
149
+ // a variable is odr-used or not. So best to just cut the problem at the root.
150
+ static constexpr size_type size() {
151
+ return VECTOR_WIDTH / sizeof(T);
152
+ }
153
+ Vectorized() : values{static_cast<T>(0)} {}
154
+ Vectorized(T val) {
155
+ for (int i = 0; i != size(); i++) {
156
+ values[i] = val;
157
+ }
158
+ }
159
+ template<typename... Args,
160
+ typename = std::enable_if_t<(sizeof...(Args) == size())>>
161
+ Vectorized(Args... vals) : values{vals...}{
162
+ }
163
+ // This also implies const T& operator[](int idx) const
164
+ inline operator const T*() const {
165
+ return values;
166
+ }
167
+ // This also implies T& operator[](int idx)
168
+ inline operator T*() {
169
+ return values;
170
+ }
171
+ // Return the values as char* for type punning
172
+ auto as_bytes() const -> const char* {
173
+ return reinterpret_cast<const char*>(values);
174
+ }
175
+ template <int64_t mask_>
176
+ static Vectorized<T> blend(const Vectorized<T>& a, const Vectorized<T>& b) {
177
+ int64_t mask = mask_;
178
+ Vectorized vector;
179
+ for (const auto i : c10::irange(size())) {
180
+ if (mask & 0x01) {
181
+ vector[i] = b[i];
182
+ } else {
183
+ vector[i] = a[i];
184
+ }
185
+ mask = mask >> 1;
186
+ }
187
+ return vector;
188
+ }
189
+ static Vectorized<T> blendv(const Vectorized<T>& a, const Vectorized<T>& b,
190
+ const Vectorized<T>& mask) {
191
+ Vectorized vector;
192
+ int_same_size_t<T> buffer[size()];
193
+ mask.store(buffer);
194
+ for (const auto i : c10::irange(size())) {
195
+ if (buffer[i] & 0x01)
196
+ {
197
+ vector[i] = b[i];
198
+ } else {
199
+ vector[i] = a[i];
200
+ }
201
+ }
202
+ return vector;
203
+ }
204
+ template<typename step_t> // step sometimes requires a higher precision type (e.g., T=int, step_t=double)
205
+ static Vectorized<T> arange(T base = static_cast<T>(0), step_t step = static_cast<step_t>(1)) {
206
+ Vectorized vector;
207
+ for (const auto i : c10::irange(size())) {
208
+ vector.values[i] = base + i * step;
209
+ }
210
+ return vector;
211
+ }
212
+ static Vectorized<T> set(const Vectorized<T>& a, const Vectorized<T>& b, int64_t count = size()) {
213
+ Vectorized vector;
214
+ for (const auto i : c10::irange(size())) {
215
+ if (i < count) {
216
+ vector[i] = b[i];
217
+ } else {
218
+ vector[i] = a[i];
219
+ }
220
+ }
221
+ return vector;
222
+ }
223
+ static Vectorized<T> loadu(const void* ptr) {
224
+ Vectorized vector;
225
+ std::memcpy(vector.values, ptr, VECTOR_WIDTH);
226
+ return vector;
227
+ }
228
+ static Vectorized<T> loadu(const void* ptr, int64_t count) {
229
+ Vectorized vector;
230
+ std::memcpy(vector.values, ptr, count * sizeof(T));
231
+ return vector;
232
+ }
233
+ void store(void* ptr, int count = size()) const {
234
+ std::memcpy(ptr, values, count * sizeof(T));
235
+ }
236
+ int zero_mask() const {
237
+ // returns an integer mask where each zero element is mapped to a 1 bit and every non-zero element to a 0 bit
238
+ int mask = 0;
239
+ for (int i = 0; i < size(); ++ i) {
240
+ if (values[i] == static_cast<T>(0)) {
241
+ mask |= (1 << i);
242
+ }
243
+ }
244
+ return mask;
245
+ }
246
+ Vectorized<T> isnan() const {
247
+ Vectorized<T> vector;
248
+ for (int64_t i = 0; i != size(); i++) {
249
+ if (_isnan(values[i])) {
250
+ std::memset(static_cast<void*>(vector.values + i), 0xFF, sizeof(T));
251
+ } else {
252
+ std::memset(static_cast<void*>(vector.values + i), 0, sizeof(T));
253
+ }
254
+ }
255
+ return vector;
256
+ }
257
+ Vectorized<T> map(T (*const f)(T)) const {
258
+ Vectorized<T> ret;
259
+ for (int64_t i = 0; i != size(); i++) {
260
+ ret[i] = f(values[i]);
261
+ }
262
+ return ret;
263
+ }
264
+ Vectorized<T> map(T (*const f)(const T &)) const {
265
+ Vectorized<T> ret;
266
+ for (int64_t i = 0; i != size(); i++) {
267
+ ret[i] = f(values[i]);
268
+ }
269
+ return ret;
270
+ }
271
+ template <typename other_t_abs = T,
272
+ typename std::enable_if<!is_floating_point_v<other_t_abs> && !c10::is_complex<other_t_abs>::value, int>::type = 0>
273
+ Vectorized<T> abs() const {
274
+ // other_t_abs is for SFINAE and clarity. Make sure it is not changed.
275
+ static_assert(std::is_same<other_t_abs, T>::value, "other_t_abs must be T");
276
+ return map([](T x) -> T { return x < static_cast<T>(0) ? -x : x; });
277
+ }
278
+ template <typename float_t_abs = T,
279
+ typename std::enable_if<is_floating_point_v<float_t_abs>, int>::type = 0>
280
+ Vectorized<T> abs() const {
281
+ // float_t_abs is for SFINAE and clarity. Make sure it is not changed.
282
+ static_assert(std::is_same<float_t_abs, T>::value, "float_t_abs must be T");
283
+ // Specifically deal with floating-point because the generic code above won't handle -0.0 (which should result in
284
+ // 0.0) properly.
285
+ return map([](T x) -> T { return std::abs(x); });
286
+ }
287
+ template <typename complex_t_abs = T,
288
+ typename std::enable_if<c10::is_complex<complex_t_abs>::value, int>::type = 0>
289
+ Vectorized<T> abs() const {
290
+ // complex_t_abs is for SFINAE and clarity. Make sure it is not changed.
291
+ static_assert(std::is_same<complex_t_abs, T>::value, "complex_t_abs must be T");
292
+ // Specifically map() does not perform the type conversion needed by abs.
293
+ return map([](T x) { return static_cast<T>(std::abs(x)); });
294
+ }
295
+
296
+ template <typename other_t_sgn = T,
297
+ typename std::enable_if<c10::is_complex<other_t_sgn>::value, int>::type = 0>
298
+ Vectorized<T> sgn() const {
299
+ return map(at::native::sgn_impl);
300
+ }
301
+
302
+ template <typename other_t_angle = T,
303
+ typename std::enable_if<!c10::is_complex<other_t_angle>::value, int>::type = 0>
304
+ Vectorized<T> angle() const {
305
+ // other_t_angle is for SFINAE and clarity. Make sure it is not changed.
306
+ static_assert(std::is_same<other_t_angle, T>::value, "other_t_angle must be T");
307
+ return map(at::native::angle_impl<T>); // compiler is unable to resolve the overload without <T>
308
+ }
309
+ template <typename complex_t_angle = T,
310
+ typename std::enable_if<c10::is_complex<complex_t_angle>::value, int>::type = 0>
311
+ Vectorized<T> angle() const {
312
+ // complex_t_angle is for SFINAE and clarity. Make sure it is not changed.
313
+ static_assert(std::is_same<complex_t_angle, T>::value, "complex_t_angle must be T");
314
+ return map([](T x) { return static_cast<T>(std::arg(x)); });
315
+ }
316
+ template <typename other_t_real = T,
317
+ typename std::enable_if<!c10::is_complex<other_t_real>::value, int>::type = 0>
318
+ Vectorized<T> real() const {
319
+ // other_t_real is for SFINAE and clarity. Make sure it is not changed.
320
+ static_assert(std::is_same<other_t_real, T>::value, "other_t_real must be T");
321
+ return *this;
322
+ }
323
+ template <typename complex_t_real = T,
324
+ typename std::enable_if<c10::is_complex<complex_t_real>::value, int>::type = 0>
325
+ Vectorized<T> real() const {
326
+ // complex_t_real is for SFINAE and clarity. Make sure it is not changed.
327
+ static_assert(std::is_same<complex_t_real, T>::value, "complex_t_real must be T");
328
+ return map([](T x) { return static_cast<T>(x.real()); });
329
+ }
330
+ template <typename other_t_imag = T,
331
+ typename std::enable_if<!c10::is_complex<other_t_imag>::value, int>::type = 0>
332
+ Vectorized<T> imag() const {
333
+ // other_t_imag is for SFINAE and clarity. Make sure it is not changed.
334
+ static_assert(std::is_same<other_t_imag, T>::value, "other_t_imag must be T");
335
+ return Vectorized(0);
336
+ }
337
+ template <typename complex_t_imag = T,
338
+ typename std::enable_if<c10::is_complex<complex_t_imag>::value, int>::type = 0>
339
+ Vectorized<T> imag() const {
340
+ // complex_t_imag is for SFINAE and clarity. Make sure it is not changed.
341
+ static_assert(std::is_same<complex_t_imag, T>::value, "complex_t_imag must be T");
342
+ return map([](T x) { return static_cast<T>(x.imag()); });
343
+ }
344
+ template <typename other_t_conj = T,
345
+ typename std::enable_if<!c10::is_complex<other_t_conj>::value, int>::type = 0>
346
+ Vectorized<T> conj() const {
347
+ // other_t_conj is for SFINAE and clarity. Make sure it is not changed.
348
+ static_assert(std::is_same<other_t_conj, T>::value, "other_t_conj must be T");
349
+ return *this;
350
+ }
351
+ template <typename complex_t_conj = T,
352
+ typename std::enable_if<c10::is_complex<complex_t_conj>::value, int>::type = 0>
353
+ Vectorized<T> conj() const {
354
+ // complex_t_conj is for SFINAE and clarity. Make sure it is not changed.
355
+ static_assert(std::is_same<complex_t_conj, T>::value, "complex_t_conj must be T");
356
+ return map([](T x) { return static_cast<T>(std::conj(x)); });
357
+ }
358
+ Vectorized<T> acos() const {
359
+ return map(std::acos);
360
+ }
361
+ Vectorized<T> asin() const {
362
+ return map(std::asin);
363
+ }
364
+ Vectorized<T> atan() const {
365
+ return map(std::atan);
366
+ }
367
+ Vectorized<T> atanh() const {
368
+ return map(std::atanh);
369
+ }
370
+ Vectorized<T> atan2(const Vectorized<T> &exp) const {
371
+ Vectorized<T> ret;
372
+ for (const auto i : c10::irange(size())) {
373
+ ret[i] = std::atan2(values[i], exp[i]);
374
+ }
375
+ return ret;
376
+ }
377
+ template <
378
+ typename U = T,
379
+ typename std::enable_if_t<is_floating_point_v<U>, int> = 0>
380
+ Vectorized<T> copysign(const Vectorized<T> &sign) const {
381
+ Vectorized<T> ret;
382
+ for (size_type i = 0; i < size(); i++) {
383
+ ret[i] = c10::copysign(values[i], sign[i]);
384
+ }
385
+ return ret;
386
+ }
387
+ Vectorized<T> erf() const {
388
+ return map(std::erf);
389
+ }
390
+ Vectorized<T> erfc() const {
391
+ return map(std::erfc);
392
+ }
393
+ Vectorized<T> erfinv() const {
394
+ return map(calc_erfinv);
395
+ }
396
+ Vectorized<T> exp() const {
397
+ return map(std::exp);
398
+ }
399
+ Vectorized<T> exp2() const {
400
+ return map(exp2_impl);
401
+ }
402
+ Vectorized<T> expm1() const {
403
+ return map(std::expm1);
404
+ }
405
+ Vectorized<T> frac() const {
406
+ return *this - this->trunc();
407
+ }
408
+ template <
409
+ typename U = T,
410
+ typename std::enable_if_t<is_floating_point_v<U>, int> = 0>
411
+ Vectorized<T> fmod(const Vectorized<T>& q) const {
412
+ // U is for SFINAE purposes only. Make sure it is not changed.
413
+ static_assert(std::is_same<U, T>::value, "U must be T");
414
+ Vectorized<T> ret;
415
+ for (const auto i : c10::irange(size())) {
416
+ ret[i] = std::fmod(values[i], q[i]);
417
+ }
418
+ return ret;
419
+ }
420
+ Vectorized<T> log() const {
421
+ return map(std::log);
422
+ }
423
+ Vectorized<T> log10() const {
424
+ return map(std::log10);
425
+ }
426
+ Vectorized<T> log1p() const {
427
+ return map(std::log1p);
428
+ }
429
+ template <typename other_t_log2 = T,
430
+ typename std::enable_if<!c10::is_complex<other_t_log2>::value, int>::type = 0>
431
+ Vectorized<T> log2() const {
432
+ // other_t_log2 is for SFINAE and clarity. Make sure it is not changed.
433
+ static_assert(std::is_same<other_t_log2, T>::value, "other_t_log2 must be T");
434
+ return map(std::log2);
435
+ }
436
+ template <typename complex_t_log2 = T,
437
+ typename std::enable_if<c10::is_complex<complex_t_log2>::value, int>::type = 0>
438
+ Vectorized<T> log2() const {
439
+ // complex_t_log2 is for SFINAE and clarity. Make sure it is not changed.
440
+ static_assert(std::is_same<complex_t_log2, T>::value, "complex_t_log2 must be T");
441
+ const T log_2 = T(std::log(2.0));
442
+ return Vectorized(map(std::log))/Vectorized(log_2);
443
+ }
444
+ Vectorized<T> ceil() const {
445
+ return map(at::native::ceil_impl);
446
+ }
447
+ Vectorized<T> cos() const {
448
+ return map(std::cos);
449
+ }
450
+ Vectorized<T> cosh() const {
451
+ return map(std::cosh);
452
+ }
453
+ Vectorized<T> floor() const {
454
+ return map(at::native::floor_impl);
455
+ }
456
+ Vectorized<T> hypot(const Vectorized<T> &b) const {
457
+ Vectorized<T> ret;
458
+ for (const auto i : c10::irange(size())) {
459
+ ret[i] = std::hypot(values[i], b[i]);
460
+ }
461
+ return ret;
462
+ }
463
+ Vectorized<T> i0() const {
464
+ return map(calc_i0);
465
+ }
466
+ Vectorized<T> i0e() const {
467
+ return map(calc_i0e);
468
+ }
469
+ Vectorized<T> digamma() const {
470
+ return map(calc_digamma);
471
+ }
472
+ Vectorized<T> igamma(const Vectorized<T> &x) const {
473
+ Vectorized<T> ret;
474
+ for (const auto i : c10::irange(size())) {
475
+ ret[i] = calc_igamma(values[i], x[i]);
476
+ }
477
+ return ret;
478
+ }
479
+ Vectorized<T> igammac(const Vectorized<T> &x) const {
480
+ Vectorized<T> ret;
481
+ for (const auto i : c10::irange(size())) {
482
+ ret[i] = calc_igammac(values[i], x[i]);
483
+ }
484
+ return ret;
485
+ }
486
+ Vectorized<T> neg() const {
487
+ // NB: the trailing return type is needed because we need to coerce the
488
+ // return value back to T in the case of unary operator- incuring a
489
+ // promotion
490
+ return map([](T x) -> T { return -x; });
491
+ }
492
+ Vectorized<T> nextafter(const Vectorized<T> &b) const {
493
+ Vectorized<T> ret;
494
+ for (const auto i : c10::irange(size())) {
495
+ ret[i] = std::nextafter(values[i], b[i]);
496
+ }
497
+ return ret;
498
+ }
499
+ Vectorized<T> round() const {
500
+ // We do not use std::round because we would like to round midway numbers to the nearest even integer.
501
+ return map(at::native::round_impl);
502
+ }
503
+ Vectorized<T> sin() const {
504
+ return map(std::sin);
505
+ }
506
+ Vectorized<T> sinh() const {
507
+ return map(std::sinh);
508
+ }
509
+ Vectorized<T> tan() const {
510
+ return map(std::tan);
511
+ }
512
+ Vectorized<T> tanh() const {
513
+ return map(std::tanh);
514
+ }
515
+ Vectorized<T> trunc() const {
516
+ return map(at::native::trunc_impl);
517
+ }
518
+ Vectorized<T> lgamma() const {
519
+ return map(std::lgamma);
520
+ }
521
+ Vectorized<T> sqrt() const {
522
+ return map(std::sqrt);
523
+ }
524
+ Vectorized<T> reciprocal() const {
525
+ return map([](T x) { return (T)(1) / x; });
526
+ }
527
+ Vectorized<T> rsqrt() const {
528
+ return map([](T x) { return (T)1 / std::sqrt(x); });
529
+ }
530
+ Vectorized<T> pow(const Vectorized<T> &exp) const {
531
+ Vectorized<T> ret;
532
+ for (const auto i : c10::irange(size())) {
533
+ ret[i] = std::pow(values[i], exp[i]);
534
+ }
535
+ return ret;
536
+ }
537
+ private:
538
+ template <typename Op>
539
+ inline Vectorized<T> binary_pred(const Vectorized<T>& other, Op op) const {
540
+ // All bits are set to 1 if the pred is true, otherwise 0.
541
+ Vectorized<T> vector;
542
+ for (int64_t i = 0; i != size(); i++) {
543
+ if (op(values[i], other.values[i])) {
544
+ std::memset(static_cast<void*>(vector.values + i), 0xFF, sizeof(T));
545
+ } else {
546
+ std::memset(static_cast<void*>(vector.values + i), 0, sizeof(T));
547
+ }
548
+ }
549
+ return vector;
550
+ }
551
+
552
+ public:
553
+ Vectorized<T> operator==(const Vectorized<T>& other) const { return binary_pred(other, std::equal_to<T>()); }
554
+ Vectorized<T> operator!=(const Vectorized<T>& other) const { return binary_pred(other, std::not_equal_to<T>()); }
555
+ Vectorized<T> operator>=(const Vectorized<T>& other) const { return binary_pred(other, std::greater_equal<T>()); }
556
+ Vectorized<T> operator<=(const Vectorized<T>& other) const { return binary_pred(other, std::less_equal<T>()); }
557
+ Vectorized<T> operator>(const Vectorized<T>& other) const { return binary_pred(other, std::greater<T>()); }
558
+ Vectorized<T> operator<(const Vectorized<T>& other) const { return binary_pred(other, std::less<T>()); }
559
+
560
+ private:
561
+ template <typename Op>
562
+ inline Vectorized<T> binary_pred_bool(const Vectorized<T>& other, Op op) const {
563
+ // 1 if the pred is true, otherwise 0.
564
+ Vectorized<T> vector;
565
+ for (int i = 0; i != size(); ++ i) {
566
+ vector[i] = static_cast<T>(op(values[i], other.values[i]));
567
+ }
568
+ return vector;
569
+ }
570
+
571
+ public:
572
+ Vectorized<T> eq(const Vectorized<T>& other) const { return binary_pred_bool(other, std::equal_to<T>()); }
573
+ Vectorized<T> ne(const Vectorized<T>& other) const { return binary_pred_bool(other, std::not_equal_to<T>()); }
574
+ Vectorized<T> gt(const Vectorized<T>& other) const { return binary_pred_bool(other, std::greater<T>()); }
575
+ Vectorized<T> ge(const Vectorized<T>& other) const { return binary_pred_bool(other, std::greater_equal<T>()); }
576
+ Vectorized<T> lt(const Vectorized<T>& other) const { return binary_pred_bool(other, std::less<T>()); }
577
+ Vectorized<T> le(const Vectorized<T>& other) const { return binary_pred_bool(other, std::less_equal<T>()); }
578
+ };
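A minimal usage sketch (not part of the header), assuming the caller owns buffers of at least Vectorized<float>::size() elements; the function and variable names here are illustrative only:

// Load, transform, and store one register's worth of floats with the
// scalar-fallback Vectorized<T> defined above.
inline void example_vectorized_roundtrip(const float* src, float* dst) {
  constexpr int N = Vectorized<float>::size();      // VECTOR_WIDTH / sizeof(float)
  auto v = Vectorized<float>::loadu(src);           // unaligned load of N lanes
  v = v.abs();                                      // elementwise |x|
  auto idx = Vectorized<float>::arange(0.f, 1.f);   // {0, 1, 2, ...}
  v = Vectorized<float>::blendv(v, idx, v.isnan()); // replace NaN lanes by their index
  v.store(dst, N);                                  // write all N lanes back
}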
579
+
580
+ template <class T> Vectorized<T> inline operator+(const Vectorized<T> &a, const Vectorized<T> &b) {
581
+ Vectorized<T> c;
582
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
583
+ c[i] = a[i] + b[i];
584
+ }
585
+ return c;
586
+ }
587
+
588
+ template <class T> Vectorized<T> inline operator-(const Vectorized<T> &a, const Vectorized<T> &b) {
589
+ Vectorized<T> c;
590
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
591
+ c[i] = a[i] - b[i];
592
+ }
593
+ return c;
594
+ }
595
+
596
+ template <class T> Vectorized<T> inline operator*(const Vectorized<T> &a, const Vectorized<T> &b) {
597
+ Vectorized<T> c;
598
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
599
+ c[i] = a[i] * b[i];
600
+ }
601
+ return c;
602
+ }
603
+
604
+ template <class T> Vectorized<T> inline operator/(const Vectorized<T> &a, const Vectorized<T> &b) __ubsan_ignore_float_divide_by_zero__ {
605
+ Vectorized<T> c;
606
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
607
+ c[i] = a[i] / b[i];
608
+ }
609
+ return c;
610
+ }
611
+
612
+ template <class T> Vectorized<T> inline operator||(
613
+ const Vectorized<T> &a, const Vectorized<T> &b) {
614
+ Vectorized<T> c;
615
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
616
+ c[i] = a[i] || b[i];
617
+ }
618
+ return c;
619
+ }
620
+
621
+ // Implements the IEEE 754 201X `maximum` operation, which propagates NaN if
622
+ // either input is a NaN.
623
+ template <class T,
624
+ typename std::enable_if<!c10::is_complex<T>::value, int>::type = 0>
625
+ Vectorized<T> inline maximum(const Vectorized<T> &a, const Vectorized<T> &b) {
626
+ Vectorized<T> c;
627
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
628
+ c[i] = (a[i] > b[i]) ? a[i] : b[i];
629
+ if (_isnan(a[i])) {
630
+ // If either input is NaN, propagate a NaN.
631
+ // NOTE: The case where b[i] was NaN is handled correctly by the naive
632
+ // ternary operator above.
633
+ c[i] = a[i];
634
+ }
635
+ }
636
+ return c;
637
+ }
638
+
639
+ template <class T,
640
+ typename std::enable_if<c10::is_complex<T>::value, int>::type = 0>
641
+ Vectorized<T> inline maximum(const Vectorized<T> &a, const Vectorized<T> &b) {
642
+ Vectorized<T> c;
643
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
644
+ c[i] = (std::abs(a[i]) > std::abs(b[i])) ? a[i] : b[i];
645
+ if (_isnan(a[i])) {
646
+ // If either input is NaN, propagate a NaN.
647
+ // NOTE: The case where b[i] was NaN is handled correctly by the naive
648
+ // ternary operator above.
649
+ c[i] = a[i];
650
+ }
651
+ }
652
+ return c;
653
+ }
654
+
655
+ // Implements the IEEE 754 201X `minimum` operation, which propagates NaN if
656
+ // either input is a NaN.
657
+ template <class T,
658
+ typename std::enable_if<!c10::is_complex<T>::value, int>::type = 0>
659
+ Vectorized<T> inline minimum(const Vectorized<T> &a, const Vectorized<T> &b) {
660
+ Vectorized<T> c;
661
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
662
+ c[i] = (a[i] < b[i]) ? a[i] : b[i];
663
+ if (_isnan(a[i])) {
664
+ // If either input is NaN, propagate a NaN.
665
+ // NOTE: The case where b[i] was NaN is handled correctly by the naive
666
+ // ternary operator above.
667
+ c[i] = a[i];
668
+ }
669
+ }
670
+ return c;
671
+ }
672
+
673
+ template <class T,
674
+ typename std::enable_if<c10::is_complex<T>::value, int>::type = 0>
675
+ Vectorized<T> inline minimum(const Vectorized<T> &a, const Vectorized<T> &b) {
676
+ Vectorized<T> c;
677
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
678
+ c[i] = (std::abs(a[i]) < std::abs(b[i])) ? a[i] : b[i];
679
+ if (_isnan(a[i])) {
680
+ // If either input is NaN, propagate a NaN.
681
+ // NOTE: The case where b[i] was NaN is handled correctly by the naive
682
+ // ternary operator above.
683
+ c[i] = a[i];
684
+ }
685
+ }
686
+ return c;
687
+ }
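A hedged sketch (not part of the header) of the NaN-propagation guarantee stated above; std::nanf and std::isnan come from <cmath>, which this header already includes:

// Unlike std::max, maximum() propagates a NaN from either operand.
inline bool example_maximum_propagates_nan() {
  auto a = Vectorized<float>(std::nanf(""));
  auto b = Vectorized<float>(1.0f);
  auto m = maximum(a, b);
  return std::isnan(m[0]);  // true: the NaN lane wins
}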
688
+
689
+ template <class T,
690
+ typename std::enable_if<!c10::is_complex<T>::value, int>::type = 0>
691
+ Vectorized<T> inline clamp(const Vectorized<T> &a, const Vectorized<T> &min_vec, const Vectorized<T> &max_vec) {
692
+ Vectorized<T> c;
693
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
694
+ c[i] = std::min(std::max(a[i], min_vec[i]), max_vec[i]);
695
+ }
696
+ return c;
697
+ }
698
+
699
+ template <class T,
700
+ typename std::enable_if<!c10::is_complex<T>::value, int>::type = 0>
701
+ Vectorized<T> inline clamp_max(const Vectorized<T> &a, const Vectorized<T> &max_vec) {
702
+ Vectorized<T> c;
703
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
704
+ c[i] = a[i] > max_vec[i] ? max_vec[i] : a[i];
705
+ }
706
+ return c;
707
+ }
708
+
709
+ template <class T,
710
+ typename std::enable_if<!c10::is_complex<T>::value, int>::type = 0>
711
+ Vectorized<T> inline clamp_min(const Vectorized<T> &a, const Vectorized<T> &min_vec) {
712
+ Vectorized<T> c;
713
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
714
+ c[i] = a[i] < min_vec[i] ? min_vec[i] : a[i];
715
+ }
716
+ return c;
717
+ }
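A hedged sketch (not part of the header): clamp() composes the lane-wise min/max above, so clamping to the unit interval looks like this (names are illustrative):

inline Vectorized<float> example_clamp_unit_interval(const Vectorized<float>& x) {
  const Vectorized<float> lo(0.0f), hi(1.0f);
  return clamp(x, lo, hi);  // per-lane std::min(std::max(x, lo), hi)
}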
718
+
719
+ struct Vectorizedi;
720
+
721
+ #if defined(CPU_CAPABILITY_AVX2) || defined(CPU_CAPABILITY_AVX512)
722
+ template <class T, typename Op>
723
+ static inline Vectorized<T> bitwise_binary_op(const Vectorized<T> &a, const Vectorized<T> &b, Op op) {
724
+ int_vector buffer;
725
+ #if defined(CPU_CAPABILITY_AVX2)
726
+ int_vector a_buffer = _mm256_load_si256(reinterpret_cast<const int_vector*>((const T*)a));
727
+ int_vector b_buffer = _mm256_load_si256(reinterpret_cast<const int_vector*>((const T*)b));
728
+ #elif defined(CPU_CAPABILITY_AVX512)
729
+ int_vector a_buffer = _mm512_load_si512(reinterpret_cast<const int_vector*>((const T*)a));
730
+ int_vector b_buffer = _mm512_load_si512(reinterpret_cast<const int_vector*>((const T*)b));
731
+ #endif
732
+ buffer = op(a_buffer, b_buffer);
733
+ __at_align__ T results[Vectorized<T>::size()];
734
+
735
+ #if defined(CPU_CAPABILITY_AVX2)
736
+ _mm256_store_si256(reinterpret_cast<int_vector*>(results), buffer);
737
+ #elif defined(CPU_CAPABILITY_AVX512)
738
+ _mm512_store_si512(reinterpret_cast<int_vector*>(results), buffer);
739
+ #endif
740
+ return Vectorized<T>::loadu(results);
741
+ }
742
+
743
+ template<class T, typename std::enable_if_t<!std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
744
+ inline Vectorized<T> operator&(const Vectorized<T>& a, const Vectorized<T>& b) {
745
+ // We enclose _mm512_and_si512 or _mm256_and_si256 with lambda because it is always_inline
746
+ #if defined(CPU_CAPABILITY_AVX2)
747
+ return bitwise_binary_op(a, b, [](int_vector a, int_vector b) { return _mm256_and_si256(a, b); });
748
+ #elif defined(CPU_CAPABILITY_AVX512)
749
+ return bitwise_binary_op(a, b, [](int_vector a, int_vector b) { return _mm512_and_si512(a, b); });
750
+ #endif
751
+ }
752
+ template<class T, typename std::enable_if_t<!std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
753
+ inline Vectorized<T> operator|(const Vectorized<T>& a, const Vectorized<T>& b) {
754
+ // We enclose _mm512_or_si512 or _mm256_or_si256 with lambda because it is always_inline
755
+ #if defined(CPU_CAPABILITY_AVX2)
756
+ return bitwise_binary_op(a, b, [](int_vector a, int_vector b) { return _mm256_or_si256(a, b); });
757
+ #elif defined(CPU_CAPABILITY_AVX512)
758
+ return bitwise_binary_op(a, b, [](int_vector a, int_vector b) { return _mm512_or_si512(a, b); });
759
+ #endif
760
+ }
761
+ template<class T, typename std::enable_if_t<!std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
762
+ inline Vectorized<T> operator^(const Vectorized<T>& a, const Vectorized<T>& b) {
763
+ // We enclose _mm512_xor_si512 or _mm256_xor_si256 with lambda because it is always_inline
764
+ #if defined(CPU_CAPABILITY_AVX2)
765
+ return bitwise_binary_op(a, b, [](int_vector a, int_vector b) { return _mm256_xor_si256(a, b); });
766
+ #elif defined(CPU_CAPABILITY_AVX512)
767
+ return bitwise_binary_op(a, b, [](int_vector a, int_vector b) { return _mm512_xor_si512(a, b); });
768
+ #endif
769
+ }
770
+
771
+ #else
772
+
773
+ template <typename T>
774
+ auto load(char const* data) -> T {
775
+ T ret;
776
+ std::memcpy(&ret, data, sizeof(ret));
777
+ return ret;
778
+ }
779
+
780
+ template<class T, typename Op>
781
+ static inline Vectorized<T> bitwise_binary_op(const Vectorized<T> &a, const Vectorized<T> &b, Op op) {
782
+ static constexpr uint32_t element_no = VECTOR_WIDTH / sizeof(intmax_t);
783
+ __at_align__ intmax_t buffer[element_no];
784
+ static_assert(VECTOR_WIDTH % sizeof(intmax_t) == 0, "VECTOR_WIDTH not a multiple of sizeof(intmax_t)");
785
+ static_assert(sizeof(buffer) == sizeof(Vectorized<T>), "sizeof(buffer) must match sizeof(Vectorized<T>)");
786
+ // We should be using memcpy in order to respect the strict aliasing rule
787
+ // see: https://github.com/pytorch/pytorch/issues/66119
788
+ // Using char* is defined in the C11 standard 6.5 Expression paragraph 7
789
+ // (http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf)
790
+ const auto* a_data = a.as_bytes();
791
+ const auto* b_data = b.as_bytes();
792
+ // load each intmax_t chunk and process; increase pointers by sizeof(intmax_t)
793
+ for (auto& out : buffer) {
794
+ out = op(load<intmax_t>(a_data), load<intmax_t>(b_data));
795
+ a_data += sizeof(intmax_t);
796
+ b_data += sizeof(intmax_t);
797
+ }
798
+ assert(a_data == a.as_bytes() + sizeof(a));
799
+ assert(b_data == b.as_bytes() + sizeof(b));
800
+ return Vectorized<T>::loadu(buffer);
801
+ }
802
+
803
+ template<class T, typename std::enable_if_t<!std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
804
+ inline Vectorized<T> operator&(const Vectorized<T>& a, const Vectorized<T>& b) {
805
+ return bitwise_binary_op(a, b, std::bit_and<intmax_t>());
806
+ }
807
+ template<class T, typename std::enable_if_t<!std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
808
+ inline Vectorized<T> operator|(const Vectorized<T>& a, const Vectorized<T>& b) {
809
+ return bitwise_binary_op(a, b, std::bit_or<intmax_t>());
810
+ }
811
+ template<class T, typename std::enable_if_t<!std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
812
+ inline Vectorized<T> operator^(const Vectorized<T>& a, const Vectorized<T>& b) {
813
+ return bitwise_binary_op(a, b, std::bit_xor<intmax_t>());
814
+ }
815
+
816
+ #endif // defined(CPU_CAPABILITY_AVX2) || defined(CPU_CAPABILITY_AVX512)
817
+
818
+ template<class T, typename std::enable_if_t<!std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
819
+ inline Vectorized<T> operator~(const Vectorized<T>& a) {
820
+ Vectorized<T> ones; // All bits are 1
821
+ memset((T*) ones, 0xFF, VECTOR_WIDTH);
822
+ return a ^ ones;
823
+ }
824
+
825
+ template <class T> Vectorized<T> inline operator<<(const Vectorized<T> &a, const Vectorized<T> &b) {
826
+ constexpr T max_shift = sizeof(T) * CHAR_BIT;
827
+ Vectorized<T> c;
828
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
829
+ T shift = b[i];
830
+ if ((static_cast<std::make_signed_t<T>>(shift) < 0) || (shift >= max_shift)) {
831
+ c[i] = 0;
832
+ } else {
833
+ c[i] = static_cast<std::make_unsigned_t<T>>(a[i]) << shift;
834
+ }
835
+ }
836
+ return c;
837
+ }
838
+
839
+ template <class T> Vectorized<T> inline operator>>(const Vectorized<T> &a, const Vectorized<T> &b) {
840
+ // maximum right-shift amount: keeps only the sign bit for signed types and no bits for unsigned
841
+ constexpr T max_shift = sizeof(T) * CHAR_BIT - std::is_signed_v<T>;
842
+ Vectorized<T> c;
843
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
844
+ T shift = b[i];
845
+ if ((static_cast<std::make_signed_t<T>>(shift) < 0) || (shift >= max_shift)) {
846
+ c[i] = a[i] >> max_shift;
847
+ } else {
848
+ c[i] = a[i] >> shift;
849
+ }
850
+ }
851
+ return c;
852
+ }
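A hedged sketch (not part of the header) of the out-of-range shift handling above, which is deliberately well-defined instead of undefined behaviour as in scalar C++:

inline void example_shift_semantics() {
  Vectorized<int32_t> a(-8), big(40), neg(-1);
  auto l = a << big;  // shift >= 32 (or negative): every lane becomes 0
  auto r = a >> neg;  // negative shift clamps to a >> 31, i.e. -1 for negative lanes
  (void)l; (void)r;   // silence unused-variable warnings
}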
853
+
854
+ template <typename T>
855
+ inline Vectorized<T>& operator += (Vectorized<T>& a, const Vectorized<T>& b) {
856
+ a = a + b;
857
+ return a;
858
+ }
859
+ template <typename T>
860
+ inline Vectorized<T>& operator -= (Vectorized<T>& a, const Vectorized<T>& b) {
861
+ a = a - b;
862
+ return a;
863
+ }
864
+ template <typename T>
865
+ inline Vectorized<T>& operator /= (Vectorized<T>& a, const Vectorized<T>& b) {
866
+ a = a / b;
867
+ return a;
868
+ }
869
+ template <typename T>
870
+ inline Vectorized<T>& operator %= (Vectorized<T>& a, const Vectorized<T>& b) {
871
+ a = a % b;
872
+ return a;
873
+ }
874
+ template <typename T>
875
+ inline Vectorized<T>& operator *= (Vectorized<T>& a, const Vectorized<T>& b) {
876
+ a = a * b;
877
+ return a;
878
+ }
879
+
880
+ template <typename T>
881
+ inline Vectorized<T>& operator <<= (Vectorized<T>& a, const Vectorized<T>& b) {
882
+ a = a << b;
883
+ return a;
884
+ }
885
+
886
+ template <typename T>
887
+ inline Vectorized<T>& operator >>= (Vectorized<T>& a, const Vectorized<T>& b) {
888
+ a = a >> b;
889
+ return a;
890
+ }
891
+
892
+ template <typename T>
893
+ inline Vectorized<T> fmadd(const Vectorized<T>& a, const Vectorized<T>& b, const Vectorized<T>& c) {
894
+ return a * b + c;
895
+ }
896
+
897
+ template <typename T>
898
+ inline Vectorized<T> fmsub(const Vectorized<T>& a, const Vectorized<T>& b, const Vectorized<T>& c) {
899
+ return a * b - c;
900
+ }
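A hedged sketch (not part of the header): on this generic path fmadd() is simply a * b + c, but architecture-specific specializations may lower it to a real fused multiply-add. An axpy-style helper (illustrative name):

inline Vectorized<float> example_axpy(const Vectorized<float>& x,
                                      const Vectorized<float>& y,
                                      float alpha) {
  return fmadd(Vectorized<float>(alpha), x, y);  // alpha * x + y, lane-wise
}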
901
+
902
+ template <int64_t scale = 1, typename T = void>
903
+ std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<T>>
904
+ inline gather(T const* base_addr, const Vectorized<int_same_size_t<T>>& vindex) {
905
+ static constexpr int size = Vectorized<T>::size();
906
+ int_same_size_t<T> index_arr[size];
907
+ vindex.store(static_cast<void*>(index_arr));
908
+ T buffer[size];
909
+ for (const auto i : c10::irange(size)) {
910
+ buffer[i] = base_addr[index_arr[i] * scale / sizeof(T)];
911
+ }
912
+ return Vectorized<T>::loadu(static_cast<void*>(buffer));
913
+ }
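A hedged sketch (not part of the header): scale is a byte multiplier, mirroring the AVX gather convention, so scale == sizeof(T) means the index vector holds plain element indices:

inline Vectorized<double> example_gather_every_other(const double* base) {
  auto idx = Vectorized<int64_t>::arange(0, 2);  // {0, 2, 4, ...}
  return gather<sizeof(double)>(base, idx);      // base[0], base[2], base[4], ...
}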
914
+
915
+ template <int64_t scale = 1, typename T = void>
916
+ std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<T>>
917
+ inline mask_gather(const Vectorized<T>& src, T const* base_addr,
918
+ const Vectorized<int_same_size_t<T>>& vindex, Vectorized<T>& mask) {
919
+ static constexpr int size = Vectorized<T>::size();
920
+ T src_arr[size];
921
+ int_same_size_t<T> mask_arr[size]; // use int type so we can logical and
922
+ int_same_size_t<T> index_arr[size];
923
+ src.store(static_cast<void*>(src_arr));
924
+ mask.store(static_cast<void*>(mask_arr));
925
+ vindex.store(static_cast<void*>(index_arr));
926
+ T buffer[size];
927
+ for (const auto i : c10::irange(size)) {
928
+ if (mask_arr[i] & 0x01) { // check lowest bit
929
+ buffer[i] = base_addr[index_arr[i] * scale / sizeof(T)];
930
+ } else {
931
+ buffer[i] = src_arr[i];
932
+ }
933
+ }
934
+ mask = Vectorized<T>(); // "zero out" mask
935
+ return Vectorized<T>::loadu(static_cast<void*>(buffer));
936
+ }
937
+
938
+ // Cast a given vector to another type without changing the bits representation.
939
+ // So a Vectorized<double> of 512 bits containing all ones can be cast to a
940
+ // Vectorized<int64_t> of 512 bits containing all ones (i.e., eight negative 1s).
941
+ // A Vec<double> of 256 bits containing all ones can be cast to a
942
+ // Vec<int64_t> of 256 bits containing all ones (i.e., four negative 1s).
943
+ // There is a struct here because we don't have static_if and I can't
944
+ // partially specialize a templated function.
945
+ template<typename dst_t, typename src_t>
946
+ struct CastImpl {
947
+ static inline Vectorized<dst_t> apply(const Vectorized<src_t>& src) {
948
+ src_t src_arr[Vectorized<src_t>::size()];
949
+ src.store(static_cast<void*>(src_arr));
950
+ return Vectorized<dst_t>::loadu(static_cast<const void*>(src_arr));
951
+ }
952
+ };
953
+
954
+ template<typename scalar_t>
955
+ struct CastImpl<scalar_t, scalar_t> {
956
+ static inline Vectorized<scalar_t> apply(const Vectorized<scalar_t>& src) {
957
+ return src;
958
+ }
959
+ };
960
+
961
+ template<typename dst_t, typename src_t>
962
+ inline Vectorized<dst_t> cast(const Vectorized<src_t>& src) {
963
+ return CastImpl<dst_t, src_t>::apply(src);
964
+ }
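A hedged sketch (not part of the header): cast<> reinterprets the underlying bits rather than converting values, so an all-ones double vector becomes int64_t lanes equal to -1:

inline Vectorized<int64_t> example_bitcast(const Vectorized<double>& src) {
  return cast<int64_t>(src);  // same bytes, viewed as int64_t lanes
}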
965
+
966
+ template <typename T, typename IntType = int_same_size_t<T>>
967
+ inline Vectorized<IntType> convert_to_int_of_same_size(const Vectorized<T>& src) {
968
+ static_assert(sizeof(T) == sizeof(IntType));
969
+ static constexpr int size = Vectorized<T>::size();
970
+
971
+ std::array<T, size> src_arr;
972
+ src.store(static_cast<void*>(src_arr.data()));
973
+ std::array<IntType, size> buffer;
974
+ std::transform(src_arr.cbegin(), src_arr.cend(), buffer.begin(),
975
+ [](const T& x) { return static_cast<IntType>(x); });
976
+ return Vectorized<IntType>::loadu(static_cast<const void*>(buffer.data()));
977
+ }
978
+
979
+ // Example inputs for AVX512:
980
+ // a Vectorized<float> = {a0, b0, a1, b1, a2, b2, a3, b3, a4, b4, a5, b5, a6, b6, a7, b7}
981
+ // b Vectorized<float> = {a8, b8, a9, b9, a10, b10, a11, b11, a12, b12, a13, b13, a14, b14, a15, b15}
982
+ // returns:
983
+ // Vectorized<float> = {a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15}
984
+ // Vectorized<float> = {b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15}
985
+ // Example inputs for AVX2: a Vectorized<float> = {a0, b0, a1, b1, a2, b2, a3, b3}
986
+ // b Vectorized<float> = {a4, b4, a5, b5, a6, b6, a7, b7}
987
+ // returns: Vectorized<float> = {a0, a1, a2, a3, a4, a5, a6, a7}
988
+ // Vectorized<float> = {b0, b1, b2, b3, b4, b5, b6, b7}
989
+ template <typename T>
990
+ inline std::enable_if_t<Vectorized<T>::size() % 2 == 0, std::pair<Vectorized<T>, Vectorized<T>>>
991
+ deinterleave2(const Vectorized<T>& a, const Vectorized<T>& b) {
992
+ static constexpr int size = Vectorized<T>::size();
993
+ static constexpr int half_size = size / 2;
994
+ T a_arr[size];
995
+ T b_arr[size];
996
+ T buffer1[size];
997
+ T buffer2[size];
998
+ a.store(static_cast<void*>(a_arr));
999
+ b.store(static_cast<void*>(b_arr));
1000
+ for (const auto i : c10::irange(half_size)) {
1001
+ buffer1[i] = a_arr[i * 2];
1002
+ buffer1[half_size + i] = b_arr[i * 2];
1003
+ buffer2[i] = a_arr[i * 2 + 1];
1004
+ buffer2[half_size + i] = b_arr[i * 2 + 1];
1005
+ }
1006
+ return std::make_pair(Vectorized<T>::loadu(static_cast<void*>(buffer1)),
1007
+ Vectorized<T>::loadu(static_cast<void*>(buffer2)));
1008
+ }
1009
+
1010
+ // inverse operation of deinterleave2
1011
+ // Example inputs for AVX512:
1012
+ // a Vectorized<float> = {a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15}
1013
+ // b Vectorized<float> = {b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15}
1014
+ // returns, for AVX512:
1015
+ // Vectorized<float> = {a0, b0, a1, b1, a2, b2, a3, b3, a4, b4, a5, b5, a6, b6, a7, b7}
1016
+ // Vectorized<float> = {a8, b8, a9, b9, a10, b10, a11, b11, a12, b12, a13, b13, a14, b14, a15, b15}
1017
+ // Example inputs for AVX2 : a Vectorized<float> = {a0, a1, a2, a3, a4, a5, a6, a7}
1018
+ // b Vectorized<float> = {b0, b1, b2, b3, b4, b5, b6, b7}
1019
+ // returns: Vectorized<float> = {a0, b0, a1, b1, a2, b2, a3, b3}
1020
+ // Vectorized<float> = {a4, b4, a5, b5, a6, b6, a7, b7}
1021
+ template <typename T>
1022
+ inline std::enable_if_t<Vectorized<T>::size() % 2 == 0, std::pair<Vectorized<T>, Vectorized<T>>>
1023
+ interleave2(const Vectorized<T>& a, const Vectorized<T>& b) {
1024
+ static constexpr int size = Vectorized<T>::size();
1025
+ static constexpr int half_size = size / 2;
1026
+ T a_arr[size];
1027
+ T b_arr[size];
1028
+ T buffer1[size];
1029
+ T buffer2[size];
1030
+ a.store(static_cast<void*>(a_arr));
1031
+ b.store(static_cast<void*>(b_arr));
1032
+ for (const auto i : c10::irange(half_size)) {
1033
+ buffer1[i * 2] = a_arr[i];
1034
+ buffer1[i * 2 + 1] = b_arr[i];
1035
+ buffer2[i * 2] = a_arr[half_size + i];
1036
+ buffer2[i * 2 + 1] = b_arr[half_size + i];
1037
+ }
1038
+ return std::make_pair(Vectorized<T>::loadu(static_cast<void*>(buffer1)),
1039
+ Vectorized<T>::loadu(static_cast<void*>(buffer2)));
1040
+ }
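A hedged sketch (not part of the header): interleave2 undoes deinterleave2, so splitting even/odd lanes apart and re-interleaving them is lossless:

inline void example_interleave_roundtrip(const Vectorized<float>& a,
                                         const Vectorized<float>& b) {
  auto halves = deinterleave2(a, b);                       // even lanes, odd lanes
  auto back   = interleave2(halves.first, halves.second);  // recovers the original a and b
  (void)back;
}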
1041
+
1042
+ template <typename src_T, typename dst_T>
1043
+ inline void convert(const src_T *src, dst_T *dst, int64_t n) {
1044
+ #ifndef _MSC_VER
1045
+ # pragma unroll
1046
+ #endif
1047
+ for (C10_UNUSED const auto i : c10::irange(n)) {
1048
+ *dst = c10::convert<dst_T>(c10::load(src));
1049
+ src++;
1050
+ dst++;
1051
+ }
1052
+ }
1053
+
1054
+ template <typename T>
1055
+ inline Vectorized<T> flip(const Vectorized<T> & data) {
1056
+ static constexpr int size = Vectorized<T>::size();
1057
+ T output[size];
1058
+ T buffer[size];
1059
+ data.store(static_cast<void*>(buffer));
1060
+ for (const auto i : c10::irange(size)) {
1061
+ output[i] = buffer[size - i - 1];
1062
+ }
1063
+ return Vectorized<T>::loadu(static_cast<void*>(output));
1064
+ }
1065
+
1066
+ // Transpose the `src` buffer of type `T` and size (M,N) into the `dst` buffer. `ld_src` is the leading
1067
+ // dimension of `src` and `ld_dst` is the leading dimension of `dst`.
1068
+ template <typename T, int M, int N>
1069
+ inline void transpose_mxn(const T* src, int64_t ld_src, T* dst, int64_t ld_dst) {
1070
+ for (int i = 0; i < M; i++) {
1071
+ for (int j = 0; j < N; j++) {
1072
+ dst[j*ld_dst + i] = src[i*ld_src + j];
1073
+ }
1074
+ }
1075
+ }
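A hedged sketch (not part of the header): transposing a densely packed 4x8 row-major block into an 8x4 destination, so the leading dimensions are simply the row lengths:

inline void example_transpose_block(const float* src, float* dst) {
  transpose_mxn<float, 4, 8>(src, /*ld_src=*/8, dst, /*ld_dst=*/4);
}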
1076
+
1077
+ }} // namespace at::vec::CPU_CAPABILITY
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec_half.h ADDED
@@ -0,0 +1,50 @@
1
+ #pragma once
2
+
3
+ #include <ATen/cpu/vec/intrinsics.h>
4
+
5
+ namespace at::vec {
6
+ // See Note [CPU_CAPABILITY namespace]
7
+ inline namespace CPU_CAPABILITY {
8
+
9
+ #if (defined(CPU_CAPABILITY_AVX2) || defined(CPU_CAPABILITY_AVX512)) && \
10
+ !defined(__APPLE__)
11
+ static inline uint16_t float2half_scalar(float val) {
12
+ #if defined(CPU_CAPABILITY_AVX2)
13
+ #if defined(_MSC_VER)
14
+ __m256 v = _mm256_set1_ps(val);
15
+ __m128i o =
16
+ _mm256_cvtps_ph(v, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
17
+ return static_cast<std::uint16_t>(_mm_cvtsi128_si32(o));
18
+ #else
19
+ return _cvtss_sh(val, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
20
+ #endif
21
+ #elif defined(CPU_CAPABILITY_AVX512)
22
+ __m512 v = _mm512_set1_ps(val);
23
+ __m256i o =
24
+ _mm512_cvtps_ph(v, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
25
+ return static_cast<std::uint16_t>(
26
+ _mm_cvtsi128_si32(_mm256_castsi256_si128(o)));
27
+ #endif
28
+ }
29
+
30
+ static inline float half2float_scalar(uint16_t val) {
31
+ #if defined(CPU_CAPABILITY_AVX2)
32
+ #if defined(_MSC_VER)
33
+ __m128i v = _mm_cvtsi32_si128(val);
34
+ __m256 o = _mm256_cvtph_ps(v);
35
+ return _mm256_cvtss_f32(o);
36
+ #else
37
+ return _cvtsh_ss(val);
38
+ #endif
39
+ #elif defined(CPU_CAPABILITY_AVX512)
40
+ __m256i v =
41
+ _mm256_setr_epi16(val, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
42
+ __m512 o = _mm512_cvtph_ps(v);
43
+ return _mm512_cvtss_f32(o);
44
+ #endif
45
+ }
46
+
47
+ #endif
48
+
49
+ } // namespace CPU_CAPABILITY
50
+ } // namespace at::vec
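A hedged sketch (not part of the header): a float -> fp16 -> float round trip with the helpers above; it is only available when CPU_CAPABILITY_AVX2 or CPU_CAPABILITY_AVX512 is defined (and not on Apple), per the surrounding guard:

inline float example_half_roundtrip(float x) {
  const uint16_t h = at::vec::float2half_scalar(x);  // round-to-nearest-even conversion
  return at::vec::half2float_scalar(h);              // exact for fp16-representable inputs
}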
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/AtomicAddFloat.h ADDED
@@ -0,0 +1,37 @@
1
+ #ifndef ATOMIC_ADD_FLOAT
2
+ #define ATOMIC_ADD_FLOAT
3
+
4
+ #if (defined(__x86_64__) || defined(__i386__) || defined(__aarch64__))
5
+ #include <ATen/native/cpu/Intrinsics.h>
6
+ #else
7
+ #define _mm_pause()
8
+ #endif
9
+
10
+ #include <atomic>
11
+
12
+ static inline void cpu_atomic_add_float(float* dst, float fvalue)
13
+ {
14
+ typedef union {
15
+ unsigned intV;
16
+ float floatV;
17
+ } uf32_t;
18
+
19
+ uf32_t new_value, old_value;
20
+ std::atomic<unsigned>* dst_intV = (std::atomic<unsigned>*)(dst);
21
+
22
+ old_value.floatV = *dst;
23
+ new_value.floatV = old_value.floatV + fvalue;
24
+
25
+ unsigned* old_intV = (unsigned*)(&old_value.intV);
26
+ while (!std::atomic_compare_exchange_strong(dst_intV, old_intV, new_value.intV)) {
27
+ #ifdef __aarch64__
28
+ __asm__ __volatile__("yield;" : : : "memory");
29
+ #else
30
+ _mm_pause();
31
+ #endif
32
+ old_value.floatV = *dst;
33
+ new_value.floatV = old_value.floatV + fvalue;
34
+ }
35
+ }
36
+
37
+ #endif
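
cpu_atomic_add_float above is a compare-and-swap loop on the 32-bit pattern of the float, with _mm_pause / yield as a backoff hint while another thread owns the cache line. A portable sketch of the same CAS-loop idea using std::atomic<float> directly (an illustration, not the header's implementation):

#include <atomic>
#include <cstdio>

// CAS loop: keep retrying until nobody else modified dst between our read and
// our write. compare_exchange_weak refreshes `old` with the observed value on failure.
void atomic_add_float(std::atomic<float>& dst, float value) {
  float old = dst.load(std::memory_order_relaxed);
  while (!dst.compare_exchange_weak(old, old + value)) {
    // `old` now holds the freshly observed value; the loop recomputes old + value
  }
}

int main() {
  std::atomic<float> acc{0.0f};
  for (int i = 0; i < 4; ++i) atomic_add_float(acc, 0.5f);
  std::printf("%g\n", acc.load());  // 2
}

Since C++20, std::atomic<float>::fetch_add expresses the same operation without the explicit loop.
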
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/CatKernel.h ADDED
@@ -0,0 +1,12 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/Tensor.h>
4
+ #include <ATen/native/DispatchStub.h>
5
+ #include <ATen/core/IListRef.h>
6
+
7
+ namespace at { namespace native {
8
+
9
+ using cat_serial_fn = void(*)(const Tensor &, const MaterializedITensorListRef&, int64_t);
10
+ DECLARE_DISPATCH(cat_serial_fn, cat_serial_stub);
11
+
12
+ }} // namespace at::native
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/CopyKernel.h ADDED
@@ -0,0 +1,12 @@
1
+ #pragma once
2
+
3
+ namespace at {
4
+ struct TensorIteratorBase;
5
+
6
+ namespace native {
7
+ inline namespace CPU_CAPABILITY {
8
+
9
+ void direct_copy_kernel(TensorIteratorBase &iter);
10
+ void copy_kernel(TensorIterator& iter, bool /*non_blocking*/);
11
+
12
+ }}} // namespace at::native::CPU_CAPABILITY
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/DistributionTemplates.h ADDED
@@ -0,0 +1,368 @@
1
+ #pragma once
2
+
3
+ #include <ATen/CPUApplyUtils.h>
4
+ #include <ATen/Dispatch.h>
5
+ #include <ATen/ExpandBase.h>
6
+ #include <ATen/core/DistributionsHelper.h>
7
+ #include <ATen/native/TensorIterator.h>
8
+ #include <ATen/native/cpu/Loops.h>
9
+ #include <limits>
10
+ #include <mutex>
11
+
12
+ #ifdef CPU_CAPABILITY_AVX2
13
+ #include <ATen/native/cpu/avx_mathfun.h>
14
+ #include <c10/util/irange.h>
15
+ #endif
16
+
17
+
18
+ namespace at {
19
+ namespace native {
20
+ namespace templates {
21
+ namespace cpu {
22
+ namespace {
23
+
24
+ // ==================================================== Random ========================================================
25
+
26
+ template<typename RNG>
27
+ void random_from_to_kernel(TensorIteratorBase& iter, uint64_t range, int64_t base, RNG generator) {
28
+ AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "random_from_to_kernel_cpu", [&] {
29
+ std::lock_guard<std::mutex> lock(generator->mutex_);
30
+ cpu_serial_kernel(iter, [range, base, generator]() -> scalar_t {
31
+ uniform_int_from_to_distribution<scalar_t> random(range, base);
32
+ return random(generator);
33
+ });
34
+ });
35
+ }
36
+
37
+ // This is a special kernel to handle a single specific case:
38
+ // from(inclusive) = std::numeric_limits<int64_t>::lowest()
39
+ // to(exclusive) = None (= std::numeric_limits<int64_t>::max() + 1)
40
+ template<typename RNG>
41
+ void random_full_64_bits_range_kernel(TensorIteratorBase& iter, RNG generator) {
42
+ AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::BFloat16, iter.dtype(), "random_full_64_bits_range_kernel_cpu", [&] {
43
+ if constexpr (std::is_same<scalar_t, int64_t>::value ||
44
+ std::is_same<scalar_t, double>::value ||
45
+ std::is_same<scalar_t, float>::value ||
46
+ std::is_same<scalar_t, at::BFloat16>::value) {
47
+ std::lock_guard<std::mutex> lock(generator->mutex_);
48
+ cpu_serial_kernel(iter, [generator]() -> scalar_t {
49
+ uniform_int_full_range_distribution<scalar_t> random;
50
+ return random(generator);
51
+ });
52
+ } else {
53
+ TORCH_CHECK(false, "random_full_64_bits_range_kernel_cpu handles only int64, double, float and bfloat16");
54
+ }
55
+ });
56
+ }
57
+
58
+ template<typename RNG>
59
+ struct RandomFromToKernel {
60
+ void operator()(TensorIteratorBase& iter, uint64_t range, int64_t base, c10::optional<Generator> gen) {
61
+ random_from_to_kernel(iter, range, base, check_generator<RNG>(gen));
62
+ }
63
+ void operator()(TensorIteratorBase& iter, c10::optional<Generator> gen) {
64
+ random_full_64_bits_range_kernel(iter, check_generator<RNG>(gen));
65
+ }
66
+ };
67
+
68
+ template<typename RNG>
69
+ void random_kernel(TensorIteratorBase& iter, RNG generator) {
70
+ std::lock_guard<std::mutex> lock(generator->mutex_);
71
+ AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Half, at::ScalarType::BFloat16, at::ScalarType::Bool, iter.dtype(), "random_kernel_cpu", [&] {
72
+ cpu_serial_kernel(iter, [generator]() -> scalar_t {
73
+ uniform_int_distribution<scalar_t> random;
74
+ return random(generator);
75
+ });
76
+ });
77
+ }
78
+
79
+ template<typename RNG>
80
+ struct RandomKernel {
81
+ void operator()(TensorIteratorBase& iter, c10::optional<Generator> gen) {
82
+ random_kernel(iter, check_generator<RNG>(gen));
83
+ }
84
+ };
85
+
86
+ // ==================================================== Normal ========================================================
87
+
88
+ #ifdef CPU_CAPABILITY_AVX2
89
+ static void normal_fill_16_AVX2(float *data,
90
+ const __m256* two_pi,
91
+ const __m256* one,
92
+ const __m256* minus_two,
93
+ const __m256* mean,
94
+ const __m256* std_v) {
95
+ const __m256 u1 = _mm256_sub_ps(*one, _mm256_loadu_ps(data));
96
+ const __m256 u2 = _mm256_loadu_ps(data + 8);
97
+ // sincos256_ps and log256_ps are from avx_mathfun.h
98
+ const __m256 radius = _mm256_sqrt_ps(_mm256_mul_ps(*minus_two, log256_ps(u1)));
99
+ const __m256 theta = _mm256_mul_ps(*two_pi, u2);
100
+ __m256 sintheta, costheta;
101
+ sincos256_ps(theta, &sintheta, &costheta);
102
+ const __m256 n1 = _mm256_mul_ps(radius, costheta);
103
+ const __m256 n2 = _mm256_mul_ps(radius, sintheta);
104
+ _mm256_storeu_ps(data, _mm256_fmadd_ps(n1, *std_v, *mean));
105
+ _mm256_storeu_ps(data + 8, _mm256_fmadd_ps(n2, *std_v, *mean));
106
+ }
107
+
108
+ template<typename RNG>
109
+ void normal_fill_AVX2(const TensorBase &self, const float mean, const float std, RNG generator) {
110
+ float *data = self.data_ptr<float>();
111
+ auto size = self.numel();
112
+ std::lock_guard<std::mutex> lock(generator->mutex_);
113
+ for (const auto i : c10::irange(size)) {
114
+ at::uniform_real_distribution<float> uniform(0, 1);
115
+ data[i] = uniform(generator);
116
+ }
117
+ const __m256 two_pi = _mm256_set1_ps(2.0f * c10::pi<double>);
118
+ const __m256 one = _mm256_set1_ps(1.0f);
119
+ const __m256 minus_two = _mm256_set1_ps(-2.0f);
120
+ const __m256 mean_v = _mm256_set1_ps(mean);
121
+ const __m256 std_v = _mm256_set1_ps(std);
122
+
123
+ for (int64_t i = 0; i < size - 15; i += 16) {
124
+ normal_fill_16_AVX2(data + i, &two_pi, &one, &minus_two, &mean_v, &std_v);
125
+ }
126
+
127
+ if (size % 16 != 0) {
128
+ // Recompute the last 16 values.
129
+ data = data + size - 16;
130
+ for (const auto i : c10::irange(16)) {
131
+ at::uniform_real_distribution<float> uniform(0, 1);
132
+ data[i] = uniform(generator);
133
+ }
134
+ normal_fill_16_AVX2(data, &two_pi, &one, &minus_two, &mean_v, &std_v);
135
+ }
136
+ }
137
+ #endif
138
+
139
+ template <typename scalar_t>
140
+ static void normal_fill_16(scalar_t *data, const scalar_t mean, const scalar_t std) {
141
+ for (const auto j : c10::irange(8)) {
142
+ const scalar_t u1 = 1 - data[j]; // [0, 1) -> (0, 1] for log.
143
+ const scalar_t u2 = data[j + 8];
144
+ const scalar_t radius = std::sqrt(-2 * std::log(u1));
145
+ const scalar_t theta = 2.0f * c10::pi<double> * u2;
146
+ data[j] = radius * std::cos(theta) * std + mean;
147
+ data[j + 8] = radius * std::sin(theta) * std + mean;
148
+ }
149
+ }
150
+
151
+ template <typename scalar_t, typename RNG>
152
+ void normal_fill(const TensorBase &self, const scalar_t mean, const scalar_t std, RNG generator) {
153
+ scalar_t *data = self.data_ptr<scalar_t>();
154
+ auto size = self.numel();
155
+ std::lock_guard<std::mutex> lock(generator->mutex_);
156
+ for (const auto i : c10::irange(size)) {
157
+ at::uniform_real_distribution<scalar_t> uniform(0, 1);
158
+ data[i] = uniform(generator);
159
+ }
160
+
161
+ for (int64_t i = 0; i < size - 15; i += 16) {
162
+ normal_fill_16<scalar_t>(data + i, mean, std);
163
+ }
164
+ if (size % 16 != 0) {
165
+ // Recompute the last 16 values.
166
+ data = data + size - 16;
167
+ for (const auto i : c10::irange(16)) {
168
+ at::uniform_real_distribution<scalar_t> uniform(0, 1);
169
+ data[i] = uniform(generator);
170
+ }
171
+ normal_fill_16<scalar_t>(data, mean, std);
172
+ }
173
+ }
174
+
175
+ template<typename RNG>
176
+ void normal_kernel(const TensorBase &self, double mean, double std, RNG generator) {
177
+ auto size = self.numel();
178
+ if (self.scalar_type() == ScalarType::Float && size >= 16 && self.is_contiguous()) {
179
+ #ifdef CPU_CAPABILITY_AVX2
180
+ normal_fill_AVX2(self, static_cast<float>(mean), static_cast<float>(std), generator);
181
+ #else
182
+ normal_fill(self, static_cast<float>(mean), static_cast<float>(std), generator);
183
+ #endif
184
+ } else {
185
+ AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, self.scalar_type(), "normal_kernel_cpu", [&] {
186
+ if (size >= 16 && self.is_contiguous()) {
187
+ normal_fill<scalar_t>(self, static_cast<scalar_t>(mean), static_cast<scalar_t>(std), generator);
188
+ } else {
189
+ auto iter = TensorIterator::borrowing_nullary_op(self);
190
+ std::lock_guard<std::mutex> lock(generator->mutex_);
191
+ cpu_serial_kernel(iter, [mean, std, generator]() -> scalar_t {
192
+ at::normal_distribution<double> normal(mean, std);
193
+ return static_cast<scalar_t>(normal(generator));
194
+ });
195
+ }
196
+ });
197
+ }
198
+ }
199
+
200
+ template<typename RNG>
201
+ struct NormalKernel {
202
+ void operator()(Tensor& self, double mean, double std, c10::optional<Generator> gen) {
203
+ normal_kernel(self, mean, std, check_generator<RNG>(gen));
204
+ }
205
+ };
206
+
207
+ // ==================================================== Uniform =======================================================
208
+
209
+ template<typename RNG>
210
+ void uniform_kernel(TensorIteratorBase& iter, double from_, double to_, RNG generator) {
211
+ AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "uniform_kernel_cpu", [&]() {
212
+ std::lock_guard<std::mutex> lock(generator->mutex_);
213
+ auto from = static_cast<scalar_t>(from_);
214
+ auto to = static_cast<scalar_t>(to_);
215
+ at::uniform_real_distribution<scalar_t> uniform(from, to);
216
+ cpu_serial_kernel(iter, [&uniform, generator]() -> scalar_t {
217
+ return static_cast<scalar_t>(uniform(generator));
218
+ });
219
+ });
220
+ }
221
+
222
+ template<typename RNG>
223
+ struct UniformKernel {
224
+ void operator()(TensorIteratorBase& iter, double from, double to, c10::optional<Generator> gen) {
225
+ uniform_kernel(iter, from, to, check_generator<RNG>(gen));
226
+ }
227
+ };
228
+
229
+ // ==================================================== Cauchy ========================================================
230
+
231
+ template<typename RNG>
232
+ void cauchy_kernel(TensorIteratorBase& iter, double median, double sigma, RNG generator) {
233
+ AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "cauchy_cpu", [&]() {
234
+ std::lock_guard<std::mutex> lock(generator->mutex_);
235
+ at::cauchy_distribution<double> cauchy(median, sigma);
236
+ cpu_serial_kernel(iter, [&cauchy, generator]() -> scalar_t {
237
+ return static_cast<scalar_t>(cauchy(generator));
238
+ });
239
+ });
240
+ }
241
+
242
+ template<typename RNG>
243
+ struct CauchyKernel {
244
+ void operator()(TensorIteratorBase& iter, double median, double sigma, c10::optional<Generator> gen) {
245
+ cauchy_kernel(iter, median, sigma, check_generator<RNG>(gen));
246
+ }
247
+ };
248
+
249
+ // ================================================== LogNormal =======================================================
250
+
251
+ template<typename RNG>
252
+ void log_normal_kernel(TensorIteratorBase& iter, double mean, double std, RNG generator) {
253
+ AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "log_normal_cpu", [&]() {
254
+ std::lock_guard<std::mutex> lock(generator->mutex_);
255
+ at::lognormal_distribution<double> logNormal(mean, std);
256
+ cpu_serial_kernel(iter, [&logNormal, generator]() -> scalar_t {
257
+ return static_cast<scalar_t>(logNormal(generator));
258
+ });
259
+ });
260
+ }
261
+
262
+ template<typename RNG>
263
+ struct LogNormalKernel {
264
+ void operator()(TensorIteratorBase& iter, double mean, double std, c10::optional<Generator> gen) {
265
+ log_normal_kernel(iter, mean, std, check_generator<RNG>(gen));
266
+ }
267
+ };
268
+
269
+ // =================================================== Geometric ======================================================
270
+
271
+ template<typename RNG>
272
+ void geometric_kernel(TensorIteratorBase& iter, double p, RNG generator) {
273
+ AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "geometric_cpu", [&]() {
274
+ std::lock_guard<std::mutex> lock(generator->mutex_);
275
+ at::geometric_distribution<double> geometric(p);
276
+ cpu_serial_kernel(iter, [&geometric, generator]() -> scalar_t {
277
+ return static_cast<scalar_t>(geometric(generator));
278
+ });
279
+ });
280
+ }
281
+
282
+ template<typename RNG>
283
+ struct GeometricKernel {
284
+ void operator()(TensorIteratorBase& iter, double p, c10::optional<Generator> gen) {
285
+ geometric_kernel(iter, p, check_generator<RNG>(gen));
286
+ }
287
+ };
288
+
289
+ // ================================================== Exponential =====================================================
290
+
291
+ template<typename RNG>
292
+ void exponential_kernel(TensorIteratorBase& iter, double lambda, RNG generator) {
293
+ TORCH_CHECK(isFloatingType(iter.dtype()), "Exponential distribution is a continuous probability distribution. dtype must be a floating point but you specified ", iter.dtype());
294
+ AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "exponential_cpu", [&]() {
295
+ std::lock_guard<std::mutex> lock(generator->mutex_);
296
+ at::exponential_distribution<double> exponential(lambda);
297
+ cpu_serial_kernel(iter, [&exponential, generator]() -> scalar_t {
298
+ return static_cast<scalar_t>(exponential(generator));
299
+ });
300
+ });
301
+ }
302
+
303
+ template<typename RNG>
304
+ struct ExponentialKernel {
305
+ void operator()(TensorIteratorBase& iter, double lambda, c10::optional<Generator> gen) {
306
+ exponential_kernel(iter, lambda, check_generator<RNG>(gen));
307
+ }
308
+ };
309
+
310
+ // ================================================== Bernoulli =======================================================
311
+
312
+ template<typename RNG>
313
+ void bernoulli_kernel(const TensorBase &self, const TensorBase &p_, RNG generator) {
314
+ AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Bool, at::ScalarType::BFloat16, at::ScalarType::Half,
315
+ self.scalar_type(), "bernoulli_tensor_cpu_self_", [&] {
316
+ // See Note [Acquire lock when using random generators]
317
+ std::lock_guard<std::mutex> lock(generator->mutex_);
318
+ using self_t = scalar_t;
319
+ auto p_cpu = p_.to(kCPU);
320
+ auto p = expand_inplace(self, p_cpu);
321
+ auto iter = TensorIteratorConfig()
322
+ .add_output(self)
323
+ .add_input(*p)
324
+ .check_all_same_dtype(false)
325
+ .build();
326
+ if (p->scalar_type() == kDouble) {
327
+ cpu_serial_kernel(iter, [&](const double p_val) -> self_t {
328
+ at::bernoulli_distribution<double> bernoulli(p_val);
329
+ return static_cast<self_t>(bernoulli(generator));
330
+ });
331
+ } else {
332
+ AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::BFloat16, at::ScalarType::Half,
333
+ p->scalar_type(), "bernoulli_tensor_cpu_p_", [&] {
334
+ using p_t = scalar_t;
335
+ cpu_serial_kernel(iter, [&](const p_t p_val) -> self_t {
336
+ at::bernoulli_distribution<float> bernoulli(p_val);
337
+ return static_cast<self_t>(bernoulli(generator));
338
+ });
339
+ });
340
+ }
341
+ });
342
+ }
343
+
344
+ template<typename RNG>
345
+ void bernoulli_kernel(const TensorBase &self, double p, RNG generator) {
346
+ AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Bool, at::ScalarType::BFloat16, at::ScalarType::Half,
347
+ self.scalar_type(), "bernoulli_scalar_cpu_", [&] {
348
+ // See Note [Acquire lock when using random generators]
349
+ std::lock_guard<std::mutex> lock(generator->mutex_);
350
+ auto iter = TensorIterator::borrowing_nullary_op(self);
351
+ cpu_serial_kernel(iter, [p, generator]() -> scalar_t {
352
+ at::bernoulli_distribution<double> bernoulli(p);
353
+ return static_cast<scalar_t>(bernoulli(generator));
354
+ });
355
+ });
356
+ }
357
+
358
+ template<typename RNG>
359
+ struct BernoulliKernel {
360
+ void operator()(const TensorBase &self, double p, c10::optional<Generator> gen) {
361
+ bernoulli_kernel(self, p, check_generator<RNG>(gen));
362
+ }
363
+ void operator()(const TensorBase &self, const TensorBase &p_, c10::optional<Generator> gen) {
364
+ bernoulli_kernel(self, p_, check_generator<RNG>(gen));
365
+ }
366
+ };
367
+
368
+ }}}}}
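
normal_fill_16 and normal_fill_16_AVX2 above turn pairs of uniform samples into normal samples via the Box-Muller transform: radius = sqrt(-2 ln u1), theta = 2*pi*u2, and radius*cos(theta), radius*sin(theta) are two independent standard normals, which are then scaled by std and shifted by mean. A scalar sketch of that transform using <random> for the uniforms (illustrative only; ATen draws the uniforms from its own generator under the lock shown above):

#include <cmath>
#include <cstdio>
#include <random>
#include <utility>

// Box-Muller: map two uniforms on [0, 1) to two independent N(mean, stddev^2) samples,
// mirroring the math in normal_fill_16 above.
std::pair<float, float> box_muller(float u1, float u2, float mean, float stddev) {
  const float kPi = 3.14159265358979323846f;
  const float v1 = 1.0f - u1;                        // [0, 1) -> (0, 1] so log() stays finite
  const float radius = std::sqrt(-2.0f * std::log(v1));
  const float theta = 2.0f * kPi * u2;
  return {radius * std::cos(theta) * stddev + mean,
          radius * std::sin(theta) * stddev + mean};
}

int main() {
  std::mt19937 rng(0);
  std::uniform_real_distribution<float> uniform(0.0f, 1.0f);
  for (int i = 0; i < 4; ++i) {
    auto [a, b] = box_muller(uniform(rng), uniform(rng), /*mean=*/0.0f, /*stddev=*/1.0f);
    std::printf("% f % f\n", a, b);
  }
}
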
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/IsContiguous.h ADDED
@@ -0,0 +1,62 @@
1
+ #pragma once
2
+
3
+ namespace at { namespace native { inline namespace CPU_CAPABILITY {
4
+
5
+ // n: number of function arguments (arity)
6
+ // traits: function_traits (see FunctionTraits.h)
7
+ // s: index of scalar argument or -1
8
+ template <int n, int stride_index, typename traits, int s=-1>
9
+ struct IsContiguous {
10
+ static bool eval(const int64_t* strides) {
11
+ using type = typename traits::template arg<n - 1>::type;
12
+ return strides[stride_index] == (s == n ? 0 : sizeof(type)) &&
13
+ IsContiguous<n - 1, stride_index - 1, traits, s>::eval(strides);
14
+ }
15
+ };
16
+
17
+ // will be called when an output exists
18
+ template <typename traits, int s>
19
+ struct IsContiguous<0, 0, traits, s> {
20
+ static bool eval(const int64_t* strides) {
21
+ return strides[0] == sizeof(typename traits::result_type);
22
+ }
23
+ };
24
+
25
+ // will be called when there is no output
26
+ template <typename traits, int s>
27
+ struct IsContiguous<0, -1, traits, s> {
28
+ static bool eval(const int64_t* /*strides*/) {
29
+ return true;
30
+ }
31
+ };
32
+
33
+ // output and all inputs are contiguous
34
+ template <typename traits,
35
+ typename std::enable_if<std::is_void<typename traits::result_type>::value>::type* = nullptr>
36
+ static inline bool is_contiguous(const int64_t* strides) {
37
+ return IsContiguous<traits::arity, traits::arity - 1, traits>::eval(strides);
38
+ }
39
+
40
+ template <typename traits,
41
+ typename std::enable_if<!std::is_void<typename traits::result_type>::value>::type* = nullptr>
42
+ static inline bool is_contiguous(const int64_t* strides) {
43
+ return IsContiguous<traits::arity, traits::arity, traits>::eval(strides);
44
+ }
45
+
46
+ // input at `s` is scalar (stride 0); output and other inputs are contiguous
47
+ // NB: output is typically at strides[0] so first input corresponds to s=1
48
+ template <typename traits, int s,
49
+ typename std::enable_if<std::is_void<typename traits::result_type>::value>::type* = nullptr>
50
+ static inline bool is_contiguous_scalar(const int64_t* strides) {
51
+ static_assert(s > 0 && s <= traits::arity, "scalar argument index out of bounds");
52
+ return IsContiguous<traits::arity, traits::arity - 1, traits, s>::eval(strides);
53
+ }
54
+
55
+ template <typename traits, int s,
56
+ typename std::enable_if<!std::is_void<typename traits::result_type>::value>::type* = nullptr>
57
+ static inline bool is_contiguous_scalar(const int64_t* strides) {
58
+ static_assert(s > 0 && s <= traits::arity, "scalar argument index out of bounds");
59
+ return IsContiguous<traits::arity, traits::arity, traits, s>::eval(strides);
60
+ }
61
+
62
+ }}}
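
IsContiguous recurses over the arguments at compile time: argument n-1 is checked against strides[stride_index], the argument at position s (if any) must have stride 0 because it is a scalar, and the base specializations terminate on the output stride or, when there is no output, trivially. A self-contained sketch of the same recursion with a hand-written stand-in for function_traits (the AddTraits struct below is hypothetical, only for illustration; only the with-output base case is reproduced):

#include <cstdint>
#include <cstdio>

// Minimal stand-in for FunctionTraits.h, just enough for the recursion below.
struct AddTraits {                 // models float(float, float)
  using result_type = float;
  static constexpr int arity = 2;
  template <int i> struct arg { using type = float; };
};

// Same shape as the recursion above: check strides[stride_index] for argument
// n - 1, then recurse; s marks a scalar argument whose stride must be 0.
template <int n, int stride_index, typename traits, int s = -1>
struct IsContiguous {
  static bool eval(const int64_t* strides) {
    using type = typename traits::template arg<n - 1>::type;
    return strides[stride_index] == (s == n ? 0 : sizeof(type)) &&
        IsContiguous<n - 1, stride_index - 1, traits, s>::eval(strides);
  }
};

template <typename traits, int s>
struct IsContiguous<0, 0, traits, s> {   // base case: check the output stride
  static bool eval(const int64_t* strides) {
    return strides[0] == sizeof(typename traits::result_type);
  }
};

int main() {
  // Layout: strides[0] = output, strides[1..] = inputs, in bytes per element.
  const int64_t contiguous[]  = {4, 4, 4};
  const int64_t strided[]     = {4, 8, 4};
  const int64_t scalar_arg1[] = {4, 0, 4};   // input 1 broadcast as a scalar
  std::printf("%d %d %d\n",
      IsContiguous<2, 2, AddTraits>::eval(contiguous),       // 1
      IsContiguous<2, 2, AddTraits>::eval(strided),           // 0
      IsContiguous<2, 2, AddTraits, 1>::eval(scalar_arg1));   // 1
}
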
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/ReduceUtils.h ADDED
@@ -0,0 +1,240 @@
1
+ #pragma once
2
+
3
+ #include <ATen/Parallel.h>
4
+ #include <ATen/NumericUtils.h>
5
+ #include <ATen/cpu/vec/vec.h>
6
+ #include <ATen/cpu/vec/functional.h>
7
+ #include <ATen/native/ReductionType.h>
8
+ #include <c10/util/irange.h>
9
+ #include <ATen/OpMathType.h>
10
+ #include <ATen/native/cpu/utils.h>
11
+ #include <ATen/OpMathType.h>
12
+
13
+ namespace at::native {
14
+ inline namespace CPU_CAPABILITY {
15
+
16
+ using namespace vec;
17
+
18
+ #define AT_DISPATCH_REDUCTION_TYPES(op, ...) \
19
+ [&] { \
20
+ switch (op) { \
21
+ case ReductionType::SUM: { \
22
+ static constexpr auto reduce = ReductionType::SUM; \
23
+ return __VA_ARGS__(); \
24
+ } \
25
+ case ReductionType::MEAN: { \
26
+ static constexpr auto reduce = ReductionType::MEAN; \
27
+ return __VA_ARGS__(); \
28
+ } \
29
+ case ReductionType::MIN: { \
30
+ static constexpr auto reduce = ReductionType::MIN; \
31
+ return __VA_ARGS__(); \
32
+ } \
33
+ case ReductionType::MAX: { \
34
+ static constexpr auto reduce = ReductionType::MAX; \
35
+ return __VA_ARGS__(); \
36
+ } \
37
+ case ReductionType::PROD: { \
38
+ static constexpr auto reduce = ReductionType::PROD; \
39
+ return __VA_ARGS__(); \
40
+ } \
41
+ } \
42
+ }()
43
+
44
+ template <typename scalar_t, ReductionType reduce>
45
+ inline vec_scalar_t<scalar_t> init_value() {
46
+ using acc_t = vec_scalar_t<scalar_t>;
47
+ acc_t val;
48
+ if (reduce == ReductionType::SUM ||
49
+ reduce == ReductionType::MEAN) {
50
+ val = static_cast<acc_t>(0);
51
+ } else if (reduce == ReductionType::PROD) {
52
+ val = static_cast<acc_t>(1);
53
+ } else if (reduce == ReductionType::MAX) {
54
+ val = -std::numeric_limits<acc_t>::infinity();
55
+ } else {
56
+ TORCH_INTERNAL_ASSERT(reduce == ReductionType::MIN);
57
+ val = std::numeric_limits<acc_t>::infinity();
58
+ }
59
+ return val;
60
+ }
61
+
62
+ template <typename scalar_t, ReductionType reduce>
63
+ inline vec_scalar_t<scalar_t> init_value(const c10::optional<Scalar>& initial) {
64
+ using acc_t = vec_scalar_t<scalar_t>;
65
+ if (initial.has_value()) {
66
+ return initial.value().to<acc_t>();
67
+ } else {
68
+ return init_value<scalar_t, reduce>();
69
+ }
70
+ }
71
+
72
+ template <typename scalar_t>
73
+ inline void init(scalar_t* out, int64_t size, const vec_scalar_t<scalar_t>& val) {
74
+ using Vec = Vectorized<vec_scalar_t<scalar_t>>;
75
+ map<scalar_t>(
76
+ [val](Vec x) { return Vec(val); },
77
+ out,
78
+ out,
79
+ size);
80
+ }
81
+
82
+ template <typename scalar_t, ReductionType reduce>
83
+ inline void init(scalar_t* out, int64_t size, const c10::optional<Scalar>& initial) {
84
+ using acc_t = vec_scalar_t<scalar_t>;
85
+ acc_t val = init_value<scalar_t, reduce>(initial);
86
+ init(out, size, val);
87
+ }
88
+
89
+ // overload with `include_self`, used by scatter_reduce
90
+ template <typename scalar_t, ReductionType reduce>
91
+ inline void init(scalar_t* out, int64_t size, bool include_self = false) {
92
+ using acc_t = vec_scalar_t<scalar_t>;
93
+ if (!include_self) {
94
+ acc_t val = init_value<scalar_t, reduce>();
95
+ init(out, size, val);
96
+ }
97
+ }
98
+
99
+ template <typename scalar_t, ReductionType reduce>
100
+ inline void _init(scalar_t* self_ptr, at::opmath_type<scalar_t>* buffer_ptr, int64_t size, bool include_self) {
101
+ if (!include_self) {
102
+ init<at::opmath_type<scalar_t>, reduce>(buffer_ptr, size, include_self);
103
+ } else {
104
+ vec::convert(self_ptr, buffer_ptr, size);
105
+ }
106
+ }
107
+
108
+ template <typename scalar_t>
109
+ inline typename std::enable_if<!std::is_same<scalar_t, Vec2>::value, scalar_t>::type
110
+ _max(const scalar_t& x, const scalar_t& y) {
111
+ return at::_isnan(y) ? y : std::max(x, y);
112
+ }
113
+
114
+ template <typename scalar_t>
115
+ inline Vectorized<scalar_t> _max(const Vectorized<scalar_t>& x, const Vectorized<scalar_t>& y) {
116
+ // vec::maximum propagates NaN
117
+ return vec::maximum(x, y);
118
+ }
119
+
120
+ template <typename vec_t>
121
+ inline typename std::enable_if<std::is_same<vec_t, Vec2>::value, Vec2>::type
122
+ _max(const vec_t& x, const vec_t& y) {
123
+ // vec::maximum propagates NaN
124
+ return maximum(x, y);
125
+ }
126
+
127
+ template <typename scalar_t>
128
+ inline typename std::enable_if<!std::is_same<scalar_t, Vec2>::value, scalar_t>::type
129
+ _min(const scalar_t& x, const scalar_t& y) {
130
+ return at::_isnan(y) ? y : std::min(x, y);
131
+ }
132
+
133
+ template <typename scalar_t>
134
+ inline Vectorized<scalar_t> _min(const Vectorized<scalar_t>& x, const Vectorized<scalar_t>& y) {
135
+ // vec::minimum propagates NaN
136
+ return vec::minimum(x, y);
137
+ }
138
+
139
+ template <typename vec_t>
140
+ inline typename std::enable_if<std::is_same<vec_t, Vec2>::value, Vec2>::type
141
+ _min(const vec_t& x, const vec_t& y) {
142
+ // vec::minimum propagates NaN
143
+ return minimum(x, y);
144
+ }
145
+
146
+ template <typename scalar_t, typename accumut, typename Op,
147
+ typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
148
+ inline void map_acc(
149
+ const Op& vec_fun,
150
+ accumut* output_data,
151
+ const accumut* input_data,
152
+ const scalar_t* input_data2,
153
+ int64_t size) {
154
+ using Vec = vec::Vectorized<scalar_t>;
155
+ using aVec = vec::Vectorized<accumut>;
156
+ int64_t d = 0;
157
+ constexpr int64_t kVecSize = Vec::size();
158
+ constexpr int64_t kaVecSize = aVec::size();
159
+ for (d = 0; d < size - (size % kVecSize); d += kVecSize) {
160
+ Vec data2_vec = Vec::loadu(input_data2 + d);
161
+ aVec data2_avec0, data2_avec1;
162
+ std::tie(data2_avec0, data2_avec1) = convert_to_float<scalar_t>(data2_vec);
163
+ aVec input_vec0 = aVec::loadu(input_data + d);
164
+ aVec input_vec1 = aVec::loadu(input_data + d + kaVecSize);
165
+ vec_fun(input_vec0, data2_avec0).store(output_data + d);
166
+ vec_fun(input_vec1, data2_avec1).store(output_data + d + kaVecSize);
167
+ }
168
+ if (size - d > 0) {
169
+ int64_t tail_size = size - d;
170
+ Vec data2_vec = Vec::loadu(input_data2 + d, tail_size);
171
+ aVec data2_avec0, data2_avec1;
172
+ std::tie(data2_avec0, data2_avec1) = convert_to_float<scalar_t>(data2_vec);
173
+ if (tail_size > kaVecSize) {
174
+ aVec input_vec0 = aVec::loadu(input_data + d);
175
+ aVec input_vec1 = aVec::loadu(input_data + d + kaVecSize, tail_size - kaVecSize);
176
+ vec_fun(input_vec0, data2_avec0).store(output_data + d);
177
+ vec_fun(input_vec1, data2_avec1).store(output_data + d + kaVecSize, tail_size - kaVecSize);
178
+ } else {
179
+ aVec input_vec0 = aVec::loadu(input_data + d, tail_size);
180
+ vec_fun(input_vec0, data2_avec0).store(output_data + d, tail_size);
181
+ }
182
+ }
183
+ }
184
+
185
+ // for Max and Min, propagate NaN:
186
+ template <typename T, ReductionType reduce>
187
+ inline T update(const T& x, const T& y) {
188
+ if (reduce == ReductionType::SUM ||
189
+ reduce == ReductionType::MEAN) {
190
+ return x + y;
191
+ } else if (reduce == ReductionType::PROD) {
192
+ return x * y;
193
+ } else if (reduce == ReductionType::MAX) {
194
+ return _max(x, y);
195
+ } else {
196
+ TORCH_INTERNAL_ASSERT(reduce == ReductionType::MIN);
197
+ return _min(x, y);
198
+ }
199
+ }
200
+
201
+ template <typename scalar_t, ReductionType reduce>
202
+ inline void update(scalar_t* out, scalar_t* data, int64_t K) {
203
+ using Vec = vec::Vectorized<vec_scalar_t<scalar_t>>;
204
+ map2<scalar_t>(
205
+ [](Vec x, Vec y) { return update<Vec, reduce>(x, y); },
206
+ out,
207
+ out,
208
+ data,
209
+ K);
210
+ }
211
+
212
+ template <typename scalar_t, ReductionType reduce,
213
+ typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
214
+ inline void update(at::opmath_type<scalar_t>* out, scalar_t* data, int64_t K) {
215
+ using opmath_t = at::opmath_type<scalar_t>;
216
+ using Vec = vec::Vectorized<opmath_t>;
217
+ map_acc<scalar_t, opmath_t>(
218
+ [](Vec x, Vec y) { return update<Vec, reduce>(x, y); },
219
+ out,
220
+ out,
221
+ data,
222
+ K);
223
+ }
224
+
225
+ template <typename scalar_t, ReductionType reduce>
226
+ inline void write(scalar_t* out, int64_t count, int64_t K) {
227
+ using Vec = vec::Vectorized<vec_scalar_t<scalar_t>>;
228
+ if (reduce == ReductionType::MEAN) {
229
+ if (count > 0) {
230
+ vec::map<scalar_t>(
231
+ [count](Vec x) { return x / Vec(count); },
232
+ out,
233
+ out,
234
+ K);
235
+ }
236
+ }
237
+ }
238
+
239
+ } // namespace CPU_CAPABILITY
240
+ } // namespace at::native
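
The init / update / write helpers above split a row reduction into three steps: seed the accumulator with the identity of the reduction type, fold each value in (with _max/_min written so that an incoming NaN wins), and a final pass that divides by the count for MEAN. A scalar sketch of that flow, with the NaN-propagating max shown for comparison (illustrative, no vectorization):

#include <cmath>
#include <cstdio>
#include <vector>

// NaN-propagating max, same policy as _max above: if y is NaN, the NaN wins.
float nan_propagating_max(float x, float y) {
  return std::isnan(y) ? y : std::max(x, y);
}

// init -> update -> write, scalar version of the flow for a MEAN reduction.
float mean_reduce(const std::vector<float>& row) {
  float acc = 0.0f;                                            // init: identity for SUM/MEAN
  for (float v : row) acc += v;                                // update: acc = acc + v
  if (!row.empty()) acc /= static_cast<float>(row.size());     // write: divide by count
  return acc;
}

int main() {
  std::printf("%g\n", mean_reduce({1.0f, 2.0f, 3.0f, 6.0f}));        // 3
  std::printf("%g\n", nan_propagating_max(5.0f, std::nanf("")));     // nan
}
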
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/SoftmaxKernel.h ADDED
@@ -0,0 +1,28 @@
1
+ #pragma once
2
+
3
+ #include <ATen/native/DispatchStub.h>
4
+ #include <cstdint>
5
+
6
+ namespace at {
7
+ class Tensor;
8
+
9
+ namespace native {
10
+
11
+ using forward_fn = void (*)(const Tensor&, const Tensor&);
12
+ using backward_fn = void(*)(const Tensor &, const Tensor &, const Tensor&);
13
+
14
+ DECLARE_DISPATCH(forward_fn, softmax_lastdim_kernel);
15
+ DECLARE_DISPATCH(forward_fn, log_softmax_lastdim_kernel);
16
+ DECLARE_DISPATCH(backward_fn, softmax_backward_lastdim_kernel);
17
+ DECLARE_DISPATCH(backward_fn, log_softmax_backward_lastdim_kernel);
18
+
19
+ using forward_fn_with_dim = void(*)(const Tensor &, const Tensor &, const int64_t);
20
+ using backward_fn_with_dim =
21
+ void (*)(const Tensor&, const Tensor&, const Tensor&, const int64_t);
22
+
23
+ DECLARE_DISPATCH(forward_fn_with_dim, softmax_kernel);
24
+ DECLARE_DISPATCH(forward_fn_with_dim, log_softmax_kernel);
25
+ DECLARE_DISPATCH(backward_fn_with_dim, softmax_backward_kernel);
26
+ DECLARE_DISPATCH(backward_fn_with_dim, log_softmax_backward_kernel);
27
+ }
28
+ }
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/SpmmReduceKernel.h ADDED
@@ -0,0 +1,22 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/Tensor.h>
4
+ #include <ATen/native/DispatchStub.h>
5
+ #include <ATen/native/ReductionType.h>
6
+
7
+ namespace at::native {
8
+
9
+ using spmm_reduce_fn = void(*)(const Tensor&, const Tensor&, const Tensor&, const Tensor&, const Tensor&, ReductionType op);
10
+ using spmm_reduce_arg_fn = void(*)(const Tensor&, const Tensor&, const Tensor&, const Tensor&, const Tensor&, const Tensor&, ReductionType op);
11
+ using spmm_reduce_backward_input_fn = void(*)(const Tensor&, const Tensor&, const Tensor&, const Tensor&, const Tensor&, const Tensor&, ReductionType op);
12
+ using spmm_reduce_backward_input_arg_fn = void(*)(const Tensor&, const Tensor&, const Tensor&, const Tensor&, const Tensor&, ReductionType op);
13
+ using spmm_reduce_backward_other_fn = void(*)(const Tensor&, const Tensor&, const Tensor&, const Tensor&, const Tensor&, const Tensor&, const Tensor&, ReductionType op);
14
+
15
+ DECLARE_DISPATCH(spmm_reduce_fn, spmm_reduce_stub);
16
+ DECLARE_DISPATCH(spmm_reduce_arg_fn, spmm_reduce_arg_stub);
17
+ DECLARE_DISPATCH(spmm_reduce_backward_input_fn, spmm_reduce_backward_input_stub);
18
+ DECLARE_DISPATCH(spmm_reduce_backward_input_arg_fn, spmm_reduce_backward_input_arg_stub);
19
+ DECLARE_DISPATCH(spmm_reduce_backward_other_fn, spmm_reduce_backward_other_stub);
20
+ DECLARE_DISPATCH(spmm_reduce_backward_input_arg_fn, spmm_reduce_backward_other_arg_stub);
21
+
22
+ } // at::native
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/StackKernel.h ADDED
@@ -0,0 +1,12 @@
1
+ // Copyright 2004-present Facebook. All Rights Reserved.
2
+ #pragma once
3
+
4
+ #include <ATen/core/Tensor.h>
5
+ #include <ATen/native/DispatchStub.h>
6
+
7
+ namespace at { namespace native {
8
+
9
+ using stack_serial_fn = void(*)(Tensor &, TensorList, int64_t);
10
+ DECLARE_DISPATCH(stack_serial_fn, stack_serial_stub);
11
+
12
+ }} // namespace at::native
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/mixed_data_type.h ADDED
@@ -0,0 +1,41 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/Tensor.h>
4
+
5
+ namespace at { namespace native {
6
+
7
+ inline ScalarType first_type() {
8
+ return ScalarType::Undefined;
9
+ }
10
+
11
+ template <typename... Args>
12
+ inline ScalarType first_type(const Tensor& arg, const Args&... parameters) {
13
+ return arg.defined() ? arg.scalar_type() : first_type(parameters...);
14
+ }
15
+
16
+ template <typename... Args>
17
+ inline bool is_mixed_type(const Tensor& input, const Args&... parameters) {
18
+ const auto parameter_type = first_type(parameters...);
19
+ return ((parameter_type != ScalarType::Undefined) &&
20
+ (parameter_type != input.scalar_type()));
21
+ }
22
+
23
+ // currently on CPU, mixed data type is only supported
24
+ // when input is 'BFloat16' or 'Half' and parameters are 'Float'
25
+ inline void check_mixed_data_type(const Tensor& input) {
26
+ TORCH_CHECK(at::isReducedFloatingType(input.scalar_type()),
27
+ "mixed dtype (CPU): all inputs must share same datatype.");
28
+ }
29
+
30
+ template <typename... Args>
31
+ inline void check_mixed_data_type(const Tensor& input, const Tensor& parameter, const Args&... parameters) {
32
+ TORCH_CHECK(!parameter.defined() || parameter.scalar_type() == ScalarType::Float,
33
+ "mixed dtype (CPU): expect parameter to have scalar type of Float");
34
+ check_mixed_data_type(input, parameters...);
35
+ }
36
+
37
+ inline ScalarType param_scalar_type(const Tensor& t, bool is_mixed_type) {
38
+ return is_mixed_type ? ScalarType::Float : t.scalar_type();
39
+ }
40
+
41
+ }} // namespace at::native
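
first_type above is a small variadic recursion: return the first defined tensor's dtype, otherwise recurse on the remaining parameters, with the zero-argument overload returning Undefined as the base case. The same pattern on plain std::optional values (hypothetical stand-ins, just to show the recursion):

#include <cstdio>
#include <optional>

// Base case: nothing left to inspect; -1 plays the role of ScalarType::Undefined.
inline int first_defined() { return -1; }

// Recursive case: return the first engaged optional, otherwise keep peeling arguments.
template <typename... Args>
inline int first_defined(const std::optional<int>& arg, const Args&... rest) {
  return arg.has_value() ? *arg : first_defined(rest...);
}

int main() {
  std::printf("%d\n", first_defined(std::nullopt, std::optional<int>(7), std::nullopt));  // 7
  std::printf("%d\n", first_defined(std::nullopt, std::nullopt));                          // -1
}
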
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/Activation.h ADDED
@@ -0,0 +1,20 @@
1
+ #pragma once
2
+ #include <ATen/native/Activation.h>
3
+ #include <cstdint>
4
+
5
+ namespace at {
6
+ struct TensorIteratorBase;
7
+ class TensorBase;
8
+ }
9
+
10
+ namespace at { namespace native {
11
+
12
+ void launch_glu_backward_kernel(const TensorIteratorBase& iter,
13
+ int64_t gI_stride, int64_t I_stride);
14
+
15
+ void launch_log_sigmoid_forward_kernel(TensorIteratorBase& iter);
16
+
17
+ void GeluCUDAKernelImpl(TensorIteratorBase& it, GeluType approximate);
18
+ void GeluBackwardCUDAKernelImpl(TensorIteratorBase& it, GeluType approximate);
19
+
20
+ }} // namespace at::native
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/Copy.h ADDED
@@ -0,0 +1,10 @@
1
+ #pragma once
2
+
3
+ namespace at {
4
+ struct TensorIteratorBase;
5
+
6
+ namespace native {
7
+
8
+ void direct_copy_kernel_cuda(TensorIteratorBase &iter);
9
+
10
+ }} // namespace at::native
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/Distributions.h ADDED
@@ -0,0 +1,25 @@
1
+ #pragma once
2
+
3
+ namespace at {
4
+ struct CUDAGeneratorImpl;
5
+ struct TensorIteratorBase;
6
+ class TensorBase;
7
+
8
+ namespace native {
9
+
10
+ void launch_poisson_cuda_kernel(
11
+ const TensorBase &ret, const TensorBase &lambda, CUDAGeneratorImpl *gen);
12
+
13
+ void launch_gamma_kernel(
14
+ const TensorBase &ret, const TensorBase &alpha, CUDAGeneratorImpl *gen);
15
+
16
+ void launch_binomial_cuda_kernel(
17
+ TensorIteratorBase &iter, CUDAGeneratorImpl *gen);
18
+
19
+ void launch_dirichlet_kernel(TensorIteratorBase &iter);
20
+
21
+ void launch_standard_gamma_grad_kernel(TensorIteratorBase &iter);
22
+
23
+ void launch_dirichlet_grad_kernel(TensorIteratorBase &iter);
24
+
25
+ }} // namespace at::native
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/EmbeddingBackwardKernel.cuh ADDED
@@ -0,0 +1,22 @@
1
+ #pragma once
2
+ #include <ATen/core/Tensor.h>
3
+ #include <ATen/cuda/Atomic.cuh>
4
+ #include <ATen/cuda/CUDAContext.h>
5
+ #include <ATen/TensorUtils.h>
6
+
7
+ namespace at {
8
+ namespace native {
9
+
10
+ Tensor embedding_backward_cuda_kernel(
11
+ const Tensor &grad,
12
+ const Tensor &orig_indices,
13
+ const Tensor &sorted_indices,
14
+ const Tensor &count,
15
+ int64_t num_weights,
16
+ int padding_idx = -1,
17
+ bool mode_mean = false,
18
+ const Tensor &offset2bag = Tensor(),
19
+ const Tensor &bag_size = Tensor(),
20
+ const Tensor &per_sample_weights = Tensor());
21
+
22
+ }}
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/IndexKernel.h ADDED
@@ -0,0 +1,16 @@
1
+ #pragma once
2
+ #include <c10/core/ScalarType.h>
3
+ #include <cstdint>
4
+
5
+ namespace at {
6
+ struct TensorIteratorBase;
7
+ class TensorBase;
8
+ }
9
+
10
+ namespace at {
11
+ namespace native {
12
+ /// @param maskPrefixSum[in,out]
13
+ void launch_masked_scatter_kernel(
14
+ const TensorBase &self, const TensorBase &mask,
15
+ const TensorBase &maskPrefixSum, const TensorBase &source);
16
+ }}
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/Math.cuh ADDED
The diff for this file is too large to render. See raw diff
 
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/MemoryAccess.cuh ADDED
@@ -0,0 +1,385 @@
1
+ #pragma once
2
+
3
+ #include <cstdint>
4
+ #include <type_traits>
5
+ #include <c10/core/DynamicCast.h>
6
+ #include <c10/util/Exception.h>
7
+ #include <c10/util/TypeCast.h>
8
+ #include <c10/macros/Macros.h>
9
+ #include <ATen/core/Array.h>
10
+ #include <ATen/detail/FunctionTraits.h>
11
+ #include <ATen/cuda/detail/OffsetCalculator.cuh>
12
+ #include <ATen/native/cuda/thread_constants.h>
13
+
14
+ #include <thrust/tuple.h>
15
+
16
+ // References:
17
+ // https://devblogs.nvidia.com/cuda-pro-tip-increase-performance-with-vectorized-memory-access/
18
+
19
+ namespace at { namespace native { namespace memory {
20
+
21
+ namespace detail {
22
+
23
+ // What does the `static_unroll` do?
24
+ //
25
+ // We want to do something like:
26
+ //
27
+ // using args_t = typename traits::ArgsTuple;
28
+ // args_t args;
29
+ // #pragma unroll
30
+ // for (int i = 0; i < traits::arity; i++) {
31
+ // std::get<i>(args) = ....
32
+ // }
33
+ //
34
+ // but unfortunately the above code does not work because
35
+ // the template argument has to be a compile time constant
36
+ // so `static_unroll` is created to simulate `#pragma unroll`
37
+ // using template metaprogramming.
38
+
39
+ template<template<int i> typename func, int end, int current=0>
40
+ struct static_unroll {
41
+ template<typename... Args>
42
+ static inline C10_HOST_DEVICE void with_args(Args&&... args) {
43
+ func<current>::apply(std::forward<Args>(args)...);
44
+ static_unroll<func, end, current+1>::with_args(args...);
45
+ }
46
+ };
47
+
48
+ template<template<int i> typename func, int end>
49
+ struct static_unroll<func, end, end> {
50
+ template<typename... Args>
51
+ static inline C10_HOST_DEVICE void with_args(Args... args) {}
52
+ };
53
+
54
+ // helper structs to be used with static_unroll to load arguments
55
+ // one by one
56
+
57
+ template<int arg_index>
58
+ struct vectorized_load_helper {
59
+ template <typename args_t, typename policy_t>
60
+ static __device__ void apply(policy_t &self, args_t *args, int idx) {
61
+ using arg_t = std::tuple_element_t<arg_index, args_t>;
62
+ // `data` holds the data_ptr for tensors [output, input0, input1, ...], so we
63
+ // need a +1 offset to get the input
64
+ auto ptr = reinterpret_cast<arg_t *>(self.data[arg_index + 1]) + block_work_size() * idx;
65
+ auto args_accessor = [&args] __device__ (int thread_unroll_idx) -> arg_t & { return std::get<arg_index>(args[thread_unroll_idx]); };
66
+ self.load_single_arg(args_accessor, ptr);
67
+ }
68
+ };
69
+
70
+ template<int arg_index>
71
+ struct unroll_load_helper {
72
+ template <typename args_t, typename policy_t, typename offset_t, typename loader_t>
73
+ static __device__ void apply(policy_t &self, args_t *args, offset_t offset, loader_t loader, int j, int num_outputs) {
74
+ using arg_t = std::tuple_element_t<arg_index, args_t>;
75
+ // `data` hold the data_ptr for tensors [output, input0, input1, ...], so we
76
+ // need a +1 offset to get the input
77
+ std::get<arg_index>(args[j]) = loader.template load<arg_t>(self.data[arg_index + num_outputs], offset[arg_index], arg_index);
78
+ }
79
+ };
80
+
81
+ template <int current>
82
+ struct multi_outputs_store_helper {
83
+ template<int ntensors, int num_outputs, typename ...Args>
84
+ C10_HOST_DEVICE static void apply(
85
+ at::detail::Array<char*, ntensors> data,
86
+ at::detail::Array<uint32_t, num_outputs> offsets,
87
+ thrust::tuple<Args...> ret) {
88
+ using T = typename thrust::tuple_element<current, thrust::tuple<Args...>>::type;
89
+ T *to = reinterpret_cast<T *>(data[current]) + offsets[current];
90
+ *to = thrust::get<current>(ret);
91
+ }
92
+ };
93
+
94
+ } // namespace detail
95
+
96
+ struct LoadWithoutCast {
97
+ template<typename scalar_t>
98
+ __device__ scalar_t load(char *base_ptr, uint32_t offset, int arg) {
99
+ return c10::load(reinterpret_cast<scalar_t *>(base_ptr) + offset);
100
+ }
101
+ };
102
+
103
+ template <int N>
104
+ struct LoadWithCast {
105
+ using array_t = at::detail::Array<at::ScalarType, std::max<int>(N, 1)>;
106
+ using size_array_t = at::detail::Array<uint32_t, std::max<int>(N, 1)>;
107
+
108
+ array_t dtypes;
109
+ size_array_t element_sizes;
110
+
111
+ LoadWithCast(const TensorIteratorBase& iter) {
112
+ assert(iter.ninputs() == N);
113
+ #pragma unroll
114
+ for (auto i = 0; i < N; ++i) {
115
+ this->dtypes[i] = iter.dtype(i + iter.noutputs());
116
+ element_sizes[i] = c10::elementSize(iter.dtype(i + iter.noutputs()));
117
+ }
118
+ }
119
+
120
+ template<typename scalar_t>
121
+ __device__ scalar_t load(char *base_ptr, uint32_t offset, int arg) {
122
+ void *ptr = base_ptr + element_sizes[arg] * offset;
123
+ return c10::fetch_and_cast<scalar_t>(dtypes[arg], ptr);
124
+ }
125
+ };
126
+
127
+ struct StoreWithoutCast {
128
+ template<typename scalar_t>
129
+ __device__ void store(scalar_t value, char *base_ptr, uint32_t offset, int arg = 0) {
130
+ *(reinterpret_cast<scalar_t *>(base_ptr) + offset) = value;
131
+ }
132
+ };
133
+
134
+ template <int N = 1>
135
+ struct StoreWithCast {
136
+ using array_t = at::detail::Array<at::ScalarType, std::max<int>(N, 1)>;
137
+ using size_array_t = at::detail::Array<uint32_t, std::max<int>(N, 1)>;
138
+
139
+ array_t dtypes;
140
+ size_array_t element_sizes;
141
+
142
+ StoreWithCast(const TensorIteratorBase& iter) {
143
+ assert(iter.noutputs() == N);
144
+ #pragma unroll
145
+ for (auto i = 0; i < N; ++i) {
146
+ this->dtypes[i] = iter.dtype(i);
147
+ element_sizes[i] = c10::elementSize(iter.dtype(i));
148
+ }
149
+ }
150
+
151
+ template<typename scalar_t>
152
+ __device__ void store(scalar_t value, char *base_ptr, uint32_t offset, int arg = 0) {
153
+ void *ptr = base_ptr + element_sizes[arg] * offset;
154
+ c10::cast_and_store<scalar_t>(dtypes[arg], ptr, value);
155
+ }
156
+ };
157
+
158
+ // aligned vector generates vectorized load/store on CUDA
159
+ template<typename scalar_t, int vec_size>
160
+ struct alignas(sizeof(scalar_t) * vec_size) aligned_vector {
161
+ scalar_t val[vec_size];
162
+ };
163
+
164
+ template <int vec_size, typename scalar_t>
165
+ __device__ aligned_vector<scalar_t, vec_size> load_vector(const scalar_t *base_ptr, uint32_t offset) {
166
+ using vec_t = aligned_vector<scalar_t, vec_size>;
167
+ auto *from = reinterpret_cast<const vec_t *>(base_ptr);
168
+ return from[offset];
169
+ }
170
+
171
+ template <int vec_size>
172
+ __device__ aligned_vector<bool, vec_size> load_vector(const bool *base_ptr, uint32_t offset) {
173
+ // See NOTE [Loading boolean values]
174
+ auto tmp = load_vector<vec_size>(reinterpret_cast<const uint8_t*>(base_ptr), offset);
175
+ aligned_vector<bool, vec_size> ret;
176
+ for (int i = 0; i < vec_size; ++i) {
177
+ ret.val[i] = bool(tmp.val[i]);
178
+ }
179
+ return ret;
180
+ }
181
+
182
+ namespace policies {
183
+
184
+ // Assumption:
185
+ // all tensors are contiguous, that is: stride == sizeof(type) for all tensors
186
+ template<typename data_t, typename inp_calc_t, typename out_calc_t, typename loader_t, typename storer_t, int num_outputs = 1>
187
+ struct unroll {
188
+
189
+ data_t data;
190
+ int remaining;
191
+ inp_calc_t input_offset_calculator;
192
+ out_calc_t output_offset_calculator;
193
+ loader_t loader;
194
+ storer_t storer;
195
+
196
+ __device__ unroll(data_t data, int remaining, inp_calc_t ic, out_calc_t oc, loader_t l, storer_t s):
197
+ data(data), remaining(remaining), input_offset_calculator(ic), output_offset_calculator(oc), loader(l), storer(s) {}
198
+
199
+ __device__ inline bool check_inbounds(int thread_work_elem) {
200
+ return ((threadIdx.x + thread_work_elem*num_threads()) < remaining);
201
+ }
202
+
203
+ template<typename args_t>
204
+ __device__ inline void load(args_t *args, int idx) {
205
+ constexpr int arity = std::tuple_size<args_t>::value;
206
+ int thread_idx = threadIdx.x;
207
+ #pragma unroll
208
+ for (int i = 0; i < thread_work_size(); i++) {
209
+ if (thread_idx >= remaining) {
210
+ return;
211
+ }
212
+ int linear_idx = thread_idx + block_work_size() * idx;
213
+ auto offset = input_offset_calculator.get(linear_idx);
214
+ detail::static_unroll<detail::unroll_load_helper, arity>::with_args(*this, args, offset, loader, i, num_outputs);
215
+ thread_idx += num_threads();
216
+ }
217
+ }
218
+
219
+ template<typename scalar_t>
220
+ __device__ inline void store(scalar_t *from, int idx) {
221
+ int thread_idx = threadIdx.x;
222
+ scalar_t *to = reinterpret_cast<scalar_t *>(data[0]) + block_work_size() * idx;
223
+ #pragma unroll
224
+ for (int i = 0; i < thread_work_size(); i++) {
225
+ if (thread_idx >= remaining) {
226
+ return;
227
+ }
228
+ int linear_idx = thread_idx + block_work_size() * idx;
229
+ int offset = output_offset_calculator.get(linear_idx)[0];
230
+ storer.store(from[i], data[0], offset);
231
+ thread_idx += num_threads();
232
+ }
233
+ }
234
+ };
235
+
236
+ // Assumption:
237
+ // all tensors are contiguous, that is: stride == sizeof(type) for all tensors
238
+ // Note:
239
+ // Functions in the vectorized policy do not do boundary checks. They assume the whole block
240
+ // has work to do, so any remainder must be handled by the caller manually.
241
+ template <int vec_size, typename data_t> // vec_size: number of scalars, can be 1, 2, or 4.
242
+ struct vectorized {
243
+
244
+ static_assert(thread_work_size() % vec_size == 0, "The workload per thread must be a multiple of vec_size");
245
+ static constexpr int loop_size = thread_work_size() / vec_size;
246
+
247
+ data_t data;
248
+
249
+ __device__ vectorized(data_t data) : data(data) {}
250
+
251
+ __device__ inline constexpr bool check_inbounds(int thread_work_elem) {
252
+ return true;
253
+ }
254
+
255
+ template<typename accessor_t, typename scalar_t>
256
+ __device__ inline void load_single_arg(accessor_t to, scalar_t *from) {
257
+ int thread_idx = threadIdx.x;
258
+ #pragma unroll
259
+ for (int i = 0; i < loop_size; i++) {
260
+ int index = thread_idx + i * num_threads();
261
+ auto v = load_vector<vec_size>(from, index);
262
+ #pragma unroll
263
+ for (int j = 0; j < vec_size; j++) {
264
+ to(vec_size * i + j) = v.val[j];
265
+ }
266
+ }
267
+ }
268
+
269
+ template<typename args_t>
270
+ __device__ inline void load(args_t *args, int idx) {
271
+ constexpr int arity = std::tuple_size<args_t>::value;
272
+ detail::static_unroll<detail::vectorized_load_helper, arity>::with_args(*this, args, idx);
273
+ }
274
+
275
+ template<typename scalar_t>
276
+ __device__ inline void store(scalar_t *from, int idx) {
277
+ using vec_t = aligned_vector<scalar_t, vec_size>;
278
+ scalar_t *to = reinterpret_cast<scalar_t *>(data[0]) + block_work_size() * idx;
279
+ vec_t *to_ = reinterpret_cast<vec_t *>(to);
280
+ int thread_idx = threadIdx.x;
281
+ #pragma unroll
282
+ for (int i = 0; i < loop_size; i++) {
283
+ int index = thread_idx + i * num_threads();
284
+ vec_t v;
285
+ for (int j = 0; j < vec_size; j++) {
286
+ v.val[j] = from[vec_size * i + j];
287
+ }
288
+ to_[index] = v;
289
+ }
290
+ }
291
+ };
292
+
293
+ template <typename data_t, typename inp_calc_t, typename out_calc_t, int num_outputs>
294
+ struct multi_outputs_unroll {
295
+ // multi_outputs_unroll's members and its check_inbounds/load methods are copied from the unroll struct;
296
+ // we don't use inheritance because of a compiler bug in CUDA 10.2+
297
+ data_t data;
298
+ int remaining;
299
+ inp_calc_t input_offset_calculator;
300
+ out_calc_t output_offset_calculator;
301
+ LoadWithoutCast loader;
302
+ StoreWithoutCast storer;
303
+
304
+ __device__ multi_outputs_unroll(data_t data, int remaining, inp_calc_t ic, out_calc_t oc):
305
+ data(data), remaining(remaining), input_offset_calculator(ic), output_offset_calculator(oc) {}
306
+
307
+ __device__ inline bool check_inbounds(int thread_work_elem) {
308
+ return ((threadIdx.x + thread_work_elem*num_threads()) < remaining);
309
+ }
310
+
311
+ template<typename args_t>
312
+ __device__ inline void load(args_t *args, int idx) {
313
+ constexpr int arity = std::tuple_size<args_t>::value;
314
+ int thread_idx = threadIdx.x;
315
+ #pragma unroll
316
+ for (int i = 0; i < thread_work_size(); i++) {
317
+ if (thread_idx >= remaining) {
318
+ return;
319
+ }
320
+ int linear_idx = thread_idx + block_work_size() * idx;
321
+ auto offset = input_offset_calculator.get(linear_idx);
322
+ detail::static_unroll<detail::unroll_load_helper, arity>::with_args(*this, args, offset, loader, i, num_outputs);
323
+ thread_idx += num_threads();
324
+ }
325
+ }
326
+
327
+
328
+ template <typename return_t>
329
+ __device__ inline void store(return_t *from, int idx) {
330
+ int thread_idx = threadIdx.x;
331
+ #pragma unroll
332
+ for (int i = 0; i < thread_work_size(); i++) {
333
+ if (thread_idx >= this->remaining) {
334
+ return;
335
+ }
336
+ int linear_idx = thread_idx + block_work_size() * idx;
337
+ auto offsets = this->output_offset_calculator.get(linear_idx);
338
+ memory::detail::static_unroll<detail::multi_outputs_store_helper, num_outputs>::with_args(this->data, offsets, from[i]);
339
+ thread_idx += num_threads();
340
+ }
341
+ }
342
+ };
343
+
344
+ } // namespace policies
345
+
346
+ // This is only used in host, but we will wrap this into some templates
347
+ // which is C10_HOST_DEVICE, so we have to make this C10_HOST_DEVICE
348
+ // in order to compile
349
+ template<typename scalar_t>
350
+ inline C10_HOST_DEVICE int can_vectorize_up_to(char *pointer) {
351
+ uint64_t address = reinterpret_cast<uint64_t>(pointer);
352
+ constexpr int vec2_alignment = std::alignment_of<aligned_vector<scalar_t, 2>>::value;
353
+ constexpr int vec4_alignment = std::alignment_of<aligned_vector<scalar_t, 4>>::value;
354
+ if (address % vec4_alignment == 0) {
355
+ return 4;
356
+ } else if (address % vec2_alignment == 0) {
357
+ return 2;
358
+ }
359
+ return 1;
360
+ }
361
+
362
+ template<int i>
363
+ struct can_vectorize_up_to_helper {
364
+ template <typename array_t, typename traits>
365
+ static C10_HOST_DEVICE void apply(int &result, array_t pointers, traits _) {
366
+ using arg_t = typename traits::template arg<i>::type;
367
+ // `pointers` hold the data_ptr for tensors [output, input0, input1, ...], so we
368
+ // need a +1 offset to get the input
369
+ result = std::min<int>(result, can_vectorize_up_to<arg_t>(pointers[i + 1]));
370
+ }
371
+ };
372
+
373
+ template<typename func_t, typename array_t>
374
+ inline int can_vectorize_up_to(array_t pointers) {
375
+ using traits = function_traits<func_t>;
376
+ using return_t = typename traits::result_type;
377
+ constexpr int arity = traits::arity;
378
+ int result = can_vectorize_up_to<return_t>(pointers[0]);
379
+ // We need to get the type for each argument of `func_t`, this can only
380
+ // be done at compile time.
381
+ detail::static_unroll<can_vectorize_up_to_helper, arity>::with_args(result, pointers, traits());
382
+ return result;
383
+ }
384
+
385
+ }}} // namespace at::native::memory
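
detail::static_unroll above simulates `#pragma unroll` where the loop index must be a compile-time constant (it is fed to std::get and to traits::arg<i>): it instantiates func<0>::apply, func<1>::apply, ... func<end-1>::apply recursively, with an empty specialization terminating at `end`. A host-only sketch of the same trick filling a std::tuple element by element (illustrative, outside CUDA):

#include <cstdio>
#include <tuple>
#include <utility>

// Same idea as detail::static_unroll: call func<0>, func<1>, ... func<end-1>,
// with the index available as a compile-time constant.
template <template <int> class func, int end, int current = 0>
struct static_unroll {
  template <typename... Args>
  static void with_args(Args&&... args) {
    func<current>::apply(std::forward<Args>(args)...);
    static_unroll<func, end, current + 1>::with_args(args...);
  }
};

template <template <int> class func, int end>
struct static_unroll<func, end, end> {
  template <typename... Args>
  static void with_args(Args... /*args*/) {}
};

// A per-index functor: the compile-time index is what allows std::get<i>.
template <int i>
struct fill_element {
  template <typename tuple_t>
  static void apply(tuple_t& t, int base) {
    std::get<i>(t) = base + i;
  }
};

int main() {
  std::tuple<int, int, int> t{};
  static_unroll<fill_element, 3>::with_args(t, 100);
  std::printf("%d %d %d\n", std::get<0>(t), std::get<1>(t), std::get<2>(t));  // 100 101 102
}

With the loads expressed this way, can_vectorize_up_to only has to check pointer alignment against the alignas(sizeof(scalar_t) * vec_size) of aligned_vector to decide between 4-, 2-, or 1-wide accesses.
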
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/PersistentSoftmax.cuh ADDED
@@ -0,0 +1,401 @@
1
+ #pragma once
2
+
3
+ #include <cfloat>
4
+ #include <limits>
5
+ #include <stdint.h>
6
+ #include <cuda_fp16.h>
7
+ #include <c10/macros/Macros.h>
8
+
9
+ #include <ATen/cuda/DeviceUtils.cuh>
10
+
11
+ namespace {
12
+
13
+ int log2_ceil(int value) {
14
+ int log2_value = 0;
15
+ while ((1 << log2_value) < value) ++log2_value;
16
+ return log2_value;
17
+ }
18
+
19
+ template<typename T>
20
+ struct Add {
21
+ __device__ __forceinline__ T operator()(T a, T b) const {
22
+ return a + b;
23
+ }
24
+ };
25
+
26
+ template<typename T>
27
+ struct Max {
28
+ __device__ __forceinline__ T operator()(T a, T b) const {
29
+ return a < b ? b : a;
30
+ }
31
+ };
32
+
33
+ template <typename acc_t, int WARP_BATCH, int WARP_SIZE, template<typename> class ReduceOp>
34
+ __device__ __forceinline__ void warp_reduce(acc_t* sum) {
35
+ ReduceOp<acc_t> r;
36
+ #pragma unroll
37
+ for (int offset = WARP_SIZE / 2; offset > 0; offset /= 2) {
38
+ #pragma unroll
39
+ for (int i = 0; i < WARP_BATCH; ++i) {
40
+ acc_t b = WARP_SHFL_XOR(sum[i], offset, WARP_SIZE);
41
+ sum[i] = r(sum[i], b);
42
+ }
43
+ }
44
+ }
45
+
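
warp_reduce above is an XOR butterfly: each step combines lane i with lane (i ^ offset) for offset = WARP_SIZE/2 down to 1, so after log2(WARP_SIZE) steps every lane holds the full reduction without touching shared memory. A host-side C++ model of the access pattern (an array stands in for the warp's lanes and a snapshot for WARP_SHFL_XOR; illustrative only, not device code):

#include <array>
#include <cstdio>

// Host-side model of the XOR butterfly in warp_reduce: at each step, lane i
// combines its value with lane (i ^ offset), reading the values from before
// the step, which is what the warp shuffle provides on the GPU.
template <int WARP_SIZE, typename T, typename Op>
void butterfly_reduce(std::array<T, WARP_SIZE>& lanes, Op op) {
  for (int offset = WARP_SIZE / 2; offset > 0; offset /= 2) {
    std::array<T, WARP_SIZE> before = lanes;   // stands in for WARP_SHFL_XOR
    for (int lane = 0; lane < WARP_SIZE; ++lane) {
      lanes[lane] = op(lanes[lane], before[lane ^ offset]);
    }
  }
}

int main() {
  std::array<float, 8> lanes{1, 2, 3, 4, 5, 6, 7, 8};
  butterfly_reduce<8>(lanes, [](float a, float b) { return a + b; });
  std::printf("%g %g\n", lanes[0], lanes[7]);  // 36 36: every lane ends with the sum
}
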
46
+ // The softmax_warp_* methods perform softmax forward and backward propagation on samples spanning the fast dimension.
47
+ // Each sample contains element_count scalar elements. element_count can be any integer value <= 1024.
48
+ // The template arguments have the following meaning:
49
+ // One "WARP" works on one "BATCH". One "BATCH" contains "WARP_BATCH" samples.
50
+ // WARP_BATCH is equal to 1 when element_count is large, and > 1 when element_count is small.
51
+ // A "WARP" contains "C10_WARP_SIZE" threads; these threads are guaranteed to belong to the same warp.
52
+ // This is important because it means only __shfl_ instructions are required for reductions.
53
+ // Note that this means WARP_SIZE must be a power of two and <= architecture warp size.
54
+ // CUDA warp size is 32 for all existing GPU architectures, but there is no guarantee this will not change for future architectures.
55
+ // ROCm warp size is 64 for all currently ROCm-supported GPU architectures, but this may change for future architectures.
56
+ // is_log_softmax is a flag indicating whether SoftMax or LogSoftMax should be computed.
57
+ // is_masked is a flag indicating whether SoftMax or MaskedSoftMax should be computed.
58
+ // The template can be instantiated with any floating point type for the type arguments input_t, output_t and acc_t.
59
+ // This allows SoftMax to be fused with a cast immediately following the SoftMax.
60
+ // The mask should have the same shape as input, with a boolean indicating whether the value is masked.
61
+ // The head_chunk_size is only used for the transformer mask softmax and equals H * D * D.
62
+ // For instance:
63
+ // input_t=half, acc_t=float, output_t=half => read half tensor, float accumulators, write half tensor.
64
+ // input_t=half, acc_t=float, output_t=float => read half tensor, float accumulators, write float tensor.
65
+ // input_t=float, acc_t=float, output_t=half => read float tensor, float accumulators, write half tensor.
66
+
67
+ template <typename input_t, typename output_t, typename acc_t, int log2_elements, bool is_log_softmax, bool is_masked>
68
+ __global__ void softmax_warp_forward(output_t *dst, const input_t *src, int batch_size, int stride, int element_count, const bool *mask = nullptr, const int head_chunk_size = -1, bool is_transformer_mask = false)
69
+ {
70
+ // WARP_SIZE and WARP_BATCH must match the warp_size and batches_per_warp values computed in dispatch_softmax_forward.
71
+ constexpr int next_power_of_two = 1 << log2_elements;
72
+ constexpr int WARP_SIZE = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE;
73
+ constexpr int WARP_ITERATIONS = next_power_of_two / WARP_SIZE;
74
+ constexpr int WARP_BATCH = (next_power_of_two <= 128) ? 2 : 1;
75
+
76
+ int first_batch = (blockDim.y * blockIdx.x + threadIdx.y) * WARP_BATCH;
77
+
78
+ // batch_size might not be a multiple of WARP_BATCH. Check how
79
+ // many batches have to be computed within this WARP.
80
+ int local_batches = batch_size - first_batch;
81
+ if (local_batches > WARP_BATCH)
82
+ local_batches = WARP_BATCH;
83
+
84
+ // there might be multiple batches per warp. compute the index within the batch
85
+ int local_idx = threadIdx.x;
86
+ int idx_offset = first_batch * stride + local_idx;
87
+
88
+ src += idx_offset;
89
+ dst += idx_offset;
90
+
91
+ if (is_transformer_mask) {
92
+ mask += ((first_batch * stride) / head_chunk_size) * stride + local_idx;
93
+ } else {
94
+ mask += idx_offset;
95
+ }
96
+ // The nested loops over WARP_BATCH and then WARP_ITERATIONS can be simplified to one loop,
97
+ // but I think doing so would obfuscate the logic of the algorithm, thus I chose to keep
98
+ // the nested loops.
99
+ // This should have no impact on performance because the loops are unrolled anyway.
100
+
101
+ // load data from global memory
102
+ acc_t elements[WARP_BATCH][WARP_ITERATIONS];
103
+ for (int i = 0; i < WARP_BATCH; ++i) {
104
+ int batch_element_count = (i >= local_batches) ? 0 : element_count;
105
+ for (int it = 0; it < WARP_ITERATIONS; ++it) {
106
+ int element_index = local_idx + it * WARP_SIZE;
107
+ if (element_index < batch_element_count) {
108
+ elements[i][it] = src[i*element_count+it*WARP_SIZE];
109
+ } else {
110
+ elements[i][it] = -std::numeric_limits<acc_t>::infinity();
111
+ }
112
+ }
113
+ }
114
+
115
+ // compute max_value
116
+ acc_t max_value[WARP_BATCH];
117
+ #pragma unroll
118
+ for (int i = 0; i < WARP_BATCH; ++i) {
119
+ int batch_element_count = (i >= local_batches) ? 0 : element_count;
120
+ bool is_meaningful_max = false;
121
+ max_value[i] = elements[i][0];
122
+ #pragma unroll
123
+ for (int it = 0; it < WARP_ITERATIONS; ++it) {
124
+ if (is_masked) {
125
+ int idx = it*WARP_SIZE;
126
+ if ((idx + local_idx) < batch_element_count) {
127
+ if (!is_transformer_mask) {
128
+ idx += i*element_count;
129
+ }
130
+ if (!mask[idx]) {
131
+ max_value[i] = (is_meaningful_max && max_value[i] > elements[i][it]) ? max_value[i] : elements[i][it];
132
+ is_meaningful_max = true;
133
+ }
134
+ }
135
+ } else {
136
+ max_value[i] = max_value[i] > elements[i][it] ? max_value[i] : elements[i][it];
137
+ }
138
+ }
139
+ if (is_masked) {
140
+ if (!is_meaningful_max) {
141
+ max_value[i] = -std::numeric_limits<acc_t>::infinity();
142
+ }
143
+ }
144
+ }
145
+ warp_reduce<acc_t, WARP_BATCH, WARP_SIZE, Max>(max_value);
146
+
147
+ acc_t sum[WARP_BATCH] { 0.0f };
148
+ #pragma unroll
149
+ for (int i = 0; i < WARP_BATCH; ++i) {
150
+ int batch_element_count = (i >= local_batches) ? 0 : element_count;
151
+ #pragma unroll
152
+ for (int it = 0; it < WARP_ITERATIONS; ++it) {
153
+ if (!is_masked) {
154
+ if (is_log_softmax) {
155
+ sum[i] += std::exp(elements[i][it] - max_value[i]);
156
+ } else {
157
+ elements[i][it] = std::exp(elements[i][it] - max_value[i]);
158
+ sum[i] += elements[i][it];
159
+ }
160
+ } else {
161
+ int idx = it*WARP_SIZE;
162
+ bool valid = (idx + local_idx) < batch_element_count;
163
+ if (!is_transformer_mask) {
164
+ idx += i*element_count;
165
+ }
166
+ if (valid) {
167
+ if (!mask[idx]) {
168
+ if (is_log_softmax) {
169
+ sum[i] += std::exp(elements[i][it] - max_value[i]);
170
+ } else {
171
+ elements[i][it] = std::exp(elements[i][it] - max_value[i]);
172
+ sum[i] += elements[i][it];
173
+ }
174
+ } else {
175
+ if (!is_log_softmax) {
176
+ // Masked values are treated as -infinity, and std::exp(-infinity) is 0.
177
+ elements[i][it] = 0;
178
+ }
179
+ }
180
+ } else {
181
+ if (!is_log_softmax) {
182
+ elements[i][it] = 0.;
183
+ }
184
+ }
185
+ }
186
+ }
187
+ }
188
+ warp_reduce<acc_t, WARP_BATCH, WARP_SIZE, Add>(sum);
189
+
190
+ // store result
191
+ #pragma unroll
192
+ for (int i = 0; i < WARP_BATCH; ++i) {
193
+ if (i >= local_batches)
194
+ break;
195
+ if (is_log_softmax) sum[i] = std::log(sum[i]);
196
+ #pragma unroll
197
+ for (int it = 0; it < WARP_ITERATIONS; ++it) {
198
+ int element_index = local_idx + it * WARP_SIZE;
199
+ if (element_index < element_count) {
200
+ if (is_log_softmax) {
201
+ dst[i*element_count+it*WARP_SIZE] = elements[i][it] - max_value[i] - sum[i];
202
+ } else if (sum[i] == 0) {
203
+ dst[i*element_count+it*WARP_SIZE] = std::numeric_limits<acc_t>::quiet_NaN();
204
+ } else {
205
+ dst[i*element_count+it*WARP_SIZE] = elements[i][it] / sum[i];
206
+ }
207
+ } else {
208
+ break;
209
+ }
210
+ }
211
+ }
212
+ }
213
+
214
+ template <typename input_t, typename output_t, typename acc_t, int log2_elements, bool is_log_softmax, bool is_masked>
215
+ __global__ void softmax_warp_backward(output_t *gradInput, const input_t *grad, const input_t *output, int batch_size, int stride, int element_count, const bool *mask = nullptr)
216
+ {
217
+ // WARP_SIZE and WARP_BATCH must match the warp_size and batches_per_warp values computed in dispatch_softmax_backward.
218
+ constexpr int next_power_of_two = 1 << log2_elements;
219
+ constexpr int WARP_SIZE = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE;
220
+ constexpr int WARP_ITERATIONS = next_power_of_two / WARP_SIZE;
221
+ constexpr int WARP_BATCH = (next_power_of_two <= 128) ? 2 : 1;
222
+
223
+ int first_batch = (blockDim.y * blockIdx.x + threadIdx.y) * WARP_BATCH;
224
+
225
+ // batch_size might not be a multiple of WARP_BATCH. Check how
226
+ // many batches have to be computed within this WARP.
227
+ int local_batches = batch_size - first_batch;
228
+ if (local_batches > WARP_BATCH)
229
+ local_batches = WARP_BATCH;
230
+
231
+ // there might be multiple batches per warp. compute the index within the batch
232
+ int local_idx = threadIdx.x % WARP_SIZE;
233
+
234
+ // the first element to process by the current thread
235
+ int thread_offset = first_batch * stride + local_idx;
236
+ grad += thread_offset;
237
+ output += thread_offset;
238
+ gradInput += thread_offset;
239
+ if (is_masked) {
240
+ mask += thread_offset;
241
+ }
242
+
243
+ // The nested loops over WARP_BATCH and then WARP_ITERATIONS can be simplified to one loop,
244
+ // but I think doing so would obfuscate the logic of the algorithm, thus I chose to keep
245
+ // the nested loops.
246
+ // This should have no impact on performance because the loops are unrolled anyway.
247
+
248
+ // load data from global memory
249
+ acc_t grad_reg[WARP_BATCH][WARP_ITERATIONS];
250
+ acc_t output_reg[WARP_BATCH][WARP_ITERATIONS];
251
+ for (int i = 0; i < WARP_BATCH; ++i) {
252
+ int batch_element_count = (i >= local_batches) ? 0 : element_count;
253
+ for (int it = 0; it < WARP_ITERATIONS; ++it) {
254
+ int element_index = local_idx + it * WARP_SIZE;
255
+ if (element_index < batch_element_count) {
256
+ grad_reg[i][it] = grad[i*element_count+it*WARP_SIZE];
257
+ output_reg[i][it] = output[i*element_count+it*WARP_SIZE];
258
+ } else {
259
+ grad_reg[i][it] = acc_t(0);
260
+ output_reg[i][it] = acc_t(0);
261
+ }
262
+ }
263
+ }
264
+
265
+ acc_t sum[WARP_BATCH] { 0.0f };
266
+ #pragma unroll
267
+ for (int i = 0; i < WARP_BATCH; ++i) {
268
+ #pragma unroll
269
+ for (int it = 0; it < WARP_ITERATIONS; ++it) {
270
+ if (!is_masked || !mask[i*element_count+it*WARP_SIZE]) {
271
+ sum[i] += grad_reg[i][it];
272
+ }
273
+ }
274
+ }
275
+ warp_reduce<acc_t, WARP_BATCH, WARP_SIZE, Add>(sum);
276
+
277
+ // store result
278
+ #pragma unroll
279
+ for (int i = 0; i < WARP_BATCH; ++i) {
280
+ if (i >= local_batches)
281
+ break;
282
+ #pragma unroll
283
+ for (int it = 0; it < WARP_ITERATIONS; ++it) {
284
+ int element_index = local_idx + it * WARP_SIZE;
285
+ if (element_index < element_count) {
286
+ if (is_masked && mask[i*element_count+it*WARP_SIZE]) {
287
+ gradInput[i*element_count+it*WARP_SIZE] = 0;
288
+ }
289
+ // compute gradients
290
+ else if (is_log_softmax) {
291
+ gradInput[i*element_count+it*WARP_SIZE] = (grad_reg[i][it] - std::exp(output_reg[i][it]) * sum[i]);
292
+ } else {
293
+ gradInput[i*element_count+it*WARP_SIZE] = (grad_reg[i][it] - output_reg[i][it] * sum[i]);
294
+ }
295
+ }
296
+ }
297
+ }
298
+ }
299
+
300
+ } // end of anonymous namespace
301
+
302
+ template<typename input_t, typename output_t, typename acc_t, bool is_log_softmax, bool is_masked>
303
+ void dispatch_softmax_forward(output_t *dst, const input_t *src, int softmax_elements, int softmax_elements_stride, int batch_count, const bool *mask = nullptr, int chunk_size = -1, bool is_transformer_mask = false)
304
+ {
305
+ TORCH_INTERNAL_ASSERT( softmax_elements >= 0 && softmax_elements <= 1024 );
306
+ if (softmax_elements == 0) {
307
+ return;
308
+ } else {
309
+ int log2_elements = log2_ceil(softmax_elements);
310
+ const int next_power_of_two = 1 << log2_elements;
311
+
312
+ // This value must match the WARP_SIZE constexpr value computed inside softmax_warp_forward.
313
+ int warp_size = at::cuda::warp_size();
314
+ warp_size = (next_power_of_two < warp_size) ? next_power_of_two : warp_size;
315
+
316
+ // This value must match the WARP_BATCH constexpr value computed inside softmax_warp_forward.
317
+ int batches_per_warp = (next_power_of_two <= 128) ? 2 : 1;
318
+
319
+ // use 128 threads per block to maximize GPU utilization
320
+ constexpr int threads_per_block = 128;
321
+
322
+ int warps_per_block = (threads_per_block / warp_size);
323
+ int batches_per_block = warps_per_block * batches_per_warp;
324
+ int blocks = (batch_count + batches_per_block - 1) / batches_per_block;
325
+ dim3 threads(warp_size, warps_per_block, 1);
326
+ // Launch code would be more elegant if C++ supported FOR CONSTEXPR
327
+ switch (log2_elements) {
328
+ #define LAUNCH_SOFTMAX_WARP_FORWARD(L2E) case L2E: \
329
+ softmax_warp_forward<input_t, output_t, acc_t, L2E, is_log_softmax, is_masked> \
330
+ <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(dst, \
331
+ src, batch_count, softmax_elements_stride, softmax_elements, mask, chunk_size, is_transformer_mask); \
332
+ C10_CUDA_KERNEL_LAUNCH_CHECK(); \
333
+ break;
334
+
335
+ LAUNCH_SOFTMAX_WARP_FORWARD(0); // 1
336
+ LAUNCH_SOFTMAX_WARP_FORWARD(1); // 2
337
+ LAUNCH_SOFTMAX_WARP_FORWARD(2); // 4
338
+ LAUNCH_SOFTMAX_WARP_FORWARD(3); // 8
339
+ LAUNCH_SOFTMAX_WARP_FORWARD(4); // 16
340
+ LAUNCH_SOFTMAX_WARP_FORWARD(5); // 32
341
+ LAUNCH_SOFTMAX_WARP_FORWARD(6); // 64
342
+ LAUNCH_SOFTMAX_WARP_FORWARD(7); // 128
343
+ LAUNCH_SOFTMAX_WARP_FORWARD(8); // 256
344
+ LAUNCH_SOFTMAX_WARP_FORWARD(9); // 512
345
+ LAUNCH_SOFTMAX_WARP_FORWARD(10); // 1024
346
+ default:
347
+ break;
348
+ }
349
+ }
350
+ }
351
+
352
+ template<typename input_t, typename output_t, typename acc_t, bool is_log_softmax, bool is_masked>
353
+ void dispatch_softmax_backward(output_t *grad_input, const input_t *grad, const input_t *output, int softmax_elements, int softmax_elements_stride, int batch_count, const bool *mask = nullptr)
354
+ {
355
+ TORCH_INTERNAL_ASSERT( softmax_elements >= 0 && softmax_elements <= 1024 );
356
+ if (softmax_elements == 0) {
357
+ return;
358
+ } else {
359
+ int log2_elements = log2_ceil(softmax_elements);
360
+ const int next_power_of_two = 1 << log2_elements;
361
+
362
+ // This value must match the WARP_SIZE constexpr value computed inside softmax_warp_backward.
363
+ int warp_size = at::cuda::warp_size();
364
+ warp_size = (next_power_of_two < warp_size) ? next_power_of_two : warp_size;
365
+
366
+ // This value must match the WARP_BATCH constexpr value computed inside softmax_warp_backward.
367
+ int batches_per_warp = (next_power_of_two <= 128) ? 2 : 1;
368
+
369
+ // use 128 threads per block to maximize GPU utilization
370
+ constexpr int threads_per_block = 128;
371
+
372
+ int warps_per_block = (threads_per_block / warp_size);
373
+ int batches_per_block = warps_per_block * batches_per_warp;
374
+ int blocks = (batch_count + batches_per_block - 1) / batches_per_block;
375
+ dim3 threads(warp_size, warps_per_block, 1);
376
+ // Launch code would be more elegant if C++ supported FOR CONSTEXPR
377
+ switch (log2_elements) {
378
+ #define LAUNCH_SOFTMAX_WARP_BACKWARD(L2E) case L2E: \
379
+ softmax_warp_backward<input_t, output_t, acc_t, L2E, is_log_softmax, is_masked> \
380
+ <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>> \
381
+ (grad_input, grad, output, batch_count, softmax_elements_stride, \
382
+ softmax_elements, mask); \
383
+ C10_CUDA_KERNEL_LAUNCH_CHECK(); \
384
+ break;
385
+
386
+ LAUNCH_SOFTMAX_WARP_BACKWARD(0); // 1
387
+ LAUNCH_SOFTMAX_WARP_BACKWARD(1); // 2
388
+ LAUNCH_SOFTMAX_WARP_BACKWARD(2); // 4
389
+ LAUNCH_SOFTMAX_WARP_BACKWARD(3); // 8
390
+ LAUNCH_SOFTMAX_WARP_BACKWARD(4); // 16
391
+ LAUNCH_SOFTMAX_WARP_BACKWARD(5); // 32
392
+ LAUNCH_SOFTMAX_WARP_BACKWARD(6); // 64
393
+ LAUNCH_SOFTMAX_WARP_BACKWARD(7); // 128
394
+ LAUNCH_SOFTMAX_WARP_BACKWARD(8); // 256
395
+ LAUNCH_SOFTMAX_WARP_BACKWARD(9); // 512
396
+ LAUNCH_SOFTMAX_WARP_BACKWARD(10); // 1024
397
+ default:
398
+ break;
399
+ }
400
+ }
401
+ }
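
The two dispatch functions derive the whole launch geometry from softmax_elements, so it can help to trace the arithmetic once on the host. The sketch below reruns that calculation for an assumed row length of 200, 1000 rows, and a CUDA warp size of 32; the example inputs are made up, and the values in the comments are what the expressions above would produce for them.

// Hedged sketch: recomputes, on the host, the launch geometry that
// dispatch_softmax_forward derives from softmax_elements.  The hardware warp
// size is assumed to be 32 here; on ROCm it would be 64.
#include <cstdio>

int log2_ceil(int value) {
  int log2_value = 0;
  while ((1 << log2_value) < value) ++log2_value;
  return log2_value;
}

int main() {
  const int softmax_elements = 200;   // assumed row length (<= 1024)
  const int batch_count = 1000;       // assumed number of rows
  const int hw_warp_size = 32;        // assumption: CUDA warp size

  const int log2_elements = log2_ceil(softmax_elements);          // 8
  const int next_power_of_two = 1 << log2_elements;               // 256
  const int warp_size =
      next_power_of_two < hw_warp_size ? next_power_of_two : hw_warp_size;  // 32
  const int warp_iterations = next_power_of_two / warp_size;      // 8 elements per thread
  const int batches_per_warp = (next_power_of_two <= 128) ? 2 : 1;          // 1
  const int threads_per_block = 128;
  const int warps_per_block = threads_per_block / warp_size;      // 4
  const int batches_per_block = warps_per_block * batches_per_warp;         // 4
  const int blocks = (batch_count + batches_per_block - 1) / batches_per_block;  // 250

  std::printf("threads = (%d, %d, 1), blocks = %d, iterations per thread = %d\n",
              warp_size, warps_per_block, blocks, warp_iterations);
  return 0;
}

With these values each warp owns one row, each thread covers up to 8 strided elements of that row, and 250 blocks of (32, 4) threads cover the 1000 rows.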
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/Resize.h ADDED
@@ -0,0 +1,61 @@
1
+ #pragma once
2
+
3
+ #include <ATen/EmptyTensor.h>
4
+ #include <ATen/native/ResizeCommon.h>
5
+
6
+ #include <c10/cuda/CUDAGuard.h>
7
+
8
+ namespace at { namespace native {
9
+
10
+ TORCH_CUDA_CPP_API void resize_bytes_cuda(StorageImpl* storage, size_t size_bytes);
11
+
12
+ static inline void maybe_resize_storage_cuda(TensorImpl* self, size_t new_size_bytes) {
13
+ // It does not make sense to try to resize a storage
14
+ // to hold 0 elements, and this can break
15
+ // if storage_offset is positive but
16
+ // new_size is 0, so just bail in that case
17
+ // (same comment is in Resize.h)
18
+ if (self->numel() == 0) {
19
+ return;
20
+ }
21
+
22
+ const Storage &storage = self->unsafe_storage();
23
+ TORCH_CHECK(storage, "Tensor: invalid null storage");
24
+ if (new_size_bytes > storage.nbytes()) {
25
+ resize_bytes_cuda(storage.unsafeGetStorageImpl(), new_size_bytes);
26
+ }
27
+ }
28
+
29
+ inline TensorImpl* resize_impl_cuda_(
30
+ TensorImpl* self,
31
+ IntArrayRef size,
32
+ at::OptionalIntArrayRef stride,
33
+ bool device_guard = true) {
34
+ if (self->sizes() == size && (!stride || self->strides() == stride)) {
35
+ return self;
36
+ }
37
+
38
+ // NB: We don't need to hold the device guard when calling from TH
39
+ cuda::OptionalCUDAGuard guard;
40
+ if (device_guard) {
41
+ guard.set_index(self->storage().device().index());
42
+ }
43
+
44
+ const auto itemsize = self->dtype().itemsize();
45
+ const auto storage_offset = self->storage_offset();
46
+ size_t storage_size = 1;
47
+ if (stride) {
48
+ self->set_sizes_and_strides(size, *stride);
49
+ storage_size = at::detail::computeStorageNbytes(
50
+ size, *stride, itemsize, storage_offset);
51
+ } else {
52
+ self->set_sizes_contiguous(size);
53
+ storage_size = at::detail::computeStorageNbytesContiguous(
54
+ size, itemsize, storage_offset);
55
+ }
56
+ maybe_resize_storage_cuda(self, storage_size);
57
+
58
+ return self;
59
+ }
60
+
61
+ }}
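
resize_impl_cuda_ only grows the storage when the requested sizes and strides need more bytes than are already allocated. The sketch below is a simplified stand-in for the strided-storage bound that at::detail::computeStorageNbytes represents, assuming non-negative strides; it is not the ATen implementation.

// Hedged sketch of the strided-storage bound used when resizing: for a tensor
// with the given sizes/strides and storage_offset, this is the smallest number
// of bytes the backing storage must hold.  A simplified stand-in, assuming
// non-negative strides.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

std::size_t storage_bytes(const std::vector<std::int64_t>& sizes,
                          const std::vector<std::int64_t>& strides,
                          std::size_t itemsize,
                          std::int64_t storage_offset) {
  // An empty tensor touches no memory, so no storage is required.
  for (auto s : sizes) {
    if (s == 0) return 0;
  }
  // Largest reachable element index is offset + sum((size_i - 1) * stride_i).
  std::int64_t last = storage_offset;
  for (std::size_t d = 0; d < sizes.size(); ++d) {
    last += (sizes[d] - 1) * strides[d];
  }
  return static_cast<std::size_t>(last + 1) * itemsize;
}

int main() {
  // A contiguous 3x4 float tensor needs 3*4*4 = 48 bytes...
  std::printf("%zu\n", storage_bytes({3, 4}, {4, 1}, sizeof(float), 0));
  // ...and a transposed view over the same data needs no more storage.
  std::printf("%zu\n", storage_bytes({4, 3}, {1, 4}, sizeof(float), 0));
  return 0;
}

maybe_resize_storage_cuda then compares a bound like this against storage.nbytes() and reallocates only when the new bound is larger.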
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/ScanKernels.h ADDED
@@ -0,0 +1,18 @@
1
+ #pragma once
2
+ #include <cstdint>
3
+
4
+ namespace at {
5
+ class TensorBase;
6
+
7
+ namespace native {
8
+
9
+ // NOTE: these functions require output tensors to be contiguous
10
+ void launch_cummax_cuda_kernel(const TensorBase& self, const TensorBase& values,
11
+ const TensorBase& indices, int64_t dim);
12
+ void launch_cummin_cuda_kernel(const TensorBase& self, const TensorBase& values,
13
+ const TensorBase& indices, int64_t dim);
14
+ void launch_logcumsumexp_cuda_kernel(const TensorBase& result, const TensorBase& self, int64_t dim);
15
+ void launch_cumsum_cuda_kernel(const TensorBase& result, const TensorBase& self, int64_t dim);
16
+ void launch_cumprod_cuda_kernel(const TensorBase& result, const TensorBase& self, int64_t dim);
17
+
18
+ }} // namespace at::native
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/Sorting.h ADDED
@@ -0,0 +1,18 @@
1
+ #pragma once
2
+ #include <cstdint>
3
+
4
+ namespace at {
5
+ class TensorBase;
6
+ }
7
+
8
+ namespace at {
9
+ namespace native {
10
+
11
+ void launch_kthvalue_kernel(
12
+ const TensorBase &values, const TensorBase &indices,
13
+ const TensorBase &self, int64_t dim, int64_t k);
14
+ void launch_median_kernel(
15
+ const TensorBase &vals, const TensorBase &inds,
16
+ const TensorBase &in, int64_t dim, bool ignore_nan);
17
+
18
+ }} // namespace at::native
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/SortingRadixSelect.cuh ADDED
@@ -0,0 +1,429 @@
1
+ #include <ATen/ceil_div.h>
2
+ #include <ATen/cuda/Atomic.cuh>
3
+ #include <ATen/cuda/DeviceUtils.cuh>
4
+ #include <ATen/cuda/AsmUtils.cuh>
5
+ #include <c10/macros/Macros.h>
6
+
7
+ namespace at {
8
+ namespace native {
9
+
10
+ template <typename scalar_t>
11
+ struct TopKTypeConfig {};
12
+
13
+ template <>
14
+ struct TopKTypeConfig<float> {
15
+ typedef uint32_t RadixType;
16
+
17
+ // Converts a float to an integer representation with the same
18
+ // sorting; i.e., for floats f1, f2:
19
+ // if f1 < f2 then convert(f1) < convert(f2)
20
+ // We use this to enable radix selection of floating-point values.
21
+ // This also gives a relative order for NaNs, but that's ok, as they
22
+ // will all be adjacent
23
+ // neg inf: signbit=1 exp=ff fraction=0 --> radix = 0 00 ff..
24
+ // pos inf: signbit=0 exp=ff fraction=0 --> radix = 1 ff 00..
25
+ // pos nan: signbit=0 exp=ff fraction>0 --> radix = 1 ff x>0
26
+ // neg nan: signbit=1 exp=ff fraction>0 --> radix = 0 00 x<ff...
27
+ static inline __device__ RadixType convert(float v) {
28
+ RadixType x = __float_as_int(v);
29
+ RadixType mask = (x & 0x80000000) ? 0xffffffff : 0x80000000;
30
+
31
+ return (v == v) ? (x ^ mask) : 0xffffffff;
32
+ }
33
+
34
+ static inline __device__ float deconvert(RadixType v) {
35
+ RadixType mask = (v & 0x80000000) ? 0x80000000 : 0xffffffff;
36
+
37
+ return __int_as_float(v ^ mask);
38
+ }
39
+ };
40
+
41
+ template <>
42
+ struct TopKTypeConfig<uint8_t> {
43
+ typedef uint32_t RadixType;
44
+
45
+ static inline __device__ RadixType convert(uint8_t v) {
46
+ return v;
47
+ }
48
+
49
+ static inline __device__ uint8_t deconvert(RadixType v) {
50
+ return v;
51
+ }
52
+ };
53
+
54
+ template <>
55
+ struct TopKTypeConfig<int8_t> {
56
+ typedef uint32_t RadixType;
57
+
58
+ static inline __device__ RadixType convert(int8_t v) {
59
+ return 128u + v;
60
+ }
61
+
62
+ static inline __device__ int8_t deconvert(RadixType v) {
63
+ return v - 128;
64
+ }
65
+ };
66
+
67
+ template <>
68
+ struct TopKTypeConfig<int16_t> {
69
+ typedef uint32_t RadixType;
70
+
71
+ static inline __device__ RadixType convert(int16_t v) {
72
+ static_assert(sizeof(short) == 2, "");
73
+ return 32768u + v;
74
+ }
75
+
76
+ static inline __device__ int16_t deconvert(RadixType v) {
77
+ return v - 32768;
78
+ }
79
+ };
80
+
81
+ template <>
82
+ struct TopKTypeConfig<int32_t> {
83
+ typedef uint32_t RadixType;
84
+
85
+ static inline __device__ RadixType convert(int32_t v) {
86
+ static_assert(sizeof(int) == 4, "");
87
+ return 2147483648u + v;
88
+ }
89
+
90
+ static inline __device__ int32_t deconvert(RadixType v) {
91
+ return v - 2147483648u;
92
+ }
93
+ };
94
+
95
+ template <>
96
+ struct TopKTypeConfig<int64_t> {
97
+ typedef uint64_t RadixType;
98
+
99
+ static inline __device__ RadixType convert(int64_t v) {
100
+ static_assert(sizeof(int64_t) == 8, "");
101
+ return 9223372036854775808ull + v;
102
+ }
103
+
104
+ static inline __device__ int64_t deconvert(RadixType v) {
105
+ return v - 9223372036854775808ull;
106
+ }
107
+ };
108
+
109
+ template <>
110
+ struct TopKTypeConfig<double> {
111
+ typedef uint64_t RadixType;
112
+
113
+ static inline __device__ RadixType convert(double v) {
114
+ RadixType x = __double_as_longlong(v);
115
+ RadixType mask = -((x >> 63)) | 0x8000000000000000;
116
+ return (v == v) ? (x ^ mask) : 0xffffffffffffffff;
117
+ }
118
+
119
+ static inline __device__ double deconvert(RadixType v) {
120
+ RadixType mask = ((v >> 63) - 1) | 0x8000000000000000;
121
+ return __longlong_as_double(v ^ mask);
122
+ }
123
+ };
124
+
125
+ template <>
126
+ struct TopKTypeConfig<at::Half> {
127
+ typedef uint32_t RadixType;
128
+
129
+ static inline __device__ RadixType convert(at::Half v) {
130
+ #if defined(__CUDA_ARCH__) || defined(USE_ROCM)
131
+ RadixType x = __half_as_ushort(v);
132
+ RadixType mask = (x & 0x00008000) ? 0x0000ffff : 0x00008000;
133
+ return (v == v) ? (x ^ mask) : 0xffff;
134
+ #else
135
+ CUDA_KERNEL_ASSERT(false);
136
+ return 0u;
137
+ #endif
138
+ }
139
+
140
+ static inline __device__ at::Half deconvert(RadixType v) {
141
+ #if defined(__CUDA_ARCH__) || defined(USE_ROCM)
142
+ RadixType mask = (v & 0x00008000) ? 0x00008000 : 0x0000ffff;
143
+ return __ushort_as_half(v ^ mask);
144
+ #else
145
+ CUDA_KERNEL_ASSERT(false);
146
+ return static_cast<at::Half>(0);
147
+ #endif
148
+ }
149
+ };
150
+
151
+ template <>
152
+ struct TopKTypeConfig<at::BFloat16> {
153
+ typedef uint32_t RadixType;
154
+
155
+ static inline __device__ RadixType convert(at::BFloat16 v) {
156
+ RadixType x = v.x;
157
+ RadixType mask = (x & 0x00008000) ? 0x0000ffff : 0x00008000;
158
+ return (v == v) ? (x ^ mask) : 0xffff;
159
+ }
160
+
161
+ static inline __device__ at::BFloat16 deconvert(RadixType v) {
162
+ RadixType mask = (v & 0x00008000) ? 0x00008000 : 0x0000ffff;
163
+ at::BFloat16 r;
164
+ r.x = (v ^ mask);
165
+ return r;
166
+ }
167
+ };
168
+
169
+ // This function counts the distribution of all input values in a
170
+ // slice we are selecting by radix digit at `radixDigitPos`, but only
171
+ // those that pass the filter `((v & desiredMask) == desired)`.
172
+ // This produces and broadcasts the seen counts for a single block only.
173
+ // `smem` must have at least `RadixSize` elements.
174
+ template <
175
+ typename scalar_t,
176
+ typename bitwise_t,
177
+ typename index_t,
178
+ typename CountType,
179
+ int RadixSize,
180
+ int RadixBits>
181
+ __device__ void countRadixUsingMask(
182
+ CountType counts[RadixSize],
183
+ CountType* smem,
184
+ bitwise_t desired,
185
+ bitwise_t desiredMask,
186
+ int radixDigitPos,
187
+ index_t sliceSize,
188
+ index_t withinSliceStride,
189
+ scalar_t* data) {
190
+ // Clear out per-thread counts from a previous round
191
+ #pragma unroll
192
+ for (int i = 0; i < RadixSize; ++i) {
193
+ counts[i] = 0;
194
+ }
195
+
196
+ if (threadIdx.x < RadixSize) {
197
+ smem[threadIdx.x] = 0;
198
+ }
199
+ __syncthreads();
200
+
201
+ // Scan over all the data. Upon a read, the warp will accumulate
202
+ // counts per each digit in the radix using warp voting.
203
+ #if !defined(USE_ROCM)
204
+ // Must be called outside of loop to ensure all threads participate
205
+ unsigned mask = WARP_BALLOT(threadIdx.x < sliceSize);
206
+ #endif
207
+ for (index_t i = threadIdx.x; i < sliceSize;) {
208
+ bitwise_t val =
209
+ TopKTypeConfig<scalar_t>::convert(doLdg(&data[i * withinSliceStride]));
210
+
211
+ bool hasVal = ((val & desiredMask) == desired);
212
+ bitwise_t digitInRadix = at::cuda::Bitfield<bitwise_t>::getBitfield(
213
+ val, radixDigitPos, RadixBits);
214
+
215
+ #pragma unroll
216
+ for (uint32_t j = 0; j < RadixSize; ++j) {
217
+ bool vote = hasVal && (digitInRadix == j);
218
+ #if defined(USE_ROCM)
219
+ counts[j] += __popcll(WARP_BALLOT(vote));
220
+ #else
221
+ counts[j] += __popc(WARP_BALLOT(vote, mask));
222
+ #endif
223
+ }
224
+ i += blockDim.x;
225
+ #if !defined(USE_ROCM)
226
+ mask = WARP_BALLOT(i < sliceSize, mask);
227
+ #endif
228
+ }
229
+
230
+ // Now, for each warp, sum values
231
+ if (at::cuda::getLaneId() == 0) {
232
+ #pragma unroll
233
+ for (uint32_t i = 0; i < RadixSize; ++i) {
234
+ gpuAtomicAddNoReturn(&smem[i], counts[i]);
235
+ }
236
+ }
237
+
238
+ __syncthreads();
239
+
240
+ // For each thread, read in the total counts
241
+ #pragma unroll
242
+ for (uint32_t i = 0; i < RadixSize; ++i) {
243
+ counts[i] = smem[i];
244
+ }
245
+
246
+ __syncthreads();
247
+ }
248
+
249
+ // Over what radix we are selecting values
250
+ constexpr int RADIX_BITS = 2; // digits are base-(2 ^ RADIX_BITS)
251
+ constexpr int RADIX_SIZE = 4; // 2 ^ RADIX_BITS
252
+ constexpr int RADIX_MASK = (RADIX_SIZE - 1);
253
+
254
+ // This finds the unique value `v` that matches the pattern
255
+ // ((v & desiredMask) == desired) in our sorted int format
256
+ template <typename scalar_t, typename bitwise_t, typename index_t>
257
+ __device__ scalar_t findPattern(
258
+ scalar_t* smem,
259
+ scalar_t* data,
260
+ index_t sliceSize,
261
+ index_t withinSliceStride,
262
+ bitwise_t desired,
263
+ bitwise_t desiredMask) {
264
+ if (threadIdx.x < 2) {
265
+ smem[threadIdx.x] = static_cast<scalar_t>(0);
266
+ }
267
+ __syncthreads();
268
+
269
+ // All threads participate in the loop, in order to sync on the flag
270
+ index_t numIterations =
271
+ round_up(sliceSize, static_cast<index_t>(blockDim.x));
272
+ for (index_t i = threadIdx.x; i < numIterations; i += blockDim.x) {
273
+ bool inRange = (i < sliceSize);
274
+ scalar_t v = inRange ? doLdg(&data[i * withinSliceStride])
275
+ : static_cast<scalar_t>(0);
276
+
277
+ if (inRange &&
278
+ ((TopKTypeConfig<scalar_t>::convert(v) & desiredMask) == desired)) {
279
+ // There should not be conflicts if we are using findPattern,
280
+ // since the result is unique
281
+ smem[0] = static_cast<scalar_t>(1);
282
+ smem[1] = v; // can't use val as the flag, since it could be 0
283
+ }
284
+
285
+ __syncthreads();
286
+
287
+ scalar_t found = smem[0];
288
+ scalar_t val = smem[1];
289
+
290
+ __syncthreads();
291
+
292
+ // Check to see if a thread found the value
293
+ if (found != static_cast<scalar_t>(0)) {
294
+ // all threads return this value
295
+ return val;
296
+ }
297
+ }
298
+
299
+ // should not get here
300
+ CUDA_KERNEL_ASSERT(false);
301
+ return static_cast<scalar_t>(0);
302
+ }
303
+
304
+ // Returns the top-Kth element found in the data using radix selection
305
+ template <typename scalar_t, typename bitwise_t, typename index_t>
306
+ __device__ void radixSelect(
307
+ scalar_t* data,
308
+ index_t k,
309
+ bool largest,
310
+ index_t sliceSize,
311
+ index_t withinSliceStride,
312
+ int* smem,
313
+ scalar_t* topK) {
314
+ // Per-thread buckets into which we accumulate digit counts in our
315
+ // radix
316
+ int counts[RADIX_SIZE];
317
+
318
+ // We only consider elements x such that (x & desiredMask) == desired
319
+ // Initially, we consider all elements of the array, so the above
320
+ // statement is true regardless of input.
321
+ bitwise_t desired = 0;
322
+ bitwise_t desiredMask = 0;
323
+
324
+ // We are looking for the top kToFind-th element when iterating over
325
+ // digits; this count gets reduced by elimination when counting
326
+ // successive digits
327
+ int kToFind = k;
328
+
329
+ // We start at the most significant digit in our radix, scanning
330
+ // through to the least significant digit
331
+ for (int digitPos = sizeof(scalar_t) * 8 - RADIX_BITS; digitPos >= 0;
332
+ digitPos -= RADIX_BITS) {
333
+ // Count radix distribution for the current position and reduce
334
+ // across all threads
335
+ countRadixUsingMask<
336
+ scalar_t,
337
+ bitwise_t,
338
+ index_t,
339
+ int,
340
+ RADIX_SIZE,
341
+ RADIX_BITS>(
342
+ counts,
343
+ smem,
344
+ desired,
345
+ desiredMask,
346
+ digitPos,
347
+ sliceSize,
348
+ withinSliceStride,
349
+ data);
350
+
351
+ auto found_unique = [&](int i, int count) -> bool {
352
+ /* All threads have the same value in counts here, so all */
353
+ /* threads will return from the function. */
354
+ if (count == 1 && kToFind == 1) {
355
+ /* There is a unique answer. */
356
+ desired = at::cuda::Bitfield<bitwise_t>::setBitfield(
357
+ desired, i, digitPos, RADIX_BITS);
358
+ desiredMask = at::cuda::Bitfield<bitwise_t>::setBitfield(
359
+ desiredMask, RADIX_MASK, digitPos, RADIX_BITS);
360
+
361
+ /* The answer is now the unique element v such that: */
362
+ /* (v & desiredMask) == desired */
363
+ /* However, we do not yet know what the actual element is. We */
364
+ /* need to perform a search through the data to find the */
365
+ /* element that matches this pattern. */
366
+ *topK = findPattern<scalar_t, bitwise_t, index_t>(
367
+ (scalar_t*)smem,
368
+ data,
369
+ sliceSize,
370
+ withinSliceStride,
371
+ desired,
372
+ desiredMask);
373
+ return true;
374
+ }
375
+ return false;
376
+ };
377
+ auto found_non_unique = [&](int i, int count) -> bool {
378
+ if (count >= kToFind) {
379
+ desired =
380
+ at::cuda::Bitfield<bitwise_t>::setBitfield(
381
+ desired, i, digitPos, RADIX_BITS);
382
+ desiredMask = at::cuda::Bitfield<bitwise_t>::setBitfield(
383
+ desiredMask, RADIX_MASK, digitPos, RADIX_BITS);
384
+
385
+ /* The top-Kth element v must now be one such that: */
386
+ /* (v & desiredMask == desired) */
387
+ /* but we haven't narrowed it down; we must check the next */
388
+ /* least-significant digit */
389
+ return true;
390
+ }
391
+ kToFind -= count;
392
+ return false; // continue the loop
393
+ };
394
+
395
+ // All threads participate in the comparisons below to know the
396
+ // final result
397
+ if (largest) {
398
+ // Process in descending order
399
+ #pragma unroll
400
+ for (int i = RADIX_SIZE - 1; i >= 0; --i) {
401
+ int count = counts[i];
402
+ if (found_unique(i, count)) {
403
+ return;
404
+ }
405
+ if (found_non_unique(i, count)) {
406
+ break;
407
+ }
408
+ }
409
+ } else {
410
+ // Process in ascending order
411
+ #pragma unroll
412
+ for (int i = 0; i < RADIX_SIZE; ++i) {
413
+ int count = counts[i];
414
+ if (found_unique(i, count)) {
415
+ return;
416
+ }
417
+ if (found_non_unique(i, count)) {
418
+ break;
419
+ }
420
+ }
421
+ }
422
+ } // end digitPos for
423
+
424
+ // There is no unique result, but there is a non-unique result
425
+ // matching `desired` exactly
426
+ *topK = TopKTypeConfig<scalar_t>::deconvert(desired);
427
+ }
428
+ } // namespace native
429
+ } // namespace at
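
radixSelect works on bit patterns, so TopKTypeConfig<scalar_t>::convert must map values to unsigned keys whose unsigned order matches the original order. The host-side sketch below checks that property for the float mapping; std::memcpy stands in for the device-only __float_as_int intrinsic, and the test values are arbitrary.

// Hedged host-side check of the order-preserving float -> uint32_t key used by
// TopKTypeConfig<float>: flip the sign bit for non-negative values and all bits
// for negative values, so unsigned comparison of keys matches float comparison.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>

std::uint32_t to_key(float v) {
  std::uint32_t x;
  std::memcpy(&x, &v, sizeof(x));  // host stand-in for __float_as_int
  const std::uint32_t mask = (x & 0x80000000u) ? 0xffffffffu : 0x80000000u;
  return (v == v) ? (x ^ mask) : 0xffffffffu;  // NaNs all map to the largest key
}

int main() {
  const float vals[] = {-1e30f, -2.5f, -0.0f, 0.0f, 1.0f, 3.5f, 1e30f};
  // Keys must be non-decreasing whenever the floats are.
  for (std::size_t i = 1; i < sizeof(vals) / sizeof(vals[0]); ++i) {
    assert(to_key(vals[i - 1]) <= to_key(vals[i]));
  }
  return 0;
}

Because the key order matches the value order, counting keys digit by digit (as countRadixUsingMask does) and narrowing on the digit bucket that contains the k-th count selects the same element that sorting the original floats would.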